Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_extlog.c1
-rw-r--r--drivers/acpi/acpi_processor.c4
-rw-r--r--drivers/acpi/acpica/acapps.h14
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h9
-rw-r--r--drivers/acpi/acpica/acmacros.h74
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h24
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h22
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dbcmds.c2
-rw-r--r--drivers/acpi/acpica/dbconvert.c2
-rw-r--r--drivers/acpi/acpica/dbdisply.c2
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbfileio.c2
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c2
-rw-r--r--drivers/acpi/acpica/dbmethod.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dbobject.c2
-rw-r--r--drivers/acpi/acpica/dbstats.c2
-rw-r--r--drivers/acpi/acpica/dbtest.c2
-rw-r--r--drivers/acpi/acpica/dbutils.c2
-rw-r--r--drivers/acpi/acpica/dbxface.c6
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsdebug.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c3
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c3
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c37
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c155
-rw-r--r--drivers/acpi/acpica/hwsleep.c13
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c99
-rw-r--r--drivers/acpi/acpica/psloop.c6
-rw-r--r--drivers/acpi/acpica/psobject.c12
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c12
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c2
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c11
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c19
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c6
-rw-r--r--drivers/acpi/acpica/utdelete.c8
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utnonansi.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c19
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/einj.c2
-rw-r--r--drivers/acpi/arm64/iort.c2
-rw-r--r--drivers/acpi/bgrt.c28
-rw-r--r--drivers/acpi/bus.c42
-rw-r--r--drivers/acpi/button.c11
-rw-r--r--drivers/acpi/ec.c115
-rw-r--r--drivers/acpi/gsi.c98
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/irq.c297
-rw-r--r--drivers/acpi/nfit/core.c6
-rw-r--r--drivers/acpi/nfit/mce.c1
-rw-r--r--drivers/acpi/osl.c27
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/resource.c18
-rw-r--r--drivers/acpi/sleep.c27
-rw-r--r--drivers/acpi/video_detect.c11
-rw-r--r--drivers/ata/Kconfig19
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci_imx.c196
-rw-r--r--drivers/ata/ahci_qoriq.c35
-rw-r--r--drivers/ata/ahci_xgene.c6
-rw-r--r--drivers/ata/libata-core.c67
-rw-r--r--drivers/ata/libata-eh.c45
-rw-r--r--drivers/ata/libata-scsi.c101
-rw-r--r--drivers/ata/libata-sff.c45
-rw-r--r--drivers/ata/libata-transport.c1
-rw-r--r--drivers/ata/libata.h9
-rw-r--r--drivers/ata/pata_at91.c6
-rw-r--r--drivers/ata/pata_atiixp.c5
-rw-r--r--drivers/ata/pata_bf54x.c7
-rw-r--r--drivers/ata/pata_ep93xx.c4
-rw-r--r--drivers/ata/pata_falcon.c184
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c4
-rw-r--r--drivers/ata/pata_legacy.c15
-rw-r--r--drivers/ata/pata_octeon_cf.c20
-rw-r--r--drivers/ata/pata_of_platform.c9
-rw-r--r--drivers/ata/pata_pcmcia.c6
-rw-r--r--drivers/ata/pata_samsung_cf.c4
-rw-r--r--drivers/ata/sata_mv.c12
-rw-r--r--drivers/ata/sata_rcar.c4
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/idt77252.c12
-rw-r--r--drivers/atm/midway.h2
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/firmware_class.c5
-rw-r--r--drivers/base/memory.c12
-rw-r--r--drivers/base/platform-msi.c2
-rw-r--r--drivers/base/platform.c10
-rw-r--r--drivers/base/power/domain.c124
-rw-r--r--drivers/base/power/opp/core.c1011
-rw-r--r--drivers/base/power/opp/cpu.c66
-rw-r--r--drivers/base/power/opp/of.c154
-rw-r--r--drivers/base/power/opp/opp.h40
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/runtime.c11
-rw-r--r--drivers/base/power/wakeirq.c22
-rw-r--r--drivers/base/property.c229
-rw-r--r--drivers/base/regmap/regcache-rbtree.c7
-rw-r--r--drivers/base/regmap/regcache.c20
-rw-r--r--drivers/base/regmap/regmap-irq.c62
-rw-r--r--drivers/base/regmap/regmap.c129
-rw-r--r--drivers/bcma/bcma_private.h3
-rw-r--r--drivers/bcma/driver_chipcommon.c11
-rw-r--r--drivers/bcma/driver_mips.c3
-rw-r--r--drivers/bcma/main.c25
-rw-r--r--drivers/block/Kconfig13
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/cciss.c131
-rw-r--r--drivers/block/cciss.h36
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_main.c13
-rw-r--r--drivers/block/drbd/drbd_nl.c12
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_req.c33
-rw-r--r--drivers/block/floppy.c6
-rw-r--r--drivers/block/hd.c45
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/block/mg_disk.c31
-rw-r--r--drivers/block/nbd.c264
-rw-r--r--drivers/block/null_blk.c10
-rw-r--r--drivers/block/osdblk.c6
-rw-r--r--drivers/block/paride/Kconfig1
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c15
-rw-r--r--drivers/block/pktcdvd.c12
-rw-r--r--drivers/block/ps3disk.c15
-rw-r--r--drivers/block/rbd.c32
-rw-r--r--drivers/block/skd_main.c15
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/virtio_blk.c207
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c24
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/Kconfig2
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btbcm.c3
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c14
-rw-r--r--drivers/bluetooth/btqcomsmd.c1
-rw-r--r--drivers/bluetooth/btusb.c164
-rw-r--r--drivers/bluetooth/hci_bcm.c68
-rw-r--r--drivers/bluetooth/hci_qca.c4
-rw-r--r--drivers/cdrom/cdrom.c92
-rw-r--r--drivers/cdrom/gdrom.c41
-rw-r--r--drivers/char/hw_random/core.c3
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c1
-rw-r--r--drivers/char/tpm/tpm-chip.c8
-rw-r--r--drivers/char/tpm/tpm-dev.c5
-rw-r--r--drivers/char/tpm/tpm-interface.c175
-rw-r--r--drivers/char/tpm/tpm-sysfs.c28
-rw-r--r--drivers/char/tpm/tpm.h45
-rw-r--r--drivers/char/tpm/tpm1_eventlog.c (renamed from drivers/char/tpm/tpm_eventlog.c)35
-rw-r--r--drivers/char/tpm/tpm2-cmd.c338
-rw-r--r--drivers/char/tpm/tpm2_eventlog.c203
-rw-r--r--drivers/char/tpm/tpm_acpi.c3
-rw-r--r--drivers/char/tpm/tpm_atmel.h6
-rw-r--r--drivers/char/tpm/tpm_crb.c8
-rw-r--r--drivers/char/tpm/tpm_eventlog.h51
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c106
-rw-r--r--drivers/char/tpm/tpm_nsc.c12
-rw-r--r--drivers/char/tpm/tpm_of.c27
-rw-r--r--drivers/char/tpm/tpm_tis.c4
-rw-r--r--drivers/char/tpm/tpm_tis_core.c30
-rw-r--r--drivers/char/tpm/tpm_tis_core.h2
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c1
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c48
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c14
-rw-r--r--drivers/clk/tegra/clk-dfll.c17
-rw-r--r--drivers/clocksource/Kconfig38
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/arm_arch_timer.c153
-rw-r--r--drivers/clocksource/clkevt-probe.c56
-rw-r--r--drivers/clocksource/exynos_mct.c1
-rw-r--r--drivers/clocksource/renesas-ostm.c265
-rw-r--r--drivers/clocksource/tcb_clksrc.c16
-rw-r--r--drivers/clocksource/timer-gemini.c277
-rw-r--r--drivers/cpufreq/Kconfig20
-rw-r--r--drivers/cpufreq/Kconfig.arm13
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c188
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c7
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c429
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c8
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c50
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c3
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c148
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c1
-rw-r--r--drivers/cpufreq/sti-cpufreq.c13
-rw-r--r--drivers/cpufreq/ti-cpufreq.c268
-rw-r--r--drivers/cpuidle/governors/menu.c11
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c2
-rw-r--r--drivers/crypto/ccp/ccp-dev.h1
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c53
-rw-r--r--drivers/crypto/chelsio/chcr_core.c18
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h3
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h1
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c4
-rw-r--r--drivers/devfreq/devfreq-event.c4
-rw-r--r--drivers/devfreq/devfreq.c114
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c329
-rw-r--r--drivers/devfreq/exynos-bus.c22
-rw-r--r--drivers/devfreq/governor.h2
-rw-r--r--drivers/devfreq/governor_passive.c10
-rw-r--r--drivers/devfreq/governor_userspace.c11
-rw-r--r--drivers/devfreq/rk3399_dmc.c16
-rw-r--r--drivers/devfreq/tegra-devfreq.c4
-rw-r--r--drivers/dma/Kconfig8
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/cppi41.c69
-rw-r--r--drivers/dma/dmaengine.c21
-rw-r--r--drivers/dma/dw/core.c211
-rw-r--r--drivers/dma/dw/pci.c19
-rw-r--r--drivers/dma/dw/platform.c1
-rw-r--r--drivers/dma/dw/regs.h59
-rw-r--r--drivers/dma/ipu/ipu_irq.c2
-rw-r--r--drivers/dma/pl330.c24
-rw-r--r--drivers/dma/sh/rcar-dmac.c1
-rw-r--r--drivers/dma/ste_dma40.c7
-rw-r--r--drivers/dma/stm32-dma.c88
-rw-r--r--drivers/dma/zx_dma.c (renamed from drivers/dma/zx296702_dma.c)6
-rw-r--r--drivers/edac/amd64_edac.c64
-rw-r--r--drivers/edac/amd64_edac.h9
-rw-r--r--drivers/edac/edac_mc.c14
-rw-r--r--drivers/edac/edac_mc.h9
-rw-r--r--drivers/edac/edac_mc_sysfs.c40
-rw-r--r--drivers/edac/fsl_ddr_edac.c12
-rw-r--r--drivers/edac/i7300_edac.c6
-rw-r--r--drivers/edac/i7core_edac.c1
-rw-r--r--drivers/edac/i82975x_edac.c4
-rw-r--r--drivers/edac/mce_amd.c19
-rw-r--r--drivers/edac/mce_amd.h1
-rw-r--r--drivers/edac/mpc85xx_edac.c1
-rw-r--r--drivers/edac/sb_edac.c47
-rw-r--r--drivers/edac/skx_edac.c3
-rw-r--r--drivers/firmware/efi/arm-init.c1
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile26
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c132
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c74
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/fdt.c14
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c84
-rw-r--r--drivers/firmware/efi/memattr.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c187
-rw-r--r--drivers/gpio/gpio-bcm-kona.c14
-rw-r--r--drivers/gpio/gpio-dln2.c12
-rw-r--r--drivers/gpio/gpio-dwapb.c14
-rw-r--r--drivers/gpio/gpio-ep93xx.c11
-rw-r--r--drivers/gpio/gpio-f7188x.c19
-rw-r--r--drivers/gpio/gpio-lp873x.c14
-rw-r--r--drivers/gpio/gpio-max77620.c20
-rw-r--r--drivers/gpio/gpio-menz127.c34
-rw-r--r--drivers/gpio/gpio-merrifield.c14
-rw-r--r--drivers/gpio/gpio-omap.c14
-rw-r--r--drivers/gpio/gpio-tc3589x.c15
-rw-r--r--drivers/gpio/gpio-tegra.c14
-rw-r--r--drivers/gpio/gpio-tps65218.c14
-rw-r--r--drivers/gpio/gpio-vx855.c13
-rw-r--r--drivers/gpio/gpio-wcove.c13
-rw-r--r--drivers/gpio/gpio-wm831x.c21
-rw-r--r--drivers/gpio/gpio-wm8994.c13
-rw-r--r--drivers/gpio/gpiolib.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c27
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c24
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c157
-rw-r--r--drivers/gpu/drm/ast/ast_post.c18
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c7
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig9
-rw-r--r--drivers/gpu/drm/drm_atomic.c25
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c9
-rw-r--r--drivers/gpu/drm/drm_connector.c23
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c2
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c36
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c74
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c66
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c81
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c103
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c84
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c162
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c1
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c20
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c9
-rw-r--r--drivers/gpu/drm/i915/intel_display.c176
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h11
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c52
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c7
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c25
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c63
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/hid-core.c30
-rw-r--r--drivers/hid/hid-corsair.c60
-rw-r--r--drivers/hid/hid-cp2112.c28
-rw-r--r--drivers/hid/hid-ids.h14
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-mf.c19
-rw-r--r--drivers/hid/hid-microsoft.c12
-rw-r--r--drivers/hid/hid-multitouch.c44
-rw-r--r--drivers/hid/hid-picolcd_cir.c5
-rw-r--r--drivers/hid/hid-rmi.c975
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h8
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h12
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c38
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h3
-rw-r--r--drivers/hid/usbhid/hid-core.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c13
-rw-r--r--drivers/hid/usbhid/usbkbd.c3
-rw-r--r--drivers/hid/usbhid/usbmouse.c3
-rw-r--r--drivers/hid/wacom.h5
-rw-r--r--drivers/hid/wacom_sys.c163
-rw-r--r--drivers/hid/wacom_wac.c323
-rw-r--r--drivers/hid/wacom_wac.h37
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adc128d818.c147
-rw-r--r--drivers/hwmon/adm1021.c14
-rw-r--r--drivers/hwmon/adm1025.c16
-rw-r--r--drivers/hwmon/adm1026.c128
-rw-r--r--drivers/hwmon/adm1031.c15
-rw-r--r--drivers/hwmon/adm9240.c28
-rw-r--r--drivers/hwmon/adt7411.c361
-rw-r--r--drivers/hwmon/adt7470.c48
-rw-r--r--drivers/hwmon/adt7475.c28
-rw-r--r--drivers/hwmon/adt7x10.c7
-rw-r--r--drivers/hwmon/asb100.c36
-rw-r--r--drivers/hwmon/atxp1.c35
-rw-r--r--drivers/hwmon/dme1737.c46
-rw-r--r--drivers/hwmon/ds1621.c16
-rw-r--r--drivers/hwmon/emc2103.c36
-rw-r--r--drivers/hwmon/f71805f.c16
-rw-r--r--drivers/hwmon/f71882fg.c6
-rw-r--r--drivers/hwmon/fam15h_power.c34
-rw-r--r--drivers/hwmon/fschmd.c6
-rw-r--r--drivers/hwmon/g760a.c22
-rw-r--r--drivers/hwmon/g762.c86
-rw-r--r--drivers/hwmon/gl518sm.c13
-rw-r--r--drivers/hwmon/gl520sm.c73
-rw-r--r--drivers/hwmon/gpio-fan.c54
-rw-r--r--drivers/hwmon/hwmon.c20
-rw-r--r--drivers/hwmon/i5500_temp.c6
-rw-r--r--drivers/hwmon/i5k_amb.c4
-rw-r--r--drivers/hwmon/it87.c164
-rw-r--r--drivers/hwmon/jz4740-hwmon.c6
-rw-r--r--drivers/hwmon/k10temp.c12
-rw-r--r--drivers/hwmon/k8temp.c4
-rw-r--r--drivers/hwmon/lm63.c48
-rw-r--r--drivers/hwmon/lm70.c18
-rw-r--r--drivers/hwmon/lm78.c38
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/lm83.c4
-rw-r--r--drivers/hwmon/lm85.c22
-rw-r--r--drivers/hwmon/lm87.c43
-rw-r--r--drivers/hwmon/lm90.c8
-rw-r--r--drivers/hwmon/lm92.c10
-rw-r--r--drivers/hwmon/lm93.c39
-rw-r--r--drivers/hwmon/lm95234.c12
-rw-r--r--drivers/hwmon/ltc4151.c1
-rw-r--r--drivers/hwmon/max1111.c4
-rw-r--r--drivers/hwmon/max1619.c4
-rw-r--r--drivers/hwmon/max197.c6
-rw-r--r--drivers/hwmon/max6650.c44
-rw-r--r--drivers/hwmon/mc13783-adc.c6
-rw-r--r--drivers/hwmon/mcp3021.c6
-rw-r--r--drivers/hwmon/nct6683.c17
-rw-r--r--drivers/hwmon/nct6775.c4
-rw-r--r--drivers/hwmon/nsa320-hwmon.c12
-rw-r--r--drivers/hwmon/pc87360.c26
-rw-r--r--drivers/hwmon/pc87427.c4
-rw-r--r--drivers/hwmon/pcf8591.c24
-rw-r--r--drivers/hwmon/sch5627.c4
-rw-r--r--drivers/hwmon/sch56xx-common.c1
-rw-r--r--drivers/hwmon/sht15.c68
-rw-r--r--drivers/hwmon/sht21.c92
-rw-r--r--drivers/hwmon/sis5595.c36
-rw-r--r--drivers/hwmon/smsc47m1.c10
-rw-r--r--drivers/hwmon/smsc47m192.c14
-rw-r--r--drivers/hwmon/stts751.c834
-rw-r--r--drivers/hwmon/tmp401.c60
-rw-r--r--drivers/hwmon/via-cputemp.c6
-rw-r--r--drivers/hwmon/via686a.c8
-rw-r--r--drivers/hwmon/vt8231.c59
-rw-r--r--drivers/hwmon/w83627ehf.c8
-rw-r--r--drivers/hwmon/w83627hf.c53
-rw-r--r--drivers/hwmon/w83781d.c34
-rw-r--r--drivers/hwmon/w83791d.c23
-rw-r--r--drivers/hwmon/w83792d.c15
-rw-r--r--drivers/hwmon/w83793.c6
-rw-r--r--drivers/i2c/busses/i2c-cadence.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c45
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c20
-rw-r--r--drivers/i2c/busses/i2c-piix4.c14
-rw-r--r--drivers/i2c/i2c-core.c19
-rw-r--r--drivers/ide/Kconfig1
-rw-r--r--drivers/ide/ide-atapi.c78
-rw-r--r--drivers/ide/ide-cd.c192
-rw-r--r--drivers/ide/ide-cd_ioctl.c5
-rw-r--r--drivers/ide/ide-cd_verbose.c6
-rw-r--r--drivers/ide/ide-devsets.c13
-rw-r--r--drivers/ide/ide-disk.c12
-rw-r--r--drivers/ide/ide-eh.c8
-rw-r--r--drivers/ide/ide-floppy.c37
-rw-r--r--drivers/ide/ide-io.c13
-rw-r--r--drivers/ide/ide-ioctls.c14
-rw-r--r--drivers/ide/ide-park.c20
-rw-r--r--drivers/ide/ide-pm.c20
-rw-r--r--drivers/ide/ide-probe.c36
-rw-r--r--drivers/ide/ide-tape.c41
-rw-r--r--drivers/ide/ide-taskfile.c8
-rw-r--r--drivers/ide/sis5513.c2
-rw-r--r--drivers/iio/adc/palmas_gpadc.c4
-rw-r--r--drivers/iio/health/afe4403.c4
-rw-r--r--drivers/iio/health/afe4404.c4
-rw-r--r--drivers/iio/health/max30100.c2
-rw-r--r--drivers/iio/humidity/dht11.c6
-rw-r--r--drivers/infiniband/core/cma.c9
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c11
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c21
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h30
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c33
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c149
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c11
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c339
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c32
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h121
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c518
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c424
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c450
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c12
-rw-r--r--drivers/infiniband/hw/qedr/main.c23
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h8
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c14
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c62
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c12
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c13
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c16
-rw-r--r--drivers/input/misc/uinput.c20
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/rmi4/Kconfig8
-rw-r--r--drivers/input/rmi4/rmi_driver.c4
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/amd_iommu.c72
-rw-r--r--drivers/iommu/amd_iommu_init.c11
-rw-r--r--drivers/iommu/amd_iommu_types.h4
-rw-r--r--drivers/iommu/arm-smmu-v3.c90
-rw-r--r--drivers/iommu/arm-smmu.c135
-rw-r--r--drivers/iommu/dma-iommu.c183
-rw-r--r--drivers/iommu/dmar.c20
-rw-r--r--drivers/iommu/exynos-iommu.c55
-rw-r--r--drivers/iommu/intel-iommu.c116
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c5
-rw-r--r--drivers/iommu/iommu-sysfs.c61
-rw-r--r--drivers/iommu/iommu.c285
-rw-r--r--drivers/iommu/iova.c23
-rw-r--r--drivers/iommu/ipmmu-vmsa.c2
-rw-r--r--drivers/iommu/msm_iommu.c73
-rw-r--r--drivers/iommu/msm_iommu.h3
-rw-r--r--drivers/iommu/mtk_iommu.c27
-rw-r--r--drivers/iommu/mtk_iommu.h2
-rw-r--r--drivers/iommu/of_iommu.c4
-rw-r--r--drivers/irqchip/Kconfig9
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-gemini.c185
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c86
-rw-r--r--drivers/irqchip/irq-keystone.c28
-rw-r--r--drivers/irqchip/irq-mips-gic.c29
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c296
-rw-r--r--drivers/isdn/hardware/eicon/message.c19
-rw-r--r--drivers/isdn/mISDN/stack.c4
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/led-class.c76
-rw-r--r--drivers/leds/leds-ktd2692.c8
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c15
-rw-r--r--drivers/lightnvm/Kconfig9
-rw-r--r--drivers/lightnvm/Makefile3
-rw-r--r--drivers/lightnvm/core.c1027
-rw-r--r--drivers/lightnvm/gennvm.c657
-rw-r--r--drivers/lightnvm/gennvm.h62
-rw-r--r--drivers/lightnvm/rrpc.c7
-rw-r--r--drivers/lightnvm/rrpc.h3
-rw-r--r--drivers/lightnvm/sysblk.c733
-rw-r--r--drivers/macintosh/rack-meter.c28
-rw-r--r--drivers/md/bcache/request.c12
-rw-r--r--drivers/md/bcache/super.c8
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c353
-rw-r--r--drivers/md/dm-cache-metadata.h11
-rw-r--r--drivers/md/dm-cache-target.c59
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c12
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-mpath.c136
-rw-r--r--drivers/md/dm-raid.c296
-rw-r--r--drivers/md/dm-round-robin.c67
-rw-r--r--drivers/md/dm-rq.c272
-rw-r--r--drivers/md/dm-rq.h2
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-target.c7
-rw-r--r--drivers/md/dm-thin.c15
-rw-r--r--drivers/md/dm.c104
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c11
-rw-r--r--drivers/md/multipath.c2
-rw-r--r--drivers/md/persistent-data/dm-array.c21
-rw-r--r--drivers/md/persistent-data/dm-array.h1
-rw-r--r--drivers/md/persistent-data/dm-bitset.c146
-rw-r--r--drivers/md/persistent-data/dm-bitset.h39
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c12
-rw-r--r--drivers/md/persistent-data/dm-btree.c18
-rw-r--r--drivers/md/persistent-data/dm-btree.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c16
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c4
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c10
-rw-r--r--drivers/md/raid5-cache.c106
-rw-r--r--drivers/md/raid5.c133
-rw-r--r--drivers/md/raid5.h7
-rw-r--r--drivers/media/cec/cec-adap.c112
-rw-r--r--drivers/media/cec/cec-core.c3
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c3
-rw-r--r--drivers/media/common/b2c2/flexcop.c4
-rw-r--r--drivers/media/common/cx2341x.c4
-rw-r--r--drivers/media/common/siano/sms-cards.c4
-rw-r--r--drivers/media/common/siano/sms-cards.h4
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smsir.c5
-rw-r--r--drivers/media/common/tveeprom.c4
-rw-r--r--drivers/media/dvb-core/demux.h4
-rw-r--r--drivers/media/dvb-core/dmxdev.c16
-rw-r--r--drivers/media/dvb-core/dmxdev.h4
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h5
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c7
-rw-r--r--drivers/media/dvb-core/dvb_demux.c4
-rw-r--r--drivers/media/dvb-core/dvb_demux.h4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c31
-rw-r--r--drivers/media/dvb-core/dvb_math.c4
-rw-r--r--drivers/media/dvb-core/dvb_math.h4
-rw-r--r--drivers/media/dvb-core/dvb_net.c22
-rw-r--r--drivers/media/dvb-core/dvb_net.h4
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c4
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-core/dvbdev.h4
-rw-r--r--drivers/media/dvb-frontends/Kconfig17
-rw-r--r--drivers/media/dvb-frontends/Makefile2
-rw-r--r--drivers/media/dvb-frontends/af9013.c4
-rw-r--r--drivers/media/dvb-frontends/af9013.h4
-rw-r--r--drivers/media/dvb-frontends/af9013_priv.h4
-rw-r--r--drivers/media/dvb-frontends/af9033.c837
-rw-r--r--drivers/media/dvb-frontends/af9033.h13
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h185
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c4
-rw-r--r--drivers/media/dvb-frontends/atbm8830.h4
-rw-r--r--drivers/media/dvb-frontends/atbm8830_priv.h4
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c5
-rw-r--r--drivers/media/dvb-frontends/bcm3510.h4
-rw-r--r--drivers/media/dvb-frontends/bcm3510_priv.h4
-rw-r--r--drivers/media/dvb-frontends/bsbe1-d01a.h7
-rw-r--r--drivers/media/dvb-frontends/bsbe1.h7
-rw-r--r--drivers/media/dvb-frontends/bsru6.h7
-rw-r--r--drivers/media/dvb-frontends/cx24113.c4
-rw-r--r--drivers/media/dvb-frontends/cx24113.h4
-rw-r--r--drivers/media/dvb-frontends/cx24123.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.c4
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c15
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx39xxj.h4
-rw-r--r--drivers/media/dvb-frontends/drxd.h8
-rw-r--r--drivers/media/dvb-frontends/drxd_firm.c8
-rw-r--r--drivers/media/dvb-frontends/drxd_firm.h8
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c8
-rw-r--r--drivers/media/dvb-frontends/drxd_map_firm.h8
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c8
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c4
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c4
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.h4
-rw-r--r--drivers/media/dvb-frontends/ec100.c4
-rw-r--r--drivers/media/dvb-frontends/ec100.h4
-rw-r--r--drivers/media/dvb-frontends/hd29l2.c870
-rw-r--r--drivers/media/dvb-frontends/hd29l2.h65
-rw-r--r--drivers/media/dvb-frontends/hd29l2_priv.h301
-rw-r--r--drivers/media/dvb-frontends/isl6405.c7
-rw-r--r--drivers/media/dvb-frontends/isl6405.h7
-rw-r--r--drivers/media/dvb-frontends/isl6421.c7
-rw-r--r--drivers/media/dvb-frontends/isl6421.h7
-rw-r--r--drivers/media/dvb-frontends/itd1000.c4
-rw-r--r--drivers/media/dvb-frontends/itd1000.h4
-rw-r--r--drivers/media/dvb-frontends/itd1000_priv.h4
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c4
-rw-r--r--drivers/media/dvb-frontends/ix2505v.h4
-rw-r--r--drivers/media/dvb-frontends/lg2160.c4
-rw-r--r--drivers/media/dvb-frontends/lg2160.h4
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.h4
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c108
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.h4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.h4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x_priv.h4
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c4
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.h4
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx_priv.h4
-rw-r--r--drivers/media/dvb-frontends/lnbh24.h4
-rw-r--r--drivers/media/dvb-frontends/lnbp21.c7
-rw-r--r--drivers/media/dvb-frontends/lnbp21.h7
-rw-r--r--drivers/media/dvb-frontends/lnbp22.c7
-rw-r--r--drivers/media/dvb-frontends/lnbp22.h7
-rw-r--r--drivers/media/dvb-frontends/mn88473.c10
-rw-r--r--drivers/media/dvb-frontends/mt352.c4
-rw-r--r--drivers/media/dvb-frontends/mt352.h4
-rw-r--r--drivers/media/dvb-frontends/mt352_priv.h4
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c4
-rw-r--r--drivers/media/dvb-frontends/nxt200x.h4
-rw-r--r--drivers/media/dvb-frontends/or51132.c4
-rw-r--r--drivers/media/dvb-frontends/or51132.h4
-rw-r--r--drivers/media/dvb-frontends/or51211.c4
-rw-r--r--drivers/media/dvb-frontends/or51211.h4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1420.h4
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1432.h4
-rw-r--r--drivers/media/dvb-frontends/si2168.c70
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h1
-rw-r--r--drivers/media/dvb-frontends/stv0367.c4
-rw-r--r--drivers/media/dvb-frontends/stv0367.h4
-rw-r--r--drivers/media/dvb-frontends/stv0367_priv.h4
-rw-r--r--drivers/media/dvb-frontends/stv0367_regs.h4
-rw-r--r--drivers/media/dvb-frontends/stv0900.h4
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c4
-rw-r--r--drivers/media/dvb-frontends/stv0900_init.h4
-rw-r--r--drivers/media/dvb-frontends/stv0900_priv.h4
-rw-r--r--drivers/media/dvb-frontends/stv0900_reg.h4
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c4
-rw-r--r--drivers/media/dvb-frontends/stv6110.c4
-rw-r--r--drivers/media/dvb-frontends/stv6110.h4
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c8
-rw-r--r--drivers/media/dvb-frontends/tdhd1.h7
-rw-r--r--drivers/media/dvb-frontends/tua6100.c4
-rw-r--r--drivers/media/dvb-frontends/tua6100.h4
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c551
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.h73
-rw-r--r--drivers/media/dvb-frontends/zl10036.c4
-rw-r--r--drivers/media/dvb-frontends/zl10036.h4
-rw-r--r--drivers/media/dvb-frontends/zl10039.c4
-rw-r--r--drivers/media/dvb-frontends/zl10353.c4
-rw-r--r--drivers/media/dvb-frontends/zl10353.h4
-rw-r--r--drivers/media/dvb-frontends/zl10353_priv.h4
-rw-r--r--drivers/media/i2c/Kconfig2
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adp1653.c5
-rw-r--r--drivers/media/i2c/adv7170.c9
-rw-r--r--drivers/media/i2c/adv7175.c4
-rw-r--r--drivers/media/i2c/adv7180.c4
-rw-r--r--drivers/media/i2c/adv7183.c4
-rw-r--r--drivers/media/i2c/adv7183_regs.h4
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/i2c/ak881x.c6
-rw-r--r--drivers/media/i2c/aptina-pll.c5
-rw-r--r--drivers/media/i2c/aptina-pll.h5
-rw-r--r--drivers/media/i2c/as3645a.c5
-rw-r--r--drivers/media/i2c/bt819.c4
-rw-r--r--drivers/media/i2c/bt856.c4
-rw-r--r--drivers/media/i2c/cs5345.c4
-rw-r--r--drivers/media/i2c/cs53l32a.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-audio.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-firmware.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c5
-rw-r--r--drivers/media/i2c/cx25840/cx25840-vbi.c4
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig6
-rw-r--r--drivers/media/i2c/et8ek8/Makefile2
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c1514
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c587
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h96
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c6
-rw-r--r--drivers/media/i2c/ks0127.c4
-rw-r--r--drivers/media/i2c/ks0127.h4
-rw-r--r--drivers/media/i2c/m52790.c4
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c7
-rw-r--r--drivers/media/i2c/ml86v7667.c6
-rw-r--r--drivers/media/i2c/msp3400-driver.c5
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c5
-rw-r--r--drivers/media/i2c/mt9m032.c5
-rw-r--r--drivers/media/i2c/mt9p031.c8
-rw-r--r--drivers/media/i2c/mt9v032.c11
-rw-r--r--drivers/media/i2c/noon010pc30.c4
-rw-r--r--drivers/media/i2c/ov2659.c1
-rw-r--r--drivers/media/i2c/ov7640.c4
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c2
-rw-r--r--drivers/media/i2c/s5k6a3.c6
-rw-r--r--drivers/media/i2c/saa7110.c4
-rw-r--r--drivers/media/i2c/saa7115.c4
-rw-r--r--drivers/media/i2c/saa7127.c4
-rw-r--r--drivers/media/i2c/saa717x.c4
-rw-r--r--drivers/media/i2c/saa7185.c4
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c33
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c2
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c4
-rw-r--r--drivers/media/i2c/tc358743.c47
-rw-r--r--drivers/media/i2c/tc358743_regs.h1
-rw-r--r--drivers/media/i2c/tlv320aic23b.c4
-rw-r--r--drivers/media/i2c/tvp514x.c4
-rw-r--r--drivers/media/i2c/tvp514x_regs.h4
-rw-r--r--drivers/media/i2c/tvp5150.c56
-rw-r--r--drivers/media/i2c/tvp5150_reg.h9
-rw-r--r--drivers/media/i2c/tvp7002.c4
-rw-r--r--drivers/media/i2c/tvp7002_reg.h4
-rw-r--r--drivers/media/i2c/tw2804.c4
-rw-r--r--drivers/media/i2c/tw9903.c4
-rw-r--r--drivers/media/i2c/tw9906.c4
-rw-r--r--drivers/media/i2c/uda1342.c4
-rw-r--r--drivers/media/i2c/upd64031a.c4
-rw-r--r--drivers/media/i2c/upd64083.c5
-rw-r--r--drivers/media/i2c/vp27smpx.c4
-rw-r--r--drivers/media/i2c/vpx3220.c4
-rw-r--r--drivers/media/i2c/vs6624.c4
-rw-r--r--drivers/media/i2c/vs6624_regs.h4
-rw-r--r--drivers/media/i2c/wm8739.c4
-rw-r--r--drivers/media/i2c/wm8775.c4
-rw-r--r--drivers/media/media-device.c14
-rw-r--r--drivers/media/media-devnode.c4
-rw-r--r--drivers/media/media-entity.c166
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c6
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c11
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c5
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.h4
-rw-r--r--drivers/media/pci/cobalt/cobalt-cpld.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.c5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.h5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.h5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa.h5
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c5
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h5
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c5
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c5
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h5
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c5
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c5
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c5
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h4
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c5
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c5
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h5
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c4
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.h4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c5
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h5
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c5
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h5
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c5
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h4
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c5
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h5
-rw-r--r--drivers/media/pci/cx18/cx18-io.c5
-rw-r--r--drivers/media/pci/cx18/cx18-io.h5
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c5
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h5
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c5
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h5
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c5
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h5
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c5
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h5
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c5
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h5
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c5
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h5
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c5
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h5
-rw-r--r--drivers/media/pci/cx18/cx18-version.h5
-rw-r--r--drivers/media/pci/cx18/cx18-video.c5
-rw-r--r--drivers/media/pci/cx18/cx18-video.h5
-rw-r--r--drivers/media/pci/cx18/cx23418.h5
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c54
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c25
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-biffuncs.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-cards.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-gpio.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-defines.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-reg.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-medusa-video.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-reg.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-sram.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821.h4
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c8
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h8
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h8
-rw-r--r--drivers/media/pci/dm1105/Kconfig2
-rw-r--r--drivers/media/pci/dm1105/dm1105.c7
-rw-r--r--drivers/media/pci/ivtv/Kconfig13
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c31
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.c18
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.h5
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c21
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h5
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa.h5
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c12
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h37
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c49
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c4
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c23
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c5
-rw-r--r--drivers/media/pci/mantis/mantis_input.c2
-rw-r--r--drivers/media/pci/meye/meye.c5
-rw-r--r--drivers/media/pci/meye/meye.h4
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c15
-rw-r--r--drivers/media/pci/ngene/ngene-core.c8
-rw-r--r--drivers/media/pci/ngene/ngene-dvb.c8
-rw-r--r--drivers/media/pci/ngene/ngene-i2c.c8
-rw-r--r--drivers/media/pci/ngene/ngene.h8
-rw-r--r--drivers/media/pci/pluto2/pluto2.c4
-rw-r--r--drivers/media/pci/pt1/pt1.c4
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007s.c4
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007s.h4
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007t.c4
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007t.h4
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134.h4
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-reg.h4
-rw-r--r--drivers/media/pci/saa7164/saa7164-types.h4
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164.h4
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h4
-rw-r--r--drivers/media/pci/ttpci/av7110.c7
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c7
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c7
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c15
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.h12
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c7
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c7
-rw-r--r--drivers/media/pci/ttpci/budget-av.c7
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c9
-rw-r--r--drivers/media/pci/ttpci/budget-core.c7
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c7
-rw-r--r--drivers/media/pci/ttpci/budget.c7
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.h4
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c2
-rw-r--r--drivers/media/pci/zoran/videocodec.c4
-rw-r--r--drivers/media/pci/zoran/videocodec.h4
-rw-r--r--drivers/media/pci/zoran/zoran.h4
-rw-r--r--drivers/media/pci/zoran/zoran_card.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.h4
-rw-r--r--drivers/media/pci/zoran/zoran_device.c4
-rw-r--r--drivers/media/pci/zoran/zoran_device.h4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c5
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.c4
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.h4
-rw-r--r--drivers/media/pci/zoran/zr36016.c4
-rw-r--r--drivers/media/pci/zoran/zr36016.h4
-rw-r--r--drivers/media/pci/zoran/zr36050.c4
-rw-r--r--drivers/media/pci/zoran/zr36050.h4
-rw-r--r--drivers/media/pci/zoran/zr36057.h4
-rw-r--r--drivers/media/pci/zoran/zr36060.c4
-rw-r--r--drivers/media/pci/zoran/zr36060.h4
-rw-r--r--drivers/media/platform/Kconfig53
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/platform/blackfin/ppi.c4
-rw-r--r--drivers/media/platform/coda/Makefile1
-rw-r--r--drivers/media/platform/coda/coda-bit.c93
-rw-r--r--drivers/media/platform/coda/coda-common.c181
-rw-r--r--drivers/media/platform/coda/coda.h5
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c338
-rw-r--r--drivers/media/platform/coda/imx-vdoa.h58
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc_regs.h4
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc_regs.h4
-rw-r--r--drivers/media/platform/davinci/isif.c4
-rw-r--r--drivers/media/platform/davinci/isif_regs.h4
-rw-r--r--drivers/media/platform/davinci/vpbe.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_osd_regs.h4
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_venc_regs.h4
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif.c14
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c28
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h6
-rw-r--r--drivers/media/platform/davinci/vpif_display.c6
-rw-r--r--drivers/media/platform/davinci/vpss.c4
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c3
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c12
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c9
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c20
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h2
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c160
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c14
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c5
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c18
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c8
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c6
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c2
-rw-r--r--drivers/media/platform/sti/delta/Makefile6
-rw-r--r--drivers/media/platform/sti/delta/delta-cfg.h64
-rw-r--r--drivers/media/platform/sti/delta/delta-debug.c72
-rw-r--r--drivers/media/platform/sti/delta/delta-debug.h18
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.c594
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.h76
-rw-r--r--drivers/media/platform/sti/delta/delta-mem.c51
-rw-r--r--drivers/media/platform/sti/delta/delta-mem.h14
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-dec.c455
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-fw.h225
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-hdr.c149
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg.h35
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c1993
-rw-r--r--drivers/media/platform/sti/delta/delta.h566
-rw-r--r--drivers/media/platform/sti/hva/Makefile1
-rw-r--r--drivers/media/platform/sti/hva/hva-debugfs.c422
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c6
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c48
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.h3
-rw-r--r--drivers/media/platform/sti/hva/hva-mem.c5
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c78
-rw-r--r--drivers/media/platform/sti/hva/hva.h96
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c2
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c5
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c17
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c16
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c8
-rw-r--r--drivers/media/radio/dsbr100.c4
-rw-r--r--drivers/media/radio/radio-cadet.c8
-rw-r--r--drivers/media/radio/radio-isa.c5
-rw-r--r--drivers/media/radio/radio-isa.h5
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-ma901.c4
-rw-r--r--drivers/media/radio/radio-mr800.c4
-rw-r--r--drivers/media/radio/radio-shark.c4
-rw-r--r--drivers/media/radio/radio-shark2.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c4
-rw-r--r--drivers/media/radio/radio-tea5777.c4
-rw-r--r--drivers/media/radio/radio-tea5777.h4
-rw-r--r--drivers/media/radio/radio-timb.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c4
-rw-r--r--drivers/media/radio/saa7706h.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h4
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c4
-rw-r--r--drivers/media/radio/si4713/si4713.c4
-rw-r--r--drivers/media/radio/tef6862.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.h4
-rw-r--r--drivers/media/rc/Kconfig22
-rw-r--r--drivers/media/rc/Makefile2
-rw-r--r--drivers/media/rc/ati_remote.c7
-rw-r--r--drivers/media/rc/ene_ir.c10
-rw-r--r--drivers/media/rc/ene_ir.h5
-rw-r--r--drivers/media/rc/fintek-cir.c10
-rw-r--r--drivers/media/rc/fintek-cir.h5
-rw-r--r--drivers/media/rc/gpio-ir-recv.c5
-rw-r--r--drivers/media/rc/igorplugusb.c7
-rw-r--r--drivers/media/rc/iguanair.c13
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c15
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c21
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c3
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c26
-rw-r--r--drivers/media/rc/imon.c138
-rw-r--r--drivers/media/rc/ir-hix5hd2.c5
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c39
-rw-r--r--drivers/media/rc/ir-lirc-codec.c17
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c2
-rw-r--r--drivers/media/rc/ir-nec-decoder.c86
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c105
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c117
-rw-r--r--drivers/media/rc/ir-rx51.c332
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c43
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c50
-rw-r--r--drivers/media/rc/ir-sony-decoder.c48
-rw-r--r--drivers/media/rc/ir-spi.c199
-rw-r--r--drivers/media/rc/ite-cir.c10
-rw-r--r--drivers/media/rc/ite-cir.h5
-rw-r--r--drivers/media/rc/keymaps/Makefile4
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c75
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c85
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c76
-rw-r--r--drivers/media/rc/keymaps/rc-geekbox.c55
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c1
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c4
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c86
-rw-r--r--drivers/media/rc/lirc_dev.c13
-rw-r--r--drivers/media/rc/mceusb.c13
-rw-r--r--drivers/media/rc/meson-ir.c5
-rw-r--r--drivers/media/rc/mtk-cir.c335
-rw-r--r--drivers/media/rc/nuvoton-cir.c130
-rw-r--r--drivers/media/rc/nuvoton-cir.h5
-rw-r--r--drivers/media/rc/rc-core-priv.h109
-rw-r--r--drivers/media/rc/rc-ir-raw.c308
-rw-r--r--drivers/media/rc/rc-loopback.c48
-rw-r--r--drivers/media/rc/rc-main.c527
-rw-r--r--drivers/media/rc/redrat3.c9
-rw-r--r--drivers/media/rc/serial_ir.c29
-rw-r--r--drivers/media/rc/st_rc.c5
-rw-r--r--drivers/media/rc/streamzap.c9
-rw-r--r--drivers/media/rc/sunxi-cir.c5
-rw-r--r--drivers/media/rc/ttusbir.c14
-rw-r--r--drivers/media/rc/winbond-cir.c266
-rw-r--r--drivers/media/tuners/fc0011.c4
-rw-r--r--drivers/media/tuners/fc0012-priv.h4
-rw-r--r--drivers/media/tuners/fc0012.c4
-rw-r--r--drivers/media/tuners/fc0012.h4
-rw-r--r--drivers/media/tuners/fc0013-priv.h4
-rw-r--r--drivers/media/tuners/fc0013.c4
-rw-r--r--drivers/media/tuners/fc0013.h4
-rw-r--r--drivers/media/tuners/fc001x-common.h4
-rw-r--r--drivers/media/tuners/it913x.c96
-rw-r--r--drivers/media/tuners/it913x.h30
-rw-r--r--drivers/media/tuners/max2165.c4
-rw-r--r--drivers/media/tuners/max2165.h4
-rw-r--r--drivers/media/tuners/max2165_priv.h4
-rw-r--r--drivers/media/tuners/mc44s803.c4
-rw-r--r--drivers/media/tuners/mc44s803.h4
-rw-r--r--drivers/media/tuners/mc44s803_priv.h4
-rw-r--r--drivers/media/tuners/mt2060.c129
-rw-r--r--drivers/media/tuners/mt2060.h27
-rw-r--r--drivers/media/tuners/mt2060_priv.h15
-rw-r--r--drivers/media/tuners/mt2131.c4
-rw-r--r--drivers/media/tuners/mt2131.h4
-rw-r--r--drivers/media/tuners/mt2131_priv.h4
-rw-r--r--drivers/media/tuners/mxl5007t.c4
-rw-r--r--drivers/media/tuners/mxl5007t.h4
-rw-r--r--drivers/media/tuners/qt1010.c4
-rw-r--r--drivers/media/tuners/qt1010.h4
-rw-r--r--drivers/media/tuners/qt1010_priv.h4
-rw-r--r--drivers/media/tuners/tda18218.c4
-rw-r--r--drivers/media/tuners/tda18218.h4
-rw-r--r--drivers/media/tuners/tda18218_priv.h4
-rw-r--r--drivers/media/tuners/tda827x.c4
-rw-r--r--drivers/media/tuners/xc4000.c4
-rw-r--r--drivers/media/tuners/xc4000.h4
-rw-r--r--drivers/media/tuners/xc5000.c4
-rw-r--r--drivers/media/tuners/xc5000.h4
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-cards.h4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c29
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c4
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c4
-rw-r--r--drivers/media/usb/au0828/au0828-input.c3
-rw-r--r--drivers/media/usb/au0828/au0828-reg.h4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/au0828/au0828.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2_registers.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c8
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c29
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c7
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dif.h4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c70
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig8
-rw-r--r--drivers/media/usb/dvb-usb-v2/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c267
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h7
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c22
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/zd1301.c298
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005-remote.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.h4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2.h4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c327
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c7
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.h4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c19
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c3
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c133
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c19
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c74
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c15
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/autogain_functions.c4
-rw-r--r--drivers/media/usb/gspca/benq.c4
-rw-r--r--drivers/media/usb/gspca/conex.c4
-rw-r--r--drivers/media/usb/gspca/cpia1.c4
-rw-r--r--drivers/media/usb/gspca/etoms.c4
-rw-r--r--drivers/media/usb/gspca/finepix.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c4
-rw-r--r--drivers/media/usb/gspca/jeilinj.c4
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c4
-rw-r--r--drivers/media/usb/gspca/jpeg.h4
-rw-r--r--drivers/media/usb/gspca/kinect.c4
-rw-r--r--drivers/media/usb/gspca/konica.c4
-rw-r--r--drivers/media/usb/gspca/mars.c4
-rw-r--r--drivers/media/usb/gspca/mr97310a.c4
-rw-r--r--drivers/media/usb/gspca/nw80x.c4
-rw-r--r--drivers/media/usb/gspca/ov519.c4
-rw-r--r--drivers/media/usb/gspca/ov534.c4
-rw-r--r--drivers/media/usb/gspca/ov534_9.c4
-rw-r--r--drivers/media/usb/gspca/pac207.c4
-rw-r--r--drivers/media/usb/gspca/pac7302.c4
-rw-r--r--drivers/media/usb/gspca/pac7311.c4
-rw-r--r--drivers/media/usb/gspca/pac_common.h4
-rw-r--r--drivers/media/usb/gspca/se401.c4
-rw-r--r--drivers/media/usb/gspca/se401.h4
-rw-r--r--drivers/media/usb/gspca/sn9c2028.c4
-rw-r--r--drivers/media/usb/gspca/sn9c2028.h4
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c4
-rw-r--r--drivers/media/usb/gspca/sonixb.c4
-rw-r--r--drivers/media/usb/gspca/sonixj.c4
-rw-r--r--drivers/media/usb/gspca/spca1528.c4
-rw-r--r--drivers/media/usb/gspca/spca500.c4
-rw-r--r--drivers/media/usb/gspca/spca501.c4
-rw-r--r--drivers/media/usb/gspca/spca505.c4
-rw-r--r--drivers/media/usb/gspca/spca506.c4
-rw-r--r--drivers/media/usb/gspca/spca508.c4
-rw-r--r--drivers/media/usb/gspca/spca561.c4
-rw-r--r--drivers/media/usb/gspca/sq905.c4
-rw-r--r--drivers/media/usb/gspca/sq905c.c4
-rw-r--r--drivers/media/usb/gspca/sq930x.c4
-rw-r--r--drivers/media/usb/gspca/stk014.c4
-rw-r--r--drivers/media/usb/gspca/stk1135.c4
-rw-r--r--drivers/media/usb/gspca/stk1135.h4
-rw-r--r--drivers/media/usb/gspca/stv0680.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.h4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h4
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c7
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h4
-rw-r--r--drivers/media/usb/gspca/sunplus.c4
-rw-r--r--drivers/media/usb/gspca/t613.c4
-rw-r--r--drivers/media/usb/gspca/tv8532.c4
-rw-r--r--drivers/media/usb/gspca/vc032x.c4
-rw-r--r--drivers/media/usb/gspca/vicam.c4
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c4
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c4
-rw-r--r--drivers/media/usb/gspca/zc3xx.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debug.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c127
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-main.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-util.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2.h4
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/siano/smsusb.c18
-rw-r--r--drivers/media/usb/stk1160/Kconfig10
-rw-r--r--drivers/media/usb/stk1160/Makefile4
-rw-r--r--drivers/media/usb/stk1160/stk1160-ac97.c183
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c8
-rw-r--r--drivers/media/usb/stk1160/stk1160-reg.h10
-rw-r--r--drivers/media/usb/stk1160/stk1160.h11
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h4
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h4
-rw-r--r--drivers/media/usb/tm6000/tm6000-stds.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000.h4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.h4
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c8
-rw-r--r--drivers/media/usb/usbvision/usbvision-cards.c4
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c6
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c4
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c5
-rw-r--r--drivers/media/usb/usbvision/usbvision.h5
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c15
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c13
-rw-r--r--drivers/media/usb/uvc/uvc_video.c3
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c26
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c44
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c4
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/memstick/core/ms_block.c11
-rw-r--r--drivers/memstick/core/mspro_block.c13
-rw-r--r--drivers/message/fusion/mptfc.c1
-rw-r--r--drivers/message/fusion/mptlan.h1
-rw-r--r--drivers/message/fusion/mptsas.c10
-rw-r--r--drivers/mfd/lpc_ich.c131
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/lkdtm.h8
-rw-r--r--drivers/misc/lkdtm_bugs.c87
-rw-r--r--drivers/misc/lkdtm_core.c8
-rw-r--r--drivers/misc/mei/debugfs.c2
-rw-r--r--drivers/mmc/core/Kconfig10
-rw-r--r--drivers/mmc/core/Makefile3
-rw-r--r--drivers/mmc/core/block.c413
-rw-r--r--drivers/mmc/core/block.h10
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/bus.h16
-rw-r--r--drivers/mmc/core/card.h221
-rw-r--r--drivers/mmc/core/core.c116
-rw-r--r--drivers/mmc/core/core.h45
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/host.c24
-rw-r--r--drivers/mmc/core/host.h48
-rw-r--r--drivers/mmc/core/mmc.c80
-rw-r--r--drivers/mmc/core/mmc_ops.c69
-rw-r--r--drivers/mmc/core/mmc_ops.h14
-rw-r--r--drivers/mmc/core/mmc_test.c116
-rw-r--r--drivers/mmc/core/pwrseq.h6
-rw-r--r--drivers/mmc/core/pwrseq_sd8787.c117
-rw-r--r--drivers/mmc/core/queue.c25
-rw-r--r--drivers/mmc/core/queue.h13
-rw-r--r--drivers/mmc/core/quirks.c83
-rw-r--r--drivers/mmc/core/quirks.h148
-rw-r--r--drivers/mmc/core/sd.c5
-rw-r--r--drivers/mmc/core/sd.h5
-rw-r--r--drivers/mmc/core/sd_ops.c30
-rw-r--r--drivers/mmc/core/sd_ops.h9
-rw-r--r--drivers/mmc/core/sdio.c46
-rw-r--r--drivers/mmc/core/sdio_bus.c1
-rw-r--r--drivers/mmc/core/sdio_bus.h3
-rw-r--r--drivers/mmc/core/sdio_cis.h3
-rw-r--r--drivers/mmc/core/sdio_io.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c2
-rw-r--r--drivers/mmc/core/sdio_ops.c10
-rw-r--r--drivers/mmc/core/sdio_ops.h5
-rw-r--r--drivers/mmc/core/slot-gpio.c6
-rw-r--r--drivers/mmc/core/slot-gpio.h2
-rw-r--r--drivers/mmc/host/Kconfig9
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c1
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c1
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c1
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c1
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c1
-rw-r--r--drivers/mmc/host/dw_mmc-zx.c241
-rw-r--r--drivers/mmc/host/dw_mmc-zx.h31
-rw-r--r--drivers/mmc/host/dw_mmc.c37
-rw-r--r--drivers/mmc/host/dw_mmc.h263
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c126
-rw-r--r--drivers/mmc/host/mmci.c39
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/mtk-sd.c8
-rw-r--r--drivers/mmc/host/mxs-mmc.c22
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c29
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c8
-rw-r--r--drivers/mmc/host/sdhci-cadence.c3
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h44
-rw-r--r--drivers/mmc/host/sdhci-iproc.c11
-rw-r--r--drivers/mmc/host/sdhci-msm.c377
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c39
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c97
-rw-r--r--drivers/mmc/host/sdhci-pci.h1
-rw-r--r--drivers/mmc/host/sdhci-s3c-regs.h87
-rw-r--r--drivers/mmc/host/sdhci-s3c.c71
-rw-r--r--drivers/mmc/host/sdhci.c13
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sh_mmcif.c28
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c95
-rw-r--r--drivers/mmc/host/sunxi-mmc.c114
-rw-r--r--drivers/mmc/host/tmio_mmc.h3
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c61
-rw-r--r--drivers/mmc/host/via-sdmmc.c1
-rw-r--r--drivers/mmc/host/vub300.c8
-rw-r--r--drivers/mmc/host/wbsd.c7
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c1
-rw-r--r--drivers/mtd/bcm47xxpart.c161
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c30
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h3
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/serial_flash_cmds.h7
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c28
-rw-r--r--drivers/mtd/maps/Kconfig12
-rw-r--r--drivers/mtd/maps/Makefile7
-rw-r--r--drivers/mtd/maps/ichxrom.c6
-rw-r--r--drivers/mtd/maps/lantiq-flash.c4
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c117
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.h16
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.c1
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c13
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/mtdcore.c6
-rw-r--r--drivers/mtd/mtdpart.c11
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c8
-rw-r--r--drivers/mtd/nand/fsmc_nand.c153
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c9
-rw-r--r--drivers/mtd/nand/mtk_nand.c1
-rw-r--r--drivers/mtd/nand/nand_base.c40
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/sunxi_nand.c36
-rw-r--r--drivers/mtd/nand/tango_nand.c4
-rw-r--r--drivers/mtd/nand/xway_nand.c7
-rw-r--r--drivers/mtd/ofpart.c1
-rw-r--r--drivers/mtd/spi-nor/Kconfig32
-rw-r--r--drivers/mtd/spi-nor/Makefile3
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c754
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c10
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c48
-rw-r--r--drivers/mtd/spi-nor/intel-spi-platform.c57
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c777
-rw-r--r--drivers/mtd/spi-nor/intel-spi.h24
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c275
-rw-r--r--drivers/mtd/ubi/block.c15
-rw-r--r--drivers/net/Kconfig26
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/bonding/bond_main.c22
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c1
-rw-r--r--drivers/net/can/dev.c136
-rw-r--r--drivers/net/can/flexcan.c419
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/m_can/m_can.c2
-rw-r--r--drivers/net/can/rcar/rcar_can.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c2
-rw-r--r--drivers/net/can/rx-offload.c289
-rw-r--r--drivers/net/can/softing/softing_cs.c2
-rw-r--r--drivers/net/can/ti_hecc.c16
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/b53/b53_common.c178
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c13
-rw-r--r--drivers/net/dsa/b53/b53_priv.h66
-rw-r--r--drivers/net/dsa/b53/b53_regs.h32
-rw-r--r--drivers/net/dsa/bcm_sf2.c247
-rw-r--r--drivers/net/dsa/bcm_sf2.h58
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c613
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h197
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c579
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c352
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h39
-rw-r--r--drivers/net/dsa/mv88e6xxx/mv88e6xxx.h129
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c114
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h9
-rw-r--r--drivers/net/dsa/qca8k.c21
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/dummy.c222
-rw-r--r--drivers/net/ethernet/3com/typhoon.c27
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c45
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c6
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h20
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c41
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c190
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h9
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c164
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c195
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c57
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig24
-rw-r--r--drivers/net/ethernet/aquantia/Makefile5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h77
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c262
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h177
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c68
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h47
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c239
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c990
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h110
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h45
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c292
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c326
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h153
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_rss.h26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h49
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c396
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c905
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h155
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c958
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h207
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c1394
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h677
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h2375
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c570
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h210
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h18
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c20
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c359
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h80
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c108
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c38
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c199
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c1039
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h194
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c178
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h724
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c240
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h19
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c8
-rw-r--r--drivers/net/ethernet/cadence/macb.c222
-rw-r--r--drivers/net/ethernet/cadence/macb.h95
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c74
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c55
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c21
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c49
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c41
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c10
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c19
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h16
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c214
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_xcv.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h127
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c109
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c66
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c149
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c29
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c360
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c34
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h51
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h78
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c91
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c41
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c14
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c71
-rw-r--r--drivers/net/ethernet/dlink/sundance.c14
-rw-r--r--drivers/net/ethernet/dnet.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c73
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c201
-rw-r--r--drivers/net/ethernet/ethoc.c21
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c14
-rw-r--r--drivers/net/ethernet/fealnx.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c18
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c18
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c25
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c11
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c10
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c34
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c51
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c70
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c39
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c179
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h5
-rw-r--r--drivers/net/ethernet/intel/e100.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c10
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c387
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c287
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c20
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c279
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c12
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c46
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h177
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c115
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c256
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c754
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h90
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c594
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h114
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c136
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c24
-rw-r--r--drivers/net/ethernet/jme.c34
-rw-r--r--drivers/net/ethernet/jme.h6
-rw-r--r--drivers/net/ethernet/korina.c16
-rw-r--r--drivers/net/ethernet/lantiq_etop.c21
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c69
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c224
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c22
-rw-r--r--drivers/net/ethernet/marvell/skge.c71
-rw-r--r--drivers/net/ethernet/marvell/sky2.c80
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c225
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c356
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c299
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c679
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c475
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h238
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h98
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h554
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c277
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h137
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c572
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h109
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c1084
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c316
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1453
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c102
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c21
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c14
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c62
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c31
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c32
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c231
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c121
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c46
-rw-r--r--drivers/net/ethernet/neterion/s2io.c55
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c47
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c10
-rw-r--r--drivers/net/ethernet/netronome/Kconfig18
-rw-r--r--drivers/net/ethernet/netronome/Makefile2
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c460
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h99
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h73
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c165
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c109
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c586
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c115
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h110
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h57
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c1364
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h246
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h433
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c1746
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c281
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c318
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c174
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c323
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h95
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c426
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c270
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h81
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c279
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c306
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c764
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c14
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c97
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c58
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c14
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c14
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c123
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c130
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c239
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h76
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c1014
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.h87
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h813
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c35
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h34
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h34
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c310
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h61
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c144
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h57
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c92
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c57
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h39
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.h32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c323
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.h47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h71
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c35
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c339
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h32
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h87
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c130
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c759
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1700
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2507
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c536
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h65
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c94
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c132
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c36
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/Makefile2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c261
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c114
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c12
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.h13
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c183
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h25
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c96
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.h126
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c18
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c11
-rw-r--r--drivers/net/ethernet/realtek/8139too.c17
-rw-r--r--drivers/net/ethernet/realtek/atp.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb.h10
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c166
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c334
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h52
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c10
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1213
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c21
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h3
-rw-r--r--drivers/net/ethernet/sfc/efx.c279
-rw-r--r--drivers/net/ethernet/sfc/efx.h6
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c34
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c53
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c29
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.c44
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.h3
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h137
-rw-r--r--drivers/net/ethernet/sfc/falcon/qt202x_phy.c9
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/tenxpress.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/txc43128_phy.c9
-rw-r--r--drivers/net/ethernet/sfc/farch.c16
-rw-r--r--drivers/net/ethernet/sfc/filter.h41
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c34
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h21
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h188
-rw-r--r--drivers/net/ethernet/sfc/nic.h27
-rw-r--r--drivers/net/ethernet/sfc/rx.c8
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/siena.c31
-rw-r--r--drivers/net/ethernet/sfc/sriov.c11
-rw-r--r--drivers/net/ethernet/sfc/sriov.h3
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c31
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c2
-rw-r--r--drivers/net/ethernet/stmicro/Kconfig3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c117
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c227
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c132
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h4
-rw-r--r--drivers/net/ethernet/sun/Kconfig8
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c19
-rw-r--r--drivers/net/ethernet/sun/niu.c8
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c14
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c119
-rw-r--r--drivers/net/ethernet/synopsys/Kconfig27
-rw-r--r--drivers/net/ethernet/synopsys/Makefile5
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c2998
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c201
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c180
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h17
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c164
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h5
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/netcp.h21
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c104
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c25
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/tile/tilepro.c10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c10
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c148
-rw-r--r--drivers/net/fddi/skfp/cfm.c22
-rw-r--r--drivers/net/fddi/skfp/drvfbi.c4
-rw-r--r--drivers/net/fddi/skfp/ecm.c34
-rw-r--r--drivers/net/fddi/skfp/ess.c66
-rw-r--r--drivers/net/fddi/skfp/fplustm.c24
-rw-r--r--drivers/net/fddi/skfp/h/cmtdef.h67
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h24
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c178
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c83
-rw-r--r--drivers/net/fddi/skfp/pmf.c4
-rw-r--r--drivers/net/fddi/skfp/rmt.c40
-rw-r--r--drivers/net/fddi/skfp/smt.c109
-rw-r--r--drivers/net/fddi/skfp/srf.c14
-rw-r--r--drivers/net/fjes/fjes_main.c9
-rw-r--r--drivers/net/gtp.c37
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h216
-rw-r--r--drivers/net/hyperv/netvsc.c324
-rw-r--r--drivers/net/hyperv/netvsc_drv.c582
-rw-r--r--drivers/net/hyperv/rndis_filter.c338
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/atusb.c59
-rw-r--r--drivers/net/ifb.c22
-rw-r--r--drivers/net/ipvlan/Makefile1
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c135
-rw-r--r--drivers/net/ipvlan/ipvtap.c241
-rw-r--r--drivers/net/irda/au1k_ir.c8
-rw-r--r--drivers/net/irda/bfin_sir.c5
-rw-r--r--drivers/net/irda/sh_sir.c1
-rw-r--r--drivers/net/loopback.c6
-rw-r--r--drivers/net/macsec.c11
-rw-r--r--drivers/net/macvlan.c9
-rw-r--r--drivers/net/macvtap.c1229
-rw-r--r--drivers/net/mdio.c178
-rw-r--r--drivers/net/nlmon.c4
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/bcm63xx.c21
-rw-r--r--drivers/net/phy/bcm7xxx.c38
-rw-r--r--drivers/net/phy/broadcom.c103
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/dp83867.c69
-rw-r--r--drivers/net/phy/marvell.c467
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c6
-rw-r--r--drivers/net/phy/mdio-boardinfo.c86
-rw-r--r--drivers/net/phy/mdio-boardinfo.h19
-rw-r--r--drivers/net/phy/mdio-gpio.c60
-rw-r--r--drivers/net/phy/mdio-xgene.c50
-rw-r--r--drivers/net/phy/mdio-xgene.h4
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/mdio_device.c13
-rw-r--r--drivers/net/phy/micrel.c14
-rw-r--r--drivers/net/phy/mscc.c85
-rw-r--r--drivers/net/phy/phy.c41
-rw-r--r--drivers/net/phy/phy_device.c29
-rw-r--r--drivers/net/phy/phy_led_triggers.c9
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/tap.c1285
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c91
-rw-r--r--drivers/net/usb/catc.c56
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/pegasus.c29
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c45
-rw-r--r--drivers/net/usb/rtl8150.c34
-rw-r--r--drivers/net/usb/sierra_net.c111
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/virtio_net.c533
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c14
-rw-r--r--drivers/net/vxlan.c604
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c3
-rw-r--r--drivers/net/wan/hd64572.c2
-rw-r--r--drivers/net/wan/slic_ds26522.c14
-rw-r--r--drivers/net/wireless/admtek/adm8211.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c93
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c115
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c35
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c216
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/p2p.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c68
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h23
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c139
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c131
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c174
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c169
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c69
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h9
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c19
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h31
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c175
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c136
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c21
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c87
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c79
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c75
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h29
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c131
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h67
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c107
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-a000.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c149
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c407
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c230
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c252
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c236
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c302
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c23
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c27
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c41
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c145
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c332
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c508
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c41
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c15
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c357
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c49
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c32
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c34
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00lib.h31
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00link.c132
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c24
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c167
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c1082
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c851
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c993
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c916
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c1262
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h51
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c53
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c71
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c47
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c143
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c116
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c205
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h272
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c105
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c123
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c39
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c46
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c55
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c68
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c95
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c113
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c43
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c64
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c67
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h71
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c14
-rw-r--r--drivers/net/xen-netback/common.h8
-rw-r--r--drivers/net/xen-netback/interface.c16
-rw-r--r--drivers/net/xen-netback/netback.c6
-rw-r--r--drivers/net/xen-netback/xenbus.c21
-rw-r--r--drivers/net/xen-netfront.c60
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c24
-rw-r--r--drivers/ntb/ntb_transport.c5
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c34
-rw-r--r--drivers/nvdimm/pfn_devs.c7
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvme/host/core.c86
-rw-r--r--drivers/nvme/host/fc.c8
-rw-r--r--drivers/nvme/host/lightnvm.c315
-rw-r--r--drivers/nvme/host/nvme.h13
-rw-r--r--drivers/nvme/host/pci.c19
-rw-r--r--drivers/nvme/host/rdma.c6
-rw-r--r--drivers/nvme/host/scsi.c7
-rw-r--r--drivers/nvme/target/configfs.c1
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fc.c36
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c17
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/fdt.c9
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/parport/parport_gsc.c8
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/host/pcie-designware.c10
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c6
-rw-r--r--drivers/pci/hotplug/pnv_php.c2
-rw-r--r--drivers/pci/msi.c10
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/pci.c12
-rw-r--r--drivers/pci/pcie/aspm.c19
-rw-r--r--drivers/pci/pcie/pme.c12
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pinctrl/Kconfig12
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c1115
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c1524
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c165
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h33
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c9
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c9
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c9
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c11
-rw-r--r--drivers/pinctrl/core.c401
-rw-r--r--drivers/pinctrl/core.h55
-rw-r--r--drivers/pinctrl/devicetree.c31
-rw-r--r--drivers/pinctrl/devicetree.h12
-rw-r--r--drivers/pinctrl/freescale/Kconfig3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c300
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h34
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c79
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c512
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c201
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h8
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c1
-rw-r--r--drivers/pinctrl/mediatek/Kconfig15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c26
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c34
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-39x.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c199
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c113
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c41
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c180
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h65
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c16
-rw-r--r--drivers/pinctrl/pinconf.c12
-rw-r--r--drivers/pinctrl/pinconf.h9
-rw-r--r--drivers/pinctrl/pinctrl-amd.c51
-rw-r--r--drivers/pinctrl/pinctrl-amd.h8
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c3
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h2
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c10
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c2
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c299
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c55
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/pinmux.c216
-rw-r--r--drivers/pinctrl/pinmux.h56
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c48
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c386
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c12
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c132
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h43
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c87
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c450
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c1920
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c16
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c7
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c12
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c12
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c12
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c12
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c12
-rw-r--r--drivers/pinctrl/stm32/Kconfig5
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c38
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32h743.c1980
-rw-r--r--drivers/pinctrl/sunxi/Kconfig22
-rw-r--r--drivers/pinctrl/sunxi/Makefile7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-gr8.c536
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c558
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c403
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c (renamed from drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c)190
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c184
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c809
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c321
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c82
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h32
-rw-r--r--drivers/pinctrl/ti/Kconfig10
-rw-r--r--drivers/pinctrl/ti/Makefile1
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c937
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c2
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c1
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c2
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c326
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/surface3-wmi.c6
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/at91-poweroff.c54
-rw-r--r--drivers/power/reset/at91-reset.c18
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c49
-rw-r--r--drivers/power/supply/Kconfig36
-rw-r--r--drivers/power/supply/Makefile4
-rw-r--r--drivers/power/supply/ab8500_btemp.c16
-rw-r--r--drivers/power/supply/axp20x_ac_power.c253
-rw-r--r--drivers/power/supply/axp20x_usb_power.c187
-rw-r--r--drivers/power/supply/axp288_charger.c387
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c539
-rw-r--r--drivers/power/supply/bq2415x_charger.c5
-rw-r--r--drivers/power/supply/bq24190_charger.c188
-rw-r--r--drivers/power/supply/bq24735-charger.c108
-rw-r--r--drivers/power/supply/bq27xxx_battery.c356
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c22
-rw-r--r--drivers/power/supply/gpio-charger.c84
-rw-r--r--drivers/power/supply/intel_mid_battery.c795
-rw-r--r--drivers/power/supply/max14656_charger_detector.c327
-rw-r--r--drivers/power/supply/max8997_charger.c15
-rw-r--r--drivers/power/supply/pcf50633-charger.c13
-rw-r--r--drivers/power/supply/qcom_smbb.c72
-rw-r--r--drivers/power/supply/sbs-charger.c274
-rw-r--r--drivers/power/supply/tps65217_charger.c99
-rw-r--r--drivers/power/supply/wm97xx_battery.c5
-rw-r--r--drivers/ptp/ptp_clock.c22
-rw-r--r--drivers/ptp/ptp_private.h7
-rw-r--r--drivers/ptp/ptp_sysfs.c167
-rw-r--r--drivers/regulator/88pm800.c4
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/act8945a-regulator.c2
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/anatop-regulator.c12
-rw-r--r--drivers/regulator/arizona-ldo1.c4
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/as3711-regulator.c6
-rw-r--r--drivers/regulator/axp20x-regulator.c10
-rw-r--r--drivers/regulator/bcm590xx-regulator.c6
-rw-r--r--drivers/regulator/core.c173
-rw-r--r--drivers/regulator/cpcap-regulator.c464
-rw-r--r--drivers/regulator/devres.c66
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed.c46
-rw-r--r--drivers/regulator/hi655x-regulator.c4
-rw-r--r--drivers/regulator/internal.h10
-rw-r--r--drivers/regulator/lp8755.c2
-rw-r--r--drivers/regulator/ltc3589.c8
-rw-r--r--drivers/regulator/ltc3676.c6
-rw-r--r--drivers/regulator/max14577-regulator.c6
-rw-r--r--drivers/regulator/max77620-regulator.c2
-rw-r--r--drivers/regulator/max77686-regulator.c8
-rw-r--r--drivers/regulator/max77693-regulator.c2
-rw-r--r--drivers/regulator/max77802-regulator.c10
-rw-r--r--drivers/regulator/max8907-regulator.c10
-rw-r--r--drivers/regulator/max8925-regulator.c4
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/palmas-regulator.c24
-rw-r--r--drivers/regulator/pbias-regulator.c2
-rw-r--r--drivers/regulator/pcap-regulator.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c8
-rw-r--r--drivers/regulator/pv88060-regulator.c4
-rw-r--r--drivers/regulator/pv88080-regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c4
-rw-r--r--drivers/regulator/qcom_smd-regulator.c102
-rw-r--r--drivers/regulator/rc5t583-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/regulator/s2mpa01.c4
-rw-r--r--drivers/regulator/tps65086-regulator.c10
-rw-r--r--drivers/regulator/tps65217-regulator.c6
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/reset/core.c2
-rw-r--r--drivers/rtc/Kconfig5
-rw-r--r--drivers/rtc/rtc-jz4740.c12
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/s390/block/scm_blk.c7
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c135
-rw-r--r--drivers/s390/net/qeth_core_mpc.h17
-rw-r--r--drivers/s390/net/qeth_l2_main.c189
-rw-r--r--drivers/s390/net/qeth_l3_main.c15
-rw-r--r--drivers/s390/net/qeth_l3_sys.c33
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c8
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c29
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/NCR5380.c64
-rw-r--r--drivers/scsi/NCR5380.h17
-rw-r--r--drivers/scsi/aacraid/aachba.c1288
-rw-r--r--drivers/scsi/aacraid/aacraid.h644
-rw-r--r--drivers/scsi/aacraid/commctrl.c342
-rw-r--r--drivers/scsi/aacraid/comminit.c338
-rw-r--r--drivers/scsi/aacraid/commsup.c964
-rw-r--r--drivers/scsi/aacraid/dpcsup.c159
-rw-r--r--drivers/scsi/aacraid/linit.c562
-rw-r--r--drivers/scsi/aacraid/nark.c3
-rw-r--r--drivers/scsi/aacraid/rkt.c5
-rw-r--r--drivers/scsi/aacraid/rx.c17
-rw-r--r--drivers/scsi/aacraid/sa.c9
-rw-r--r--drivers/scsi/aacraid/src.c336
-rw-r--r--drivers/scsi/atari_scsi.c36
-rw-r--r--drivers/scsi/be2iscsi/be.h3
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c41
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h17
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c165
-rw-r--r--drivers/scsi/be2iscsi/be_main.c345
-rw-r--r--drivers/scsi/be2iscsi/be_main.h44
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c117
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h98
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c181
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h4
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c7
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c8
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c1
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h4
-rw-r--r--drivers/scsi/cxlflash/common.h32
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c31
-rw-r--r--drivers/scsi/cxlflash/main.c465
-rw-r--r--drivers/scsi/cxlflash/sislite.h19
-rw-r--r--drivers/scsi/cxlflash/superpipe.c183
-rw-r--r--drivers/scsi/cxlflash/vlun.c169
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c247
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c222
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c174
-rw-r--r--drivers/scsi/dpt_i2o.c8
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_log.h4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c14
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/g_NCR5380.c45
-rw-r--r--drivers/scsi/g_NCR5380.h56
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c23
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c135
-rw-r--r--drivers/scsi/hosts.c24
-rw-r--r--drivers/scsi/hpsa.c12
-rw-r--r--drivers/scsi/hpsa.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c8
-rw-r--r--drivers/scsi/iscsi_tcp.c1
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libiscsi.c5
-rw-r--r--drivers/scsi/libsas/sas_expander.c8
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c38
-rw-r--r--drivers/scsi/libsas/sas_init.c1
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c8
-rw-r--r--drivers/scsi/mac_scsi.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h199
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c648
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c468
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c1334
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h412
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c20
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h19
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c114
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvumi.c6
-rw-r--r--drivers/scsi/osd/osd_initiator.c22
-rw-r--r--drivers/scsi/osst.c18
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c35
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c92
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c9
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c24
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c8
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c6
-rw-r--r--drivers/scsi/scsi.c354
-rw-r--r--drivers/scsi/scsi_debug.c10
-rw-r--r--drivers/scsi/scsi_error.c47
-rw-r--r--drivers/scsi/scsi_lib.c267
-rw-r--r--drivers/scsi/scsi_priv.h5
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c14
-rw-r--r--drivers/scsi/scsi_transport_sas.c5
-rw-r--r--drivers/scsi/scsi_transport_srp.c21
-rw-r--r--drivers/scsi/sd.c92
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c37
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/snic/snic.h1
-rw-r--r--drivers/scsi/snic/snic_isr.c48
-rw-r--r--drivers/scsi/sr.c11
-rw-r--r--drivers/scsi/st.c28
-rw-r--r--drivers/scsi/storvsc_drv.c160
-rw-r--r--drivers/scsi/sun3_scsi.c85
-rw-r--r--drivers/scsi/sun3_scsi.h102
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c49
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufs.h12
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h28
-rw-r--r--drivers/scsi/ufs/ufshcd.c1578
-rw-r--r--drivers/scsi/ufs/ufshcd.h121
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/virtio_scsi.c11
-rw-r--r--drivers/scsi/vmw_pvscsi.c104
-rw-r--r--drivers/scsi/vmw_pvscsi.h5
-rw-r--r--drivers/soc/samsung/exynos-pmu.c22
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c1
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-armada-3700.c25
-rw-r--r--drivers/spi/spi-ath79.c23
-rw-r--r--drivers/spi/spi-axi-spi-engine.c3
-rw-r--r--drivers/spi/spi-bcm-qspi.c200
-rw-r--r--drivers/spi/spi-bcm53xx.c18
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw.c9
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-ep93xx.c139
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-fsl-spi.c17
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-lantiq-ssc.c983
-rw-r--r--drivers/spi/spi-mpc52xx.c12
-rw-r--r--drivers/spi/spi-mt65xx.c37
-rw-r--r--drivers/spi/spi-ppc4xx.c7
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c32
-rw-r--r--drivers/spi/spi-pxa2xx.c37
-rw-r--r--drivers/spi/spi-rockchip.c5
-rw-r--r--drivers/spi/spi-rspi.c9
-rw-r--r--drivers/spi/spi-s3c64xx.c59
-rw-r--r--drivers/spi/spi-sh-msiof.c8
-rw-r--r--drivers/spi/spi-ti-qspi.c18
-rw-r--r--drivers/spi/spi-topcliff-pch.c31
-rw-r--r--drivers/spi/spi.c82
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/greybus/gpio.c15
-rw-r--r--drivers/staging/greybus/timesync_platform.c6
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c25
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/lirc/Kconfig22
-rw-r--r--drivers/staging/media/lirc/Makefile3
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c401
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c979
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c741
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.h26
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c296
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c34
-rw-r--r--drivers/staging/media/s5p-cec/Kconfig2
-rw-r--r--drivers/staging/media/s5p-cec/exynos_hdmi_cec.h1
-rw-r--r--drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c5
-rw-r--r--drivers/staging/netlogic/xlr_net.c11
-rw-r--r--drivers/staging/octeon/ethernet-rx.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1
-rw-r--r--drivers/target/target_core_device.c10
-rw-r--r--drivers/target/target_core_pr.c10
-rw-r--r--drivers/target/target_core_pscsi.c14
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_transport.c86
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/thermal/cpu_cooling.c11
-rw-r--r--drivers/thermal/devfreq_cooling.c15
-rw-r--r--drivers/thermal/rockchip_thermal.c153
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/tty/tty_ldsem.c18
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c18
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/params.c10
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c27
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h1
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/mon/mon_main.c2
-rw-r--r--drivers/usb/musb/musb_core.c26
-rw-r--r--drivers/usb/musb/musb_core.h1
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c37
-rw-r--r--drivers/vfio/vfio_iommu_type1.c40
-rw-r--r--drivers/vhost/Kconfig2
-rw-r--r--drivers/vhost/net.c26
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vsock.c13
-rw-r--r--drivers/video/fbdev/core/fbcmap.c26
-rw-r--r--drivers/virtio/virtio.c42
-rw-r--r--drivers/virtio/virtio_mmio.c20
-rw-r--r--drivers/xen/cpu_hotplug.c7
-rw-r--r--drivers/xen/events/events_base.c1
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/manage.c8
-rw-r--r--drivers/xen/platform-pci.c71
-rw-r--r--drivers/xen/privcmd.c226
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xenbus/xenbus.h135
-rw-r--r--drivers/xen/xenbus/xenbus_client.c45
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c309
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h51
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c213
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c14
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h88
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c11
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c17
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c526
-rw-r--r--drivers/xen/xenfs/super.c2
-rw-r--r--drivers/xen/xenfs/xenstored.c2
3218 files changed, 129524 insertions, 60472 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 9ed087853dee..a391bbc48105 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,7 +55,7 @@ acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
-acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o
+acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
# These are (potentially) separate modules
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index b3842ffc19ba..a15270a806fc 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -212,6 +212,7 @@ static bool __init extlog_get_l1addr(void)
}
static struct notifier_block extlog_mce_dec = {
.notifier_call = extlog_print,
+ .priority = MCE_PRIO_EXTLOG,
};
static int __init extlog_init(void)
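
Editor's note: the hunk above gives the extended-error-log decoder an explicit priority in the MCE decode notifier chain; kernel notifier chains invoke callbacks in descending .priority order, so a larger value runs earlier regardless of registration order. Below is a standalone sketch of that ordering rule (plain C, not kernel code: the struct, callback names and priority values are simplified stand-ins for illustration only).

#include <stdio.h>

/* Simplified stand-in for the kernel's notifier_block: callbacks are kept
 * in a singly linked list sorted by descending priority, so registration
 * order does not decide who sees the event first. */
struct nb {
	int (*call)(void *data);
	int priority;
	struct nb *next;
};

static struct nb *chain;

static void chain_register(struct nb *n)
{
	struct nb **p = &chain;

	while (*p && (*p)->priority > n->priority)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}

static void chain_call(void *data)
{
	for (struct nb *n = chain; n; n = n->next)
		n->call(data);
}

static int edac_cb(void *data)   { printf("edac decoder: %s\n", (char *)data); return 0; }
static int extlog_cb(void *data) { printf("extlog decoder: %s\n", (char *)data); return 0; }

int main(void)
{
	/* Hypothetical priority values; only their relative order matters. */
	struct nb edac   = { .call = edac_cb,   .priority = 1 };
	struct nb extlog = { .call = extlog_cb, .priority = 2 };

	chain_register(&edac);
	chain_register(&extlog);	/* higher priority, runs first despite later registration */

	chain_call("machine check event");
	return 0;
}
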
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 3de3b6b8f0f1..4467a8089ab8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -165,7 +165,7 @@ static int acpi_processor_errata(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
- phys_cpuid_t physid, int *pcpu)
+ phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
return -ENODEV;
}
@@ -203,7 +203,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
+ ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
if (ret)
goto out;
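
Editor's note: acpi_map_cpu() above is declared __weak, the kernel's shorthand for __attribute__((weak)); the -ENODEV stub is only used when no architecture supplies a strong definition at link time. A minimal standalone illustration of that pattern follows (the function name and values are invented for the example, not kernel code).

#include <stdio.h>
#include <errno.h>

#define __weak __attribute__((weak))

/* Weak default: reports "not supported", mirroring the -ENODEV stub above.
 * Linking another object file with a strong map_cpu_example() definition
 * would silently replace this one. */
int __weak map_cpu_example(unsigned long physid, unsigned int acpi_id, int *pcpu)
{
	(void)physid;
	(void)acpi_id;
	(void)pcpu;
	return -ENODEV;
}

int main(void)
{
	int cpu = -1;
	int ret = map_cpu_example(0x10, 4, &cpu);

	/* Prints -19 (-ENODEV) because no strong override is linked in. */
	printf("map_cpu_example() returned %d\n", ret);
	return 0;
}
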
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 0bd6307e1f3c..b65f2731e9e2 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,26 +51,26 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2016 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2017 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_WIDTH "-64"
+#define ACPI_WIDTH " (64-bit version)"
#elif ACPI_MACHINE_WIDTH == 32
-#define ACPI_WIDTH "-32"
+#define ACPI_WIDTH " (32-bit version)"
#else
#error unknown ACPI_MACHINE_WIDTH
-#define ACPI_WIDTH "-??"
+#define ACPI_WIDTH " (unknown bit width, not 32 or 64)"
#endif
/* Macros for signons and file headers */
#define ACPI_COMMON_SIGNON(utility_name) \
- "\n%s\n%s version %8.8X%s\n%s\n\n", \
+ "\n%s\n%s version %8.8X\n%s\n\n", \
ACPICA_NAME, \
- utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, \
+ utility_name, ((u32) ACPI_CA_VERSION), \
ACPICA_COPYRIGHT
#define ACPI_COMMON_HEADER(utility_name, prefix) \
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 19d6ec815d12..49bf47ca5477 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 94737f8845ac..71743e5252f5 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index dcd48bfedb4d..0d95c85cce06 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 8a0049d5cdf3..a2adfd42f85c 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index edbb42e251a6..1d955fe216c4 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 27addcf50c37..fd4f3cacb356 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 7ead235555cf..29a863c85318 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 792660054992..8fd495e8fdce 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -770,7 +770,7 @@ union acpi_parse_value {
char *operator_symbol;/* Used for C-style operator name strings */\
char aml_op_name[16]) /* Op name (debug only) */
-/* Flags for disasm_flags field above */
+/* Internal opcodes for disasm_opcode field above */
#define ACPI_DASM_BUFFER 0x00 /* Buffer is a simple data buffer */
#define ACPI_DASM_RESOURCE 0x01 /* Buffer is a Resource Descriptor */
@@ -783,7 +783,10 @@ union acpi_parse_value {
#define ACPI_DASM_LNOT_PREFIX 0x08 /* Start of a Lnot_equal (etc.) pair of opcodes */
#define ACPI_DASM_LNOT_SUFFIX 0x09 /* End of a Lnot_equal (etc.) pair of opcodes */
#define ACPI_DASM_HID_STRING 0x0A /* String is a _HID or _CID */
-#define ACPI_DASM_IGNORE 0x0B /* Not used at this time */
+#define ACPI_DASM_IGNORE_SINGLE 0x0B /* Ignore the opcode but not its children */
+#define ACPI_DASM_SWITCH_PREDICATE 0x0C /* Object is a predicate for a Switch or Case block */
+#define ACPI_DASM_CASE 0x0D /* If/Else is a Case in a Switch/Case block */
+#define ACPI_DASM_DEFAULT 0x0E /* Else is a Default in a Switch/Case block */
/*
* Generic operation (for example: If, While, Store)
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index a3b95431b7c5..c3337514e0ed 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
/*
* Extract data using a pointer. Any more than a byte and we
- * get into potential aligment issues -- see the STORE macros below.
+ * get into potential alignment issues -- see the STORE macros below.
* Use with care.
*/
#define ACPI_CAST8(ptr) ACPI_CAST_PTR (u8, (ptr))
@@ -63,7 +63,7 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
- * printf() format helper. This macros is a workaround for the difficulties
+ * printf() format helper. This macro is a workaround for the difficulties
* with emitting 64-bit integers and 64-bit pointers with the same code
* for both 32-bit and 64-bit hosts.
*/
@@ -260,8 +260,70 @@
#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
+/* Generic bit manipulation */
+
+#ifndef ACPI_USE_NATIVE_BIT_FINDER
+
+#define __ACPI_FIND_LAST_BIT_2(a, r) ((((u8) (a)) & 0x02) ? (r)+1 : (r))
+#define __ACPI_FIND_LAST_BIT_4(a, r) ((((u8) (a)) & 0x0C) ? \
+ __ACPI_FIND_LAST_BIT_2 ((a)>>2, (r)+2) : \
+ __ACPI_FIND_LAST_BIT_2 ((a), (r)))
+#define __ACPI_FIND_LAST_BIT_8(a, r) ((((u8) (a)) & 0xF0) ? \
+ __ACPI_FIND_LAST_BIT_4 ((a)>>4, (r)+4) : \
+ __ACPI_FIND_LAST_BIT_4 ((a), (r)))
+#define __ACPI_FIND_LAST_BIT_16(a, r) ((((u16) (a)) & 0xFF00) ? \
+ __ACPI_FIND_LAST_BIT_8 ((a)>>8, (r)+8) : \
+ __ACPI_FIND_LAST_BIT_8 ((a), (r)))
+#define __ACPI_FIND_LAST_BIT_32(a, r) ((((u32) (a)) & 0xFFFF0000) ? \
+ __ACPI_FIND_LAST_BIT_16 ((a)>>16, (r)+16) : \
+ __ACPI_FIND_LAST_BIT_16 ((a), (r)))
+#define __ACPI_FIND_LAST_BIT_64(a, r) ((((u64) (a)) & 0xFFFFFFFF00000000) ? \
+ __ACPI_FIND_LAST_BIT_32 ((a)>>32, (r)+32) : \
+ __ACPI_FIND_LAST_BIT_32 ((a), (r)))
+
+#define ACPI_FIND_LAST_BIT_8(a) ((a) ? __ACPI_FIND_LAST_BIT_8 (a, 1) : 0)
+#define ACPI_FIND_LAST_BIT_16(a) ((a) ? __ACPI_FIND_LAST_BIT_16 (a, 1) : 0)
+#define ACPI_FIND_LAST_BIT_32(a) ((a) ? __ACPI_FIND_LAST_BIT_32 (a, 1) : 0)
+#define ACPI_FIND_LAST_BIT_64(a) ((a) ? __ACPI_FIND_LAST_BIT_64 (a, 1) : 0)
+
+#define __ACPI_FIND_FIRST_BIT_2(a, r) ((((u8) (a)) & 0x01) ? (r) : (r)+1)
+#define __ACPI_FIND_FIRST_BIT_4(a, r) ((((u8) (a)) & 0x03) ? \
+ __ACPI_FIND_FIRST_BIT_2 ((a), (r)) : \
+ __ACPI_FIND_FIRST_BIT_2 ((a)>>2, (r)+2))
+#define __ACPI_FIND_FIRST_BIT_8(a, r) ((((u8) (a)) & 0x0F) ? \
+ __ACPI_FIND_FIRST_BIT_4 ((a), (r)) : \
+ __ACPI_FIND_FIRST_BIT_4 ((a)>>4, (r)+4))
+#define __ACPI_FIND_FIRST_BIT_16(a, r) ((((u16) (a)) & 0x00FF) ? \
+ __ACPI_FIND_FIRST_BIT_8 ((a), (r)) : \
+ __ACPI_FIND_FIRST_BIT_8 ((a)>>8, (r)+8))
+#define __ACPI_FIND_FIRST_BIT_32(a, r) ((((u32) (a)) & 0x0000FFFF) ? \
+ __ACPI_FIND_FIRST_BIT_16 ((a), (r)) : \
+ __ACPI_FIND_FIRST_BIT_16 ((a)>>16, (r)+16))
+#define __ACPI_FIND_FIRST_BIT_64(a, r) ((((u64) (a)) & 0x00000000FFFFFFFF) ? \
+ __ACPI_FIND_FIRST_BIT_32 ((a), (r)) : \
+ __ACPI_FIND_FIRST_BIT_32 ((a)>>32, (r)+32))
+
+#define ACPI_FIND_FIRST_BIT_8(a) ((a) ? __ACPI_FIND_FIRST_BIT_8 (a, 1) : 0)
+#define ACPI_FIND_FIRST_BIT_16(a) ((a) ? __ACPI_FIND_FIRST_BIT_16 (a, 1) : 0)
+#define ACPI_FIND_FIRST_BIT_32(a) ((a) ? __ACPI_FIND_FIRST_BIT_32 (a, 1) : 0)
+#define ACPI_FIND_FIRST_BIT_64(a) ((a) ? __ACPI_FIND_FIRST_BIT_64 (a, 1) : 0)
+
+#endif /* ACPI_USE_NATIVE_BIT_FINDER */
+
/* Generic (power-of-two) rounding */
+#define ACPI_ROUND_UP_POWER_OF_TWO_8(a) ((u8) \
+ (((u16) 1) << ACPI_FIND_LAST_BIT_8 ((a) - 1)))
+#define ACPI_ROUND_DOWN_POWER_OF_TWO_8(a) ((u8) \
+ (((u16) 1) << (ACPI_FIND_LAST_BIT_8 ((a)) - 1)))
+#define ACPI_ROUND_UP_POWER_OF_TWO_16(a) ((u16) \
+ (((u32) 1) << ACPI_FIND_LAST_BIT_16 ((a) - 1)))
+#define ACPI_ROUND_DOWN_POWER_OF_TWO_16(a) ((u16) \
+ (((u32) 1) << (ACPI_FIND_LAST_BIT_16 ((a)) - 1)))
+#define ACPI_ROUND_UP_POWER_OF_TWO_32(a) ((u32) \
+ (((u64) 1) << ACPI_FIND_LAST_BIT_32 ((a) - 1)))
+#define ACPI_ROUND_DOWN_POWER_OF_TWO_32(a) ((u32) \
+ (((u64) 1) << (ACPI_FIND_LAST_BIT_32 ((a)) - 1)))
#define ACPI_IS_ALIGNED(a, s) (((a) & ((s) - 1)) == 0)
#define ACPI_IS_POWER_OF_TWO(a) ACPI_IS_ALIGNED(a, a)
@@ -270,8 +332,8 @@
* Bit positions start at zero.
* MASK_BITS_ABOVE creates a mask starting AT the position and above
* MASK_BITS_BELOW creates a mask starting one bit BELOW the position
- * MASK_BITS_ABOVE/BELOW accpets a bit offset to create a mask
- * MASK_BITS_ABOVE/BELOW_32/64 accpets a bit width to create a mask
+ * MASK_BITS_ABOVE/BELOW accepts a bit offset to create a mask
+ * MASK_BITS_ABOVE/BELOW_32/64 accepts a bit width to create a mask
* Note: The ACPI_INTEGER_BIT_SIZE check is used to bypass compiler
* differences with the shift operator
*/
@@ -389,7 +451,7 @@
*/
#ifndef ACPI_NO_ERROR_MESSAGES
/*
- * Error reporting. Callers module and line number are inserted by AE_INFO,
+ * Error reporting. The callers module and line number are inserted by AE_INFO,
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
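
Editor's note: the new bit-finder macros return the 1-based position of the most (FIND_LAST) or least (FIND_FIRST) significant set bit, with 0 for an all-zero input, and the power-of-two rounding helpers are built directly on FIND_LAST. The short self-contained C sketch below mirrors that contract with plain loops and checks a few values, which is an easy way to sanity-check the macro expansions above (the helper names here are ad hoc, not ACPICA API).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 1-based index of the most significant set bit; 0 when no bit is set.
 * Same contract as ACPI_FIND_LAST_BIT_8/16/32/64 above. */
static unsigned find_last_bit(uint64_t v)
{
	unsigned pos = 0;

	while (v) {
		pos++;
		v >>= 1;
	}
	return pos;
}

/* 1-based index of the least significant set bit; 0 when no bit is set.
 * Same contract as ACPI_FIND_FIRST_BIT_*. */
static unsigned find_first_bit(uint64_t v)
{
	unsigned pos;

	if (!v)
		return 0;
	for (pos = 1; !(v & 1); pos++)
		v >>= 1;
	return pos;
}

/* Power-of-two rounding, as ACPI_ROUND_UP/DOWN_POWER_OF_TWO_* expand:
 *   up(a)   = 1 << find_last_bit(a - 1)
 *   down(a) = 1 << (find_last_bit(a) - 1)      (a must be non-zero) */
static uint64_t round_up_pow2(uint64_t a)   { return (uint64_t)1 << find_last_bit(a - 1); }
static uint64_t round_down_pow2(uint64_t a) { return (uint64_t)1 << (find_last_bit(a) - 1); }

int main(void)
{
	assert(find_last_bit(0) == 0 && find_last_bit(1) == 1 && find_last_bit(0x80) == 8);
	assert(find_first_bit(0) == 0 && find_first_bit(6) == 2 && find_first_bit(0x80) == 8);
	assert(round_up_pow2(5) == 8 && round_up_pow2(8) == 8);
	assert(round_down_pow2(5) == 4 && round_down_pow2(8) == 8);
	printf("bit-finder and rounding checks passed\n");
	return 0;
}
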
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 7affdcdfcc81..54a0c51b3e37 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 094b042678f7..27c3f982d810 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index ca4bda1a60be..e758f098ff4b 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,7 +92,7 @@
#define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
-#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_NAME_OR_REF,ARGP_TARGET)
+#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SIMPLENAME, ARGP_TARGET)
#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
@@ -105,7 +105,7 @@
#define ARGP_DATA_REGION_OP ARGP_LIST4 (ARGP_NAME, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_DEBUG_OP ARG_NONE
#define ARGP_DECREMENT_OP ARGP_LIST1 (ARGP_SUPERNAME)
-#define ARGP_DEREF_OF_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_DEREF_OF_OP ARGP_LIST1 (ARGP_SUPERNAME)
#define ARGP_DEVICE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_OBJLIST)
#define ARGP_DIVIDE_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET, ARGP_TARGET)
#define ARGP_DWORD_OP ARGP_LIST1 (ARGP_DWORDDATA)
@@ -152,14 +152,14 @@
#define ARGP_NAMEPATH_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_NOOP_OP ARG_NONE
#define ARGP_NOTIFY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG)
-#define ARGP_OBJECT_TYPE_OP ARGP_LIST1 (ARGP_NAME_OR_REF)
+#define ARGP_OBJECT_TYPE_OP ARGP_LIST1 (ARGP_SIMPLENAME)
#define ARGP_ONE_OP ARG_NONE
#define ARGP_ONES_OP ARG_NONE
#define ARGP_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST)
#define ARGP_POWER_RES_OP ARGP_LIST5 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_WORDDATA, ARGP_OBJLIST)
#define ARGP_PROCESSOR_OP ARGP_LIST6 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_BYTEDATA, ARGP_OBJLIST)
#define ARGP_QWORD_OP ARGP_LIST1 (ARGP_QWORDDATA)
-#define ARGP_REF_OF_OP ARGP_LIST1 (ARGP_NAME_OR_REF)
+#define ARGP_REF_OF_OP ARGP_LIST1 (ARGP_SIMPLENAME)
#define ARGP_REGION_OP ARGP_LIST4 (ARGP_NAME, ARGP_BYTEDATA, ARGP_TERMARG, ARGP_TERMARG)
#define ARGP_RELEASE_OP ARGP_LIST1 (ARGP_SUPERNAME)
#define ARGP_RESERVEDFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
@@ -249,7 +249,7 @@
#define ARGI_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_FIND_SET_RIGHT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
-#define ARGI_FROM_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_FROM_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_IF_OP ARGI_INVALID_OPCODE
#define ARGI_INCREMENT_OP ARGI_LIST1 (ARGI_TARGETREF)
#define ARGI_INDEX_FIELD_OP ARGI_INVALID_OPCODE
@@ -313,12 +313,12 @@
#define ARGI_SUBTRACT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_THERMAL_ZONE_OP ARGI_INVALID_OPCODE
#define ARGI_TIMER_OP ARG_NONE
-#define ARGI_TO_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_FIXED_TARGET)
-#define ARGI_TO_BUFFER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
-#define ARGI_TO_DEC_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
-#define ARGI_TO_HEX_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
-#define ARGI_TO_INTEGER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
-#define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_TO_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_TO_BUFFER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_TARGETREF)
+#define ARGI_TO_DEC_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_TARGETREF)
+#define ARGI_TO_HEX_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_TARGETREF)
+#define ARGI_TO_INTEGER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_TARGETREF)
+#define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_UNLOAD_OP ARGI_LIST1 (ARGI_DDBHANDLE)
#define ARGI_VAR_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER)
#define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_INTEGER)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 939d41113970..c23c47328060 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 888440b2cf2e..dcfc05d40e36 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 63da1e37caba..b4d22f6e48e2 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6235642e31d3..62134bdbeda6 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 94be8a8e6c08..c8da453bd960 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 845afb180a7e..6f28cfae2212 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 6bd8d4bcff65..b536fd471292 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -277,9 +277,23 @@
#define ARGI_DEVICE_REF 0x0D
#define ARGI_REFERENCE 0x0E
#define ARGI_TARGETREF 0x0F /* Target, subject to implicit conversion */
-#define ARGI_FIXED_TARGET 0x10 /* Target, no implicit conversion */
-#define ARGI_SIMPLE_TARGET 0x11 /* Name, Local, Arg -- no implicit conversion */
-#define ARGI_STORE_TARGET 0x12 /* Target for store is TARGETREF + package objects */
+#define ARGI_SIMPLE_TARGET 0x10 /* Name, Local, Arg -- no implicit conversion */
+#define ARGI_STORE_TARGET 0x11 /* Target for store is TARGETREF + package objects */
+/*
+ * #define ARGI_FIXED_TARGET 0x10 Target, no implicit conversion
+ *
+ * Removed 10/2016. ARGI_FIXED_TARGET was used for these operators:
+ * from_BCD
+ * to_BCD
+ * to_decimal_string
+ * to_hex_string
+ * to_integer
+ * to_buffer
+ * The purpose of this type was to disable "implicit result conversion",
+ * but this was incorrect per the ACPI spec and other ACPI implementations.
+ * These operators now have the target operand defined as a normal
+ * ARGI_TARGETREF.
+ */
/* Multiple/complex types */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index dee6c7ea4773..653a3d1ef5d5 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 62bd446535f5..5984b90eb590 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 147ce8894f76..251f9477a984 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 502bb587f112..46bf270ac525 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index fe3da7c31bb7..b611cd92b5f5 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 6f05b8c271a5..4d81ea291d93 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 46bd65d38df9..7d08974c64c2 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 068214f9cc9d..2626d79db064 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 314b94cf086a..15c8237b8a80 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 8667f14d535e..8c207c772517 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index 08eaaf350b24..f2252b1ac0b3 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c
index a414e1fa6f9d..99fb0160b8fb 100644
--- a/drivers/acpi/acpica/dbstats.c
+++ b/drivers/acpi/acpica/dbstats.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c
index 74aa38156cdc..c6bee6143266 100644
--- a/drivers/acpi/acpica/dbtest.c
+++ b/drivers/acpi/acpica/dbtest.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c
index ae80106d1000..bfa972b64171 100644
--- a/drivers/acpi/acpica/dbutils.c
+++ b/drivers/acpi/acpica/dbutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 124db237775d..205b8e0eded5 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -430,7 +430,7 @@ acpi_status acpi_initialize_debugger(void)
/* These were created with one unit, grab it */
- status = acpi_os_initialize_command_signals();
+ status = acpi_os_initialize_debugger();
if (ACPI_FAILURE(status)) {
acpi_os_printf("Could not get debugger mutex\n");
return_ACPI_STATUS(status);
@@ -482,7 +482,7 @@ void acpi_terminate_debugger(void)
acpi_os_sleep(100);
}
- acpi_os_terminate_command_signals();
+ acpi_os_terminate_debugger();
}
if (acpi_gbl_db_buffer) {
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index ad0413beeeae..287b3fd73cfc 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4ddcbf100234..d31b49feaa79 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 56c3aadb4cba..4d885eb8eda9 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 6a4b603d0e83..c5dccc54307d 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 5de3f10cab03..b1842dd4edf7 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 2b3210f42a46..31c9c7aec3d5 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 45cbebaa32c0..adcc72cd53a7 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index a91de2b4603c..8deaa16493a0 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 77fd7c84ec39..148523205d41 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 7d8ef52fb88d..049fbab4e5a6 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 438597cf6cc5..78f8e6a4f72f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index fd34040d4f44..cafb3ab567ab 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 651f35a66cc2..44d4553dfbdd 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9f32e08a07d9..3e081983d2ee 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index e3338698e56b..da111a1f5bfb 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 80fc0b9b11e5..d3b6b314fa50 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 9f015782cdd3..0ce33b0f430c 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index bdb10bee13ce..229382035550 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index d54014cab01d..9c941947a063 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 16ce4835e7d0..8649c6242478 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 3f150d567e64..c8adb400330a 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 24768ca03f19..2db61ef1b4a3 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index f51d43adb7d1..4f6bb3f016ab 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 4c6f79514040..28b447ff92df 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index a9092251ce80..93ec528bcd9a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 3b7757c9c916..8ce73b962006 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index e4e9260cdc57..dd1b9dd64cef 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 9179e9abe3db..82e8971f23a4 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index d7a3b2775505..57718a3e029a 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index d2743067126a..beba9d56a0d8 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 5429c2a6bc41..76bfb7dcae2f 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index c32c7829878a..61813bd43f9e 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 588ad1409dbe..f71028e334ee 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -592,7 +592,6 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
*/
switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
case ARGI_SIMPLE_TARGET:
- case ARGI_FIXED_TARGET:
case ARGI_INTEGER_REF: /* Handles Increment, Decrement cases */
switch (destination_type) {
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 613ba6eb08bb..d43d7da4c734 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 37a509d016da..ec614f5a3bcb 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index fce6b2e10209..970dc6c53994 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d7d3ee36338b..5fda981f6498 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ee76d299b3d0..a656608dca84 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 37c88b424a02..1a6f59079ea5 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 26faa91e930c..ecd95b3f35f1 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 3d6af93fe561..ee7b62a86661 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 007300433cde..af73fcde7e5c 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 79ef3b6811a9..44ecba50c0da 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 69e4e269ad2f..ce857addc8db 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 786d53b0bb37..31e4df97cbe1 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index aed8d3459220..8de060664204 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 31b381cae94d..7bcc9d809b7e 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index a183cb740d24..91c1de046442 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index e1d3878be2c6..7fecefc2e1b4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index f29eba1dc5e9..c4852429e2ff 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -305,7 +305,6 @@ acpi_ex_resolve_operands(u16 opcode,
case ARGI_OBJECT_REF:
case ARGI_DEVICE_REF:
case ARGI_TARGETREF: /* Allows implicit conversion rules before store */
- case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
case ARGI_SIMPLE_TARGET: /* Name, Local, or arg - no implicit conversion */
case ARGI_STORE_TARGET:
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index cd70cbcf6de6..a2f8001aeb86 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 13bbb2b241a3..85db4716a043 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 1dab82746d06..4ba7fcbf23b0 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index ac09c31cc70e..ad3b610057f3 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index c9ca82610d77..ae9df8672d9e 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index a8b857a7e9fb..34d608358eaf 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 3ebbb09030b4..fad249e774b4 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 3f2fb4b31fdc..12626d021a9b 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
*/
#include <acpi/acpi.h>
-#include <linux/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_HARDWARE
@@ -103,7 +102,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
acpi_status acpi_hw_extended_sleep(u8 sleep_state)
{
acpi_status status;
- u8 sleep_type_value;
+ u8 sleep_control;
u64 sleep_status;
ACPI_FUNCTION_TRACE(hw_extended_sleep);
@@ -125,18 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
acpi_gbl_system_awake_and_running = FALSE;
- /* Flush caches, as per ACPI specification */
-
- ACPI_FLUSH_CPU_CACHE();
-
- status = acpi_os_prepare_extended_sleep(sleep_state,
- acpi_gbl_sleep_type_a,
- acpi_gbl_sleep_type_b);
- if (ACPI_SKIP(status))
- return_ACPI_STATUS(AE_OK);
- if (ACPI_FAILURE(status))
- return_ACPI_STATUS(status);
-
/*
* Set the SLP_TYP and SLP_EN bits.
*
@@ -146,12 +133,22 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Entering sleep state [S%u]\n", sleep_state));
- sleep_type_value =
- ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
- ACPI_X_SLEEP_TYPE_MASK);
+ sleep_control = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+ ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE;
+
+ /* Flush caches, as per ACPI specification */
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ status = acpi_os_enter_sleep(sleep_state, sleep_control, 0);
+ if (status == AE_CTRL_TERMINATE) {
+ return_ACPI_STATUS(AE_OK);
+ }
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
- status = acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
- &acpi_gbl_FADT.sleep_control);
+ status = acpi_write((u64)sleep_control, &acpi_gbl_FADT.sleep_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
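
A minimal sketch of the sleep_control composition introduced above, with the bit positions defined locally for the example (SLP_TYP in bits 2-4, SLP_EN in bit 5 of the extended sleep control register); these constants are assumptions chosen to mirror the ACPI layout, not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the extended (GAS-based) sleep control register:
 * SLP_TYP occupies bits 2-4, SLP_EN is bit 5. */
#define X_SLEEP_TYPE_POSITION  2
#define X_SLEEP_TYPE_MASK      0x1C
#define X_SLEEP_ENABLE         0x20

int main(void)
{
	uint8_t sleep_type_a = 0x05;	/* example SLP_TYPa value */
	uint8_t sleep_control;

	/* Same composition as the patched acpi_hw_extended_sleep(): shift the
	 * sleep type into place, mask it, and set SLP_EN in one value so the
	 * whole byte can be handed to the OS sleep hook and then written. */
	sleep_control = ((sleep_type_a << X_SLEEP_TYPE_POSITION) &
			 X_SLEEP_TYPE_MASK) | X_SLEEP_ENABLE;

	printf("sleep_control = 0x%02X\n", (unsigned)sleep_control);	/* 0x34 */
	return 0;
}
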
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 76b0e350f5bb..5eb11b30a79e 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 3dd60c96aa07..283819930be6 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 3b7fb99362b6..de74a4c25085 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,8 @@ ACPI_MODULE_NAME("hwregs")
#if (!ACPI_REDUCED_HARDWARE)
/* Local Prototypes */
static u8
-acpi_hw_get_access_bit_width(struct acpi_generic_address *reg,
+acpi_hw_get_access_bit_width(u64 address,
+ struct acpi_generic_address *reg,
u8 max_bit_width);
static acpi_status
@@ -71,7 +72,8 @@ acpi_hw_write_multiple(u32 value,
*
* FUNCTION: acpi_hw_get_access_bit_width
*
- * PARAMETERS: reg - GAS register structure
+ * PARAMETERS: address - GAS register address
+ * reg - GAS register structure
* max_bit_width - Max bit_width supported (32 or 64)
*
* RETURN: Status
@@ -81,27 +83,59 @@ acpi_hw_write_multiple(u32 value,
******************************************************************************/
static u8
-acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
+acpi_hw_get_access_bit_width(u64 address,
+ struct acpi_generic_address *reg, u8 max_bit_width)
{
- if (!reg->access_width) {
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- max_bit_width = 32;
- }
+ u8 access_bit_width;
- /*
- * Detect old register descriptors where only the bit_width field
- * makes senses.
- */
- if (reg->bit_width < max_bit_width &&
- !reg->bit_offset && reg->bit_width &&
- ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
- ACPI_IS_ALIGNED(reg->bit_width, 8)) {
- return (reg->bit_width);
- }
- return (max_bit_width);
+ /*
+ * GAS format "register", used by FADT:
+ * 1. Detected if bit_offset is 0 and bit_width is 8/16/32/64;
+ * 2. access_size field is ignored and bit_width field is used for
+ * determining the boundary of the IO accesses.
+ * GAS format "region", used by APEI registers:
+ * 1. Detected if bit_offset is not 0 or bit_width is not 8/16/32/64;
+ * 2. access_size field is used for determining the boundary of the
+ * IO accesses;
+ * 3. bit_offset/bit_width fields are used to describe the "region".
+ *
+ * Note: This algorithm assumes that the "Address" fields should always
+ * contain aligned values.
+ */
+ if (!reg->bit_offset && reg->bit_width &&
+ ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
+ ACPI_IS_ALIGNED(reg->bit_width, 8)) {
+ access_bit_width = reg->bit_width;
+ } else if (reg->access_width) {
+ access_bit_width = (1 << (reg->access_width + 2));
} else {
- return (1 << (reg->access_width + 2));
+ access_bit_width =
+ ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset +
+ reg->bit_width);
+ if (access_bit_width <= 8) {
+ access_bit_width = 8;
+ } else {
+ while (!ACPI_IS_ALIGNED(address, access_bit_width >> 3)) {
+ access_bit_width >>= 1;
+ }
+ }
}
+
+ /* Maximum IO port access bit width is 32 */
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ max_bit_width = 32;
+ }
+
+ /*
+ * Return access width according to the requested maximum access bit width,
+ * as the caller should know the format of the register and may enforce
+ * 32-bit accesses.
+ */
+ if (access_bit_width < max_bit_width) {
+ return (access_bit_width);
+ }
+ return (max_bit_width);
}
/******************************************************************************
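
To make the new access-width selection easier to follow in isolation, here is a stand-alone approximation of the same algorithm; the structure and helper macros below are simplified local substitutes, so treat this as a sketch of the logic rather than the patched kernel function.

#include <stdint.h>
#include <stdio.h>

/* Simplified local helpers standing in for the ACPICA macros */
#define IS_POWER_OF_TWO(x)   ((x) && !((x) & ((x) - 1)))
#define IS_ALIGNED(x, a)     (((x) % (a)) == 0)

struct gas {			/* trimmed-down generic address structure */
	uint8_t space_id;	/* 0 = memory, 1 = I/O (assumption) */
	uint8_t bit_width;
	uint8_t bit_offset;
	uint8_t access_width;	/* 0 = undefined, else 1 << (w + 2) bits */
};

static uint8_t round_up_pow2_8(uint16_t v)
{
	uint8_t p = 8;

	while (p < v)
		p <<= 1;
	return p;
}

static uint8_t get_access_bit_width(uint64_t address, const struct gas *reg,
				    uint8_t max_bit_width)
{
	uint8_t access;

	if (!reg->bit_offset && reg->bit_width &&
	    IS_POWER_OF_TWO(reg->bit_width) && IS_ALIGNED(reg->bit_width, 8)) {
		access = reg->bit_width;	/* FADT-style "register" GAS */
	} else if (reg->access_width) {
		access = 1 << (reg->access_width + 2);
	} else {
		/* APEI-style "region" GAS: derive a width, then shrink it
		 * until the address is aligned to that width */
		access = round_up_pow2_8(reg->bit_offset + reg->bit_width);
		while (access > 8 && !IS_ALIGNED(address, access >> 3))
			access >>= 1;
	}

	if (reg->space_id == 1)		/* I/O port accesses cap at 32 bits */
		max_bit_width = 32;

	return access < max_bit_width ? access : max_bit_width;
}

int main(void)
{
	struct gas fadt_reg = { 1, 32, 0, 0 };	/* register-style, I/O */
	struct gas apei_reg = { 0, 4, 2, 0 };	/* region-style, memory */

	printf("FADT-style: %u bits\n",
	       (unsigned)get_access_bit_width(0x1804, &fadt_reg, 32));
	printf("APEI-style: %u bits\n",
	       (unsigned)get_access_bit_width(0x1001, &apei_reg, 32));
	return 0;
}

The key change is that the "register" versus "region" GAS detection now happens first, and the requested address alignment can shrink the derived width before the 32-bit I/O cap and the caller's maximum are applied.
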
@@ -163,7 +197,8 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
/* Validate the bit_width, convert access_width into number of bits */
- access_width = acpi_hw_get_access_bit_width(reg, max_bit_width);
+ access_width =
+ acpi_hw_get_access_bit_width(*address, reg, max_bit_width);
bit_width =
ACPI_ROUND_UP(reg->bit_offset + reg->bit_width, access_width);
if (max_bit_width < bit_width) {
@@ -219,7 +254,7 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
* into number of bits based
*/
*value = 0;
- access_width = acpi_hw_get_access_bit_width(reg, 32);
+ access_width = acpi_hw_get_access_bit_width(address, reg, 32);
bit_width = reg->bit_offset + reg->bit_width;
bit_offset = reg->bit_offset;
@@ -252,20 +287,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
&value32,
access_width);
}
-
- /*
- * Use offset style bit masks because:
- * bit_offset < access_width/bit_width < access_width, and
- * access_width is ensured to be less than 32-bits by
- * acpi_hw_validate_register().
- */
- if (bit_offset) {
- value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
- bit_offset = 0;
- }
- if (bit_width < access_width) {
- value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
- }
}
/*
@@ -306,6 +327,12 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
{
u64 address;
+ u8 access_width;
+ u32 bit_width;
+ u8 bit_offset;
+ u64 value64;
+ u32 value32;
+ u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_write);
@@ -317,23 +344,61 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
return (status);
}
+ /* Convert access_width into number of bits */
+
+ access_width = acpi_hw_get_access_bit_width(address, reg, 32);
+ bit_width = reg->bit_offset + reg->bit_width;
+ bit_offset = reg->bit_offset;
+
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_write_memory((acpi_physical_address)
- address, (u64)value,
- reg->bit_width);
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- status = acpi_hw_write_port((acpi_io_address)
- address, value, reg->bit_width);
+ index = 0;
+ while (bit_width) {
+ /*
+ * Use offset style bit reads because "Index * AccessWidth" is
+ * ensured to be less than 32-bits by acpi_hw_validate_register().
+ */
+ value32 = ACPI_GET_BITS(&value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_32(access_width));
+
+ if (bit_offset >= access_width) {
+ bit_offset -= access_width;
+ } else {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ value64 = (u64)value32;
+ status =
+ acpi_os_write_memory((acpi_physical_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ value64, access_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_write_port((acpi_io_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ value32,
+ access_width);
+ }
+ }
+
+ /*
+ * Index * access_width is ensured to be less than 32-bits by
+ * acpi_hw_validate_register().
+ */
+ bit_width -=
+ bit_width > access_width ? access_width : bit_width;
+ index++;
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ value, access_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
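
For intuition about the new chunked write path, a self-contained sketch that splits a value into access-width pieces the same way and prints where each piece would land; get_bits() is a simplified local stand-in for ACPI_GET_BITS, so the details here are approximate.

#include <stdint.h>
#include <stdio.h>

/* Simplified substitute for ACPI_GET_BITS: extract 'width' bits of 'value'
 * starting at bit 'pos' (width <= 32 assumed). */
static uint32_t get_bits(uint32_t value, uint32_t pos, uint32_t width)
{
	uint64_t mask = (width >= 32) ? 0xFFFFFFFFu : ((1u << width) - 1);

	return (uint32_t)((value >> pos) & mask);
}

int main(void)
{
	/* Example register: bit_offset 8, bit_width 24, accessed 8 bits at a
	 * time, as the patched acpi_hw_write() might decide. */
	uint32_t value = 0x00ABCDEF;
	uint32_t access_width = 8;
	uint32_t bit_width = 8 + 24;	/* bit_offset + bit_width */
	uint32_t bit_offset = 8;
	uint32_t index = 0;

	while (bit_width) {
		uint32_t chunk = get_bits(value, index * access_width,
					  access_width);

		if (bit_offset >= access_width) {
			/* This chunk lies entirely below the field, so the
			 * hardware access is skipped, as in the kernel loop */
			bit_offset -= access_width;
		} else {
			printf("write 0x%02X at byte offset %u\n",
			       (unsigned)chunk,
			       (unsigned)(index * (access_width / 8)));
		}

		bit_width -= (bit_width > access_width) ? access_width
							: bit_width;
		index++;
	}
	return 0;
}
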
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d00c9810845b..1fe7387a00e6 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
*/
#include <acpi/acpi.h>
-#include <linux/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_HARDWARE
@@ -152,12 +151,14 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
ACPI_FLUSH_CPU_CACHE();
- status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
- pm1b_control);
- if (ACPI_SKIP(status))
+ status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control);
+ if (status == AE_CTRL_TERMINATE) {
return_ACPI_STATUS(AE_OK);
- if (ACPI_FAILURE(status))
+ }
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
+
/* Write #2: Write both SLP_TYP + SLP_EN */
status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 04cc9406c7d8..b3c5d8c754bb 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index ad0a745712a9..531620abed80 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 98c26ff39409..34684ae89981 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f76e0eab32b8..5733b1167e46 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 73f98d3fed25..498bb8f70e6b 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index c2cf73fd3918..8ba5b32c9f71 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index f45bff632692..9095d51f6b37 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 2b85dee6d4c0..e4a7da8a11f0 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 84f35dd27033..4123b5077a7d 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 7060a5668989..5026594763ea 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 5d59cfcef6f4..d22167cbd0ca 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 36643a8cf65a..ce33e7297ea7 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index d1f20143bb11..d2915e186ae1 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 94d5d3339845..3db9ca25a620 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index cfa2bb7162d8..707b2aa501e1 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 4f14e9205bff..2fc33a5203f4 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 6d7844580b2a..3dbbecf22087 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index fbedc6e8ab36..4954cb6c9090 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9523d41c7ae9..38316266521e 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d5336122486b..352265498e90 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 61036d210274..5de8957f5ef0 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 691814dfed31..661676714f7b 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index ebd731fe8e45..6b6e6f498cff 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index d2a9b4fd739f..8e365c0e766b 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index e525cbe7d83b..106966235805 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 32d372b85243..47f689ec3fcb 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index c29c930ffa08..05b62ad44c3e 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -269,23 +269,27 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
- if (walk_state->opcode == AML_UNLOAD_OP) {
+ if ((GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
+ ARGP_SUPERNAME)
+ || (GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
+ ARGP_TARGET)) {
/*
- * acpi_ps_get_next_namestring has increased the AML pointer,
- * so we need to restore the saved AML pointer for method call.
+ * acpi_ps_get_next_namestring has increased the AML pointer past
+ * the method invocation namestring, so we need to restore the
+ * saved AML pointer back to the original method invocation
+ * namestring.
*/
walk_state->parser_state.aml = start;
walk_state->arg_count = 1;
acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
- return_ACPI_STATUS(AE_OK);
}
/* This name is actually a control method invocation */
method_desc = acpi_ns_get_attached_object(node);
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Control Method - %p Desc %p Path=%p\n", node,
- method_desc, path));
+ "Control Method invocation %4.4s - %p Desc %p Path=%p\n",
+ node->name.ascii, node, method_desc, path));
name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start);
if (!name_op) {
@@ -719,6 +723,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Expected argument type ARGP: %s (%2.2X)\n",
+ acpi_ut_get_argument_type_name(arg_type), arg_type));
+
switch (arg_type) {
case ARGP_BYTEDATA:
case ARGP_WORDDATA:
@@ -796,11 +804,14 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
}
break;
- case ARGP_TARGET:
- case ARGP_SUPERNAME:
case ARGP_SIMPLENAME:
case ARGP_NAME_OR_REF:
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** SimpleName/NameOrRef: %s (%2.2X)\n",
+ acpi_ut_get_argument_type_name(arg_type),
+ arg_type));
+
subop = acpi_ps_peek_opcode(parser_state);
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
@@ -816,28 +827,49 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* To support super_name arg of Unload */
-
- if (walk_state->opcode == AML_UNLOAD_OP) {
- status =
- acpi_ps_get_next_namepath(walk_state,
- parser_state, arg,
- ACPI_POSSIBLE_METHOD_CALL);
-
- /*
- * If the super_name argument is a method call, we have
- * already restored the AML pointer, just free this Arg
- */
- if (arg->common.aml_opcode ==
- AML_INT_METHODCALL_OP) {
- acpi_ps_free_op(arg);
- arg = NULL;
- }
- } else {
- status =
- acpi_ps_get_next_namepath(walk_state,
- parser_state, arg,
- ACPI_NOT_METHOD_CALL);
+ status =
+ acpi_ps_get_next_namepath(walk_state, parser_state,
+ arg,
+ ACPI_NOT_METHOD_CALL);
+ } else {
+ /* Single complex argument, nothing returned */
+
+ walk_state->arg_count = 1;
+ }
+ break;
+
+ case ARGP_TARGET:
+ case ARGP_SUPERNAME:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** Target/Supername: %s (%2.2X)\n",
+ acpi_ut_get_argument_type_name(arg_type),
+ arg_type));
+
+ subop = acpi_ps_peek_opcode(parser_state);
+ if (subop == 0 ||
+ acpi_ps_is_leading_char(subop) ||
+ ACPI_IS_ROOT_PREFIX(subop) ||
+ ACPI_IS_PARENT_PREFIX(subop)) {
+
+ /* NULL target (zero). Convert to a NULL namepath */
+
+ arg =
+ acpi_ps_alloc_op(AML_INT_NAMEPATH_OP,
+ parser_state->aml);
+ if (!arg) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ status =
+ acpi_ps_get_next_namepath(walk_state, parser_state,
+ arg,
+ ACPI_POSSIBLE_METHOD_CALL);
+
+ if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
+ acpi_ps_free_op(arg);
+ arg = NULL;
+ walk_state->arg_count = 1;
}
} else {
/* Single complex argument, nothing returned */
@@ -849,6 +881,11 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_DATAOBJ:
case ARGP_TERMARG:
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** TermArg/DataObj: %s (%2.2X)\n",
+ acpi_ut_get_argument_type_name(arg_type),
+ arg_type));
+
/* Single complex argument, nothing returned */
walk_state->arg_count = 1;
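
Taken together, the psargs.c hunks above change how a namestring that resolves to a control method is parsed: instead of special-casing only the Unload operator, the parser now converts the name into an internal method-call op whenever the expected argument type is SuperName or Target, rewinding the AML pointer to the start of the namestring so the invocation is re-parsed with its own arguments (and the early return is dropped, so the name_op for the invocation is still built afterwards). A condensed sketch of that decision, using only names visible in the hunks and not a verbatim copy of the kernel code:

	/* In acpi_ps_get_next_namepath(), after the lookup succeeds and the
	 * node turns out to be a Method:
	 */
	if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) == ARGP_SUPERNAME ||
	    GET_CURRENT_ARG_TYPE(walk_state->arg_types) == ARGP_TARGET) {
		walk_state->parser_state.aml = start;	/* rewind to the namestring */
		walk_state->arg_count = 1;
		acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
	}
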
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 6a9f5059f682..14d689606d2f 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,10 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Get arguments for opcode [%s]\n",
+ op->common.aml_op_name));
+
switch (op->common.aml_opcode) {
case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
case AML_WORD_OP: /* AML_WORDDATA_ARG */
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index db0e90342e82..5c4aff0f4f26 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -348,7 +348,15 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
argument_count) {
op->common.flags |= ACPI_PARSEOP_TARGET;
}
- } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+ }
+
+ /*
+ * Special case for both Increment() and Decrement(), where
+ * the lone argument is both a source and a target.
+ */
+ else if ((parent_scope->common.aml_opcode == AML_INCREMENT_OP)
+ || (parent_scope->common.aml_opcode ==
+ AML_DECREMENT_OP)) {
op->common.flags |= ACPI_PARSEOP_TARGET;
}
}
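
The psobject.c hunk extends the target-flag marking from Increment() alone to Decrement() as well, since the single operand of both operators is read and then written back. A hypothetical consumer-side check, only to illustrate what the flag conveys (op_is_target is not an existing ACPICA function):

	static bool op_is_target(union acpi_parse_object *op)
	{
		/* Set by the parser when the operand will be stored to,
		 * not just read; Increment/Decrement operands now qualify. */
		return (op->common.flags & ACPI_PARSEOP_TARGET) != 0;
	}
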
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 8e0c97dca01f..451b672915f1 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 177b05b239b7..89f95b7f26e9 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 1ce26d9f8ff6..a813bbbd5a8b 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 560c3684ef43..22d7f1d6849b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 0288cdbda88e..9677fff8fd47 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -129,10 +129,10 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
union acpi_parse_object *prev_arg;
const struct acpi_opcode_info *op_info;
- ACPI_FUNCTION_ENTRY();
+ ACPI_FUNCTION_TRACE(ps_append_arg);
if (!op) {
- return;
+ return_VOID;
}
/* Get the info structure for this opcode */
@@ -144,7 +144,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
op->common.aml_opcode));
- return;
+ return_VOID;
}
/* Check if this opcode requires argument sub-objects */
@@ -153,7 +153,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Has no linked argument objects */
- return;
+ return_VOID;
}
/* Append the argument to the linked argument list */
@@ -181,6 +181,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
op->common.arg_list_length++;
}
+
+ return_VOID;
}
/*******************************************************************************
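
The pstree.c hunk switches acpi_ps_append_arg() from ACPI_FUNCTION_ENTRY to full entry/exit tracing, which is why every bare return becomes return_VOID and one is added at the end of the function: the return_* macros are what emit the matching exit trace. A minimal sketch of that convention (example_helper and its condition are placeholders, not kernel code):

	static void example_helper(int arg)
	{
		ACPI_FUNCTION_TRACE(example_helper);

		if (!arg) {
			return_VOID;	/* early exit is still traced */
		}

		/* ... body ... */
		return_VOID;		/* normal exit emits the exit trace */
	}
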
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 89cb4bffcc7c..2fa38bb76a55 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 04f98c0a7684..22a37c82af19 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index f3c87264bd1b..c88a681586bf 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 492d5b011f33..a131a28bb09d 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index f1e83addd5b4..74e47f829ccb 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 809b61c114fe..f72ff0b54a63 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 5ffdb5602d8d..f4cdf8d832dc 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 61e8f16c857d..8aacd28293fa 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 8e067cb73973..475da9d6aed5 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 07dfbed10d55..b7a47fbc519b 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index bc8f34590d95..092a733c42b8 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 8c42dd734559..36a6657dd34d 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 88b53ef9105d..273eecb3001b 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 25165ca42051..2ae79613f6b7 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index b82c061f205a..c20e6d07928d 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index fa491c64c040..b2aeca01204a 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 465ed8137167..59a4f9ed06a7 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..27c5c27d4818 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
ACPI_FUNCTION_TRACE(tb_install_and_load_table);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
/* Install the table and load it into the namespace */
status = acpi_tb_install_standard_table(address, flags, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
+ goto exit;
}
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
status = acpi_tb_load_table(i, acpi_gbl_root_node);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-unlock_and_exit:
+exit:
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 81473a4880ce..51860bfc111e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index f6b9b4e4298b..fea89c8d305c 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..4620f3c68c13 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
+ /* Acquire the table lock */
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
if (reload) {
/*
* Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
new_table_desc.signature.integer));
status = AE_BAD_SIGNATURE;
- goto release_and_exit;
+ goto unlock_and_exit;
}
/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Table is still loaded, this is an error */
status = AE_ALREADY_EXISTS;
- goto release_and_exit;
+ goto unlock_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Invoke table handler if present */
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
if (acpi_gbl_table_handler) {
(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
new_table_desc.pointer,
acpi_gbl_table_handler_context);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+ /* Release the table lock */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
release_and_exit:
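
The tbdata.c and tbinstal.c hunks look like two halves of one locking change: acpi_tb_install_and_load_table() no longer juggles ACPI_MTX_TABLES itself, and acpi_tb_install_standard_table() takes the lock internally, dropping it around the install notification so the callback never runs with the tables mutex held. The resulting shape, simplified from the hunks above:

	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
	/* validate the signature, check for an existing entry, install ... */

	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
	if (acpi_gbl_table_handler) {
		/* runs unlocked, so the handler may call back into table code */
		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
					     new_table_desc.pointer,
					     acpi_gbl_table_handler_context);
	}
	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

	/* common exit path releases the lock again */
	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
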
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 26d61dbace0a..edfd7b10be19 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 86854e846800..5a968a78652b 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 7684707b254b..010b1c43df92 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 82019c01a0e5..b71ce3b817ea 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 0adb1c78d863..f9f9a7da2cad 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 433d822798b6..26a0633115be 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 13324a27b99b..a3401bd29413 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 706c1f346490..909bdb198651 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index ff2981275b9a..f17eaa009dde 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 3b8d23ef351f..11c7f72f2d56 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 82f971402d85..e9382255d6c6 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 044df9b0356e..bd5ea3101eb7 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index b3d8421cfb80..60868309e326 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -238,7 +238,7 @@ const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
if (!obj_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
- return_PTR("[NULL Object Descriptor]");
+ return_STR("[NULL Object Descriptor]");
}
/* These descriptor types share a common area */
@@ -251,7 +251,7 @@ const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
acpi_ut_get_descriptor_name(obj_desc),
obj_desc));
- return_PTR("Invalid object");
+ return_STR("Invalid object");
}
return_STR(acpi_ut_get_type_name(obj_desc->common.type));
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 529d6c38ea7c..c6eb9fae70f9 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -421,8 +421,10 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Obj %p Type %.2X Refs %.2X [Incremented]\n",
- object, object->common.type, new_count));
+ "Obj %p Type %.2X [%s] Refs %.2X [Incremented]\n",
+ object, object->common.type,
+ acpi_ut_get_object_type_name(object),
+ new_count));
break;
case REF_DECREMENT:
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 475932cecf1a..e3368186e1c1 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7bad13f2e518..3fce7519c690 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 695240338e00..eb6dcab33d2f 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index dd3fd7f97f8e..230a50c82f22 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 36d2fc789088..6600bc257516 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index f7cd2d52643b..a6eb580ee21d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 1711fdf41709..23e766d1691d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 3cd0978925ef..db2d9910866e 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 2d6530ee7e51..aa0502d1d019 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 389de3bd1ff1..443ffad01209 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 15073375bd00..586354788018 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 2514239282c2..792664982ea3 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 72b9a062bbab..64e6641bfe82 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index f0484b058c44..3175b133c0e4 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 3cd573c5f7f9..c82399f9b456 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index ce18346b6144..350709f23e4c 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 40eba804d49c..7e6e1ae6140f 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 1de3376da66a..c86bae7b1d0f 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -421,8 +421,10 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
- /* The absolute minimum resource template is one end_tag descriptor */
-
+ /*
+ * The absolute minimum resource template is one end_tag descriptor.
+ * However, we will treat a lone end_tag as just a simple buffer.
+ */
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
@@ -454,9 +456,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
/* Invoke the user function */
if (user_function) {
- status =
- user_function(aml, length, offset, resource_index,
- context);
+ status = user_function(aml, length, offset,
+ resource_index, context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -480,6 +481,12 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
*context = aml;
}
+ /* Check if buffer is defined to be longer than the resource length */
+
+ if (aml_length > (offset + length)) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
/* Normal exit */
return_ACPI_STATUS(AE_OK);
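
The utresrc.c hunk tightens resource-template validation: once the end_tag has been consumed, offset + length is the number of bytes actually described, so a buffer declared longer than that carries trailing data and is now rejected (for example, a 4-byte buffer holding only a 2-byte end_tag plus two stray bytes). The added check, shown in isolation:

	/* offset is the position of the descriptor just processed,
	 * length is its size */
	if (aml_length > (offset + length)) {
		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
	}
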
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index f3d4dbd5fac0..64308c304ade 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 288913a0e709..9eacbcb9e4f4 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index b4f341c98a95..f42be01d99fd 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index df31d71ce596..9a07a42cae34 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 81088ff9d67b..5028e06718b1 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index ec503c862961..6b9ba4029f8e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index d9f15cbcd8a0..a16bd9eac653 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index a5ca0f57cd08..6d5180601cf2 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 850de0155528..c016211c35ae 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2016, Intel Corp.
+ * Copyright (C) 2000 - 2017, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index eebb7e39c49c..ec50c32ea3da 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -711,7 +711,7 @@ static int __init einj_init(void)
rc = einj_check_table(einj_tab);
if (rc) {
- pr_warn(FW_BUG "Invalid EINJ table.n");
+ pr_warn(FW_BUG "Invalid EINJ table.\n");
return -EINVAL;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e0d2e6e6e40c..3752521c62ab 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
if (!iort_fwnode)
return NULL;
- ops = iommu_get_instance(iort_fwnode);
+ ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return NULL;
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 75f128e766a9..ca28aa572aa9 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -15,40 +15,41 @@
#include <linux/sysfs.h>
#include <linux/efi-bgrt.h>
+static void *bgrt_image;
static struct kobject *bgrt_kobj;
static ssize_t show_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static ssize_t show_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
}
static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
static ssize_t show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
}
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static ssize_t show_xoffset(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
}
static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
static ssize_t show_yoffset(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
+ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
}
static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
@@ -84,15 +85,24 @@ static int __init bgrt_init(void)
{
int ret;
- if (!bgrt_image)
+ if (!bgrt_tab.image_address)
return -ENODEV;
+ bgrt_image = memremap(bgrt_tab.image_address, bgrt_image_size,
+ MEMREMAP_WB);
+ if (!bgrt_image) {
+ pr_notice("Ignoring BGRT: failed to map image memory\n");
+ return -ENOMEM;
+ }
+
bin_attr_image.private = bgrt_image;
bin_attr_image.size = bgrt_image_size;
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
- if (!bgrt_kobj)
- return -EINVAL;
+ if (!bgrt_kobj) {
+ ret = -EINVAL;
+ goto out_memmap;
+ }
ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
if (ret)
@@ -102,6 +112,8 @@ static int __init bgrt_init(void)
out_kobject:
kobject_put(bgrt_kobj);
+out_memmap:
+ memunmap(bgrt_image);
return ret;
}
device_initcall(bgrt_init);
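
In the bgrt.c hunks the driver now maps the boot graphics image itself from bgrt_tab.image_address, apparently instead of relying on a mapping set up earlier during boot, and has to unwind that mapping on its error paths. The core pattern, reduced to a sketch (phys and size stand in for the values taken from bgrt_tab; memremap()/memunmap() come from <linux/io.h>):

	void *img = memremap(phys, size, MEMREMAP_WB);	/* cacheable mapping */
	if (!img)
		return -ENOMEM;

	/* ... publish img through the sysfs bin attribute ... */

	memunmap(img);	/* undo the mapping if registration fails */
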
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 95855cb9d6fb..80cb5eb75b63 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -677,6 +677,48 @@ static bool acpi_of_match_device(struct acpi_device *adev,
return false;
}
+static bool acpi_of_modalias(struct acpi_device *adev,
+ char *modalias, size_t len)
+{
+ const union acpi_object *of_compatible;
+ const union acpi_object *obj;
+ const char *str, *chr;
+
+ of_compatible = adev->data.of_compatible;
+ if (!of_compatible)
+ return false;
+
+ if (of_compatible->type == ACPI_TYPE_PACKAGE)
+ obj = of_compatible->package.elements;
+ else /* Must be ACPI_TYPE_STRING. */
+ obj = of_compatible;
+
+ str = obj->string.pointer;
+ chr = strchr(str, ',');
+ strlcpy(modalias, chr ? chr + 1 : str, len);
+
+ return true;
+}
+
+/**
+ * acpi_set_modalias - Set modalias using "compatible" property or supplied ID
+ * @adev: ACPI device object to match
+ * @default_id: ID string to use as default if no compatible string found
+ * @modalias: Pointer to buffer that modalias value will be copied into
+ * @len: Length of modalias buffer
+ *
+ * This is a counterpart of of_modalias_node() for struct acpi_device objects.
+ * If there is a compatible string for @adev, it will be copied to @modalias
+ * with the vendor prefix stripped; otherwise, @default_id will be used.
+ */
+void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
+ char *modalias, size_t len)
+{
+ if (!acpi_of_modalias(adev, modalias, len))
+ strlcpy(modalias, default_id, len);
+}
+EXPORT_SYMBOL_GPL(acpi_set_modalias);
+
static bool __acpi_match_device_cls(const struct acpi_device_id *id,
struct acpi_hardware_id *hwid)
{
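
The acpi_set_modalias() helper added above gives bus drivers one call that prefers the DT-style "compatible" string (vendor prefix stripped) and falls back to a caller-supplied ID. A hypothetical caller, just to show the calling convention (the buffer size and the choice of default ID are arbitrary here):

	char modalias[32];

	/* adev is the device's struct acpi_device companion */
	acpi_set_modalias(adev, dev_name(&adev->dev), modalias, sizeof(modalias));
	/* modalias now holds either the stripped compatible string or the default */
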
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e19f530f1083..668137e4a069 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -57,7 +57,6 @@
#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
#define ACPI_BUTTON_LID_INIT_OPEN 0x01
-#define ACPI_BUTTON_LID_INIT_METHOD 0x02
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -113,7 +112,7 @@ struct acpi_button {
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
@@ -377,9 +376,6 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
case ACPI_BUTTON_LID_INIT_OPEN:
(void)acpi_lid_notify_state(device, 1);
break;
- case ACPI_BUTTON_LID_INIT_METHOD:
- (void)acpi_lid_update_state(device);
- break;
case ACPI_BUTTON_LID_INIT_IGNORE:
default:
break;
@@ -563,9 +559,6 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
if (!strncmp(val, "open", sizeof("open") - 1)) {
lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
pr_info("Notify initial lid state as open\n");
- } else if (!strncmp(val, "method", sizeof("method") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
- pr_info("Notify initial lid state with _LID return value\n");
} else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
pr_info("Do not notify initial lid state\n");
@@ -579,8 +572,6 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
return sprintf(buffer, "open");
- case ACPI_BUTTON_LID_INIT_METHOD:
- return sprintf(buffer, "method");
case ACPI_BUTTON_LID_INIT_IGNORE:
return sprintf(buffer, "ignore");
default:
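With the "method" option removed, the two remaining initial-lid-state choices can still be selected through the module parameter handled by param_set_lid_init_state() above; a hedged example (the "button." prefix is assumed from this file's module name):

    button.lid_init_state=open      (the default after this change)
    button.lid_init_state=ignore    (do not report an initial lid state)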
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 48e19d013170..c24235d8fb52 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -188,7 +188,6 @@ EXPORT_SYMBOL(first_ec);
static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_query_wq;
-static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
@@ -492,26 +491,6 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
ec_log_drv("event blocked");
}
-/*
- * Process _Q events that might have accumulated in the EC.
- * Run with locked ec mutex.
- */
-static void acpi_ec_clear(struct acpi_ec *ec)
-{
- int i, status;
- u8 value = 0;
-
- for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query(ec, &value);
- if (status || !value)
- break;
- }
- if (unlikely(i == ACPI_EC_CLEAR_MAX))
- pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
- else
- pr_info("%d stale EC events cleared\n", i);
-}
-
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
unsigned long flags;
@@ -520,10 +499,6 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
-
- /* Drain additional events if hardware requires that */
- if (EC_FLAGS_CLEAR_ON_RESUME)
- acpi_ec_clear(ec);
}
#ifdef CONFIG_PM_SLEEP
@@ -729,12 +704,12 @@ static void start_transaction(struct acpi_ec *ec)
static int ec_guard(struct acpi_ec *ec)
{
- unsigned long guard = usecs_to_jiffies(ec_polling_guard);
+ unsigned long guard = usecs_to_jiffies(ec->polling_guard);
unsigned long timeout = ec->timestamp + guard;
/* Ensure guarding period before polling EC status */
do {
- if (ec_busy_polling) {
+ if (ec->busy_polling) {
/* Perform busy polling */
if (ec_transaction_completed(ec))
return 0;
@@ -998,6 +973,28 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
spin_unlock_irqrestore(&ec->lock, flags);
}
+static void acpi_ec_enter_noirq(struct acpi_ec *ec)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ ec->busy_polling = true;
+ ec->polling_guard = 0;
+ ec_log_drv("interrupt blocked");
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static void acpi_ec_leave_noirq(struct acpi_ec *ec)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ ec->busy_polling = ec_busy_polling;
+ ec->polling_guard = ec_polling_guard;
+ ec_log_drv("interrupt unblocked");
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -1278,7 +1275,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
- if (ec_busy_polling || bits > 8)
+ if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value)
@@ -1286,7 +1283,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
- if (ec_busy_polling || bits > 8)
+ if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
switch (result) {
@@ -1329,6 +1326,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
spin_lock_init(&ec->lock);
INIT_WORK(&ec->work, acpi_ec_event_handler);
ec->timestamp = jiffies;
+ ec->busy_polling = true;
+ ec->polling_guard = 0;
return ec;
}
@@ -1390,6 +1389,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ acpi_ec_enter_noirq(ec);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@@ -1429,6 +1429,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
/* This is not fatal as we can poll EC events */
if (ACPI_SUCCESS(status)) {
set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true);
@@ -1741,31 +1742,6 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
#endif
/*
- * On some hardware it is necessary to clear events accumulated by the EC during
- * sleep. These ECs stop reporting GPEs until they are manually polled, if too
- * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
- *
- * https://bugzilla.kernel.org/show_bug.cgi?id=44161
- *
- * Ideally, the EC should also be instructed NOT to accumulate events during
- * sleep (which Windows seems to do somehow), but the interface to control this
- * behaviour is not known at this time.
- *
- * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
- * however it is very likely that other Samsung models are affected.
- *
- * On systems which don't accumulate _Q events during sleep, this extra check
- * should be harmless.
- */
-static int ec_clear_on_resume(const struct dmi_system_id *id)
-{
- pr_debug("Detected system needing EC poll on resume.\n");
- EC_FLAGS_CLEAR_ON_RESUME = 1;
- ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
- return 0;
-}
-
-/*
* Some ECDTs contain wrong register addresses.
* MSI MS-171F
* https://bugzilla.kernel.org/show_bug.cgi?id=12461
@@ -1782,9 +1758,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
- {
- ec_clear_on_resume, "Samsung hardware", {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
@@ -1839,34 +1812,6 @@ error:
}
#ifdef CONFIG_PM_SLEEP
-static void acpi_ec_enter_noirq(struct acpi_ec *ec)
-{
- unsigned long flags;
-
- if (ec == first_ec) {
- spin_lock_irqsave(&ec->lock, flags);
- ec->saved_busy_polling = ec_busy_polling;
- ec->saved_polling_guard = ec_polling_guard;
- ec_busy_polling = true;
- ec_polling_guard = 0;
- ec_log_drv("interrupt blocked");
- spin_unlock_irqrestore(&ec->lock, flags);
- }
-}
-
-static void acpi_ec_leave_noirq(struct acpi_ec *ec)
-{
- unsigned long flags;
-
- if (ec == first_ec) {
- spin_lock_irqsave(&ec->lock, flags);
- ec_busy_polling = ec->saved_busy_polling;
- ec_polling_guard = ec->saved_polling_guard;
- ec_log_drv("interrupt unblocked");
- spin_unlock_irqrestore(&ec->lock, flags);
- }
-}
-
static int acpi_ec_suspend_noirq(struct device *dev)
{
struct acpi_ec *ec =
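The sketch below is illustrative, not from the patch: it shows the shape of the per-EC model this change introduces, where the busy_polling/polling_guard pair lives in struct acpi_ec and is flipped under ec->lock by acpi_ec_enter_noirq()/acpi_ec_leave_noirq() instead of by writing the ec_busy_polling/ec_polling_guard globals.

/* Hypothetical helper showing how a transaction path would consult the
 * per-device flag rather than the global: */
static bool my_ec_uses_busy_polling(struct acpi_ec *ec)
{
	unsigned long flags;
	bool busy;

	spin_lock_irqsave(&ec->lock, flags);
	busy = ec->busy_polling;	/* true between enter_noirq and leave_noirq */
	spin_unlock_irqrestore(&ec->lock, flags);

	return busy;
}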
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c
deleted file mode 100644
index ee9e0f27b2bf..000000000000
--- a/drivers/acpi/gsi.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * ACPI GSI IRQ layer
- *
- * Copyright (C) 2015 ARM Ltd.
- * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/acpi.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of.h>
-
-enum acpi_irq_model_id acpi_irq_model;
-
-static struct fwnode_handle *acpi_gsi_domain_id;
-
-/**
- * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
- * @gsi: GSI IRQ number to map
- * @irq: pointer where linux IRQ number is stored
- *
- * irq location updated with irq value [>0 on success, 0 on failure]
- *
- * Returns: linux IRQ number on success (>0)
- * -EINVAL on failure
- */
-int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-{
- struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
- DOMAIN_BUS_ANY);
-
- *irq = irq_find_mapping(d, gsi);
- /*
- * *irq == 0 means no mapping, that should
- * be reported as a failure
- */
- return (*irq > 0) ? *irq : -EINVAL;
-}
-EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
-
-/**
- * acpi_register_gsi() - Map a GSI to a linux IRQ number
- * @dev: device for which IRQ has to be mapped
- * @gsi: GSI IRQ number
- * @trigger: trigger type of the GSI number to be mapped
- * @polarity: polarity of the GSI to be mapped
- *
- * Returns: a valid linux IRQ number on success
- * -EINVAL on failure
- */
-int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
- int polarity)
-{
- struct irq_fwspec fwspec;
-
- if (WARN_ON(!acpi_gsi_domain_id)) {
- pr_warn("GSI: No registered irqchip, giving up\n");
- return -EINVAL;
- }
-
- fwspec.fwnode = acpi_gsi_domain_id;
- fwspec.param[0] = gsi;
- fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
- fwspec.param_count = 2;
-
- return irq_create_fwspec_mapping(&fwspec);
-}
-EXPORT_SYMBOL_GPL(acpi_register_gsi);
-
-/**
- * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
- * @gsi: GSI IRQ number
- */
-void acpi_unregister_gsi(u32 gsi)
-{
- struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
- DOMAIN_BUS_ANY);
- int irq = irq_find_mapping(d, gsi);
-
- irq_dispose_mapping(irq);
-}
-EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
-
-/**
- * acpi_set_irq_model - Setup the GSI irqdomain information
- * @model: the value assigned to acpi_irq_model
- * @fwnode: the irq_domain identifier for mapping and looking up
- * GSI interrupts
- */
-void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode)
-{
- acpi_irq_model = model;
- acpi_gsi_domain_id = fwnode;
-}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 0c452265c111..219b90bc0922 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -172,8 +172,8 @@ struct acpi_ec {
struct work_struct work;
unsigned long timestamp;
unsigned long nr_pending_queries;
- bool saved_busy_polling;
- unsigned int saved_polling_guard;
+ bool busy_polling;
+ unsigned int polling_guard;
};
extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
new file mode 100644
index 000000000000..830299a74b84
--- /dev/null
+++ b/drivers/acpi/irq.c
@@ -0,0 +1,297 @@
+/*
+ * ACPI GSI IRQ layer
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+
+enum acpi_irq_model_id acpi_irq_model;
+
+static struct fwnode_handle *acpi_gsi_domain_id;
+
+/**
+ * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
+ * @gsi: GSI IRQ number to map
+ * @irq: pointer where linux IRQ number is stored
+ *
+ * The *irq location is updated with the mapped value (>0 on success, 0 on failure).
+ *
+ * Returns: linux IRQ number on success (>0)
+ * -EINVAL on failure
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+ DOMAIN_BUS_ANY);
+
+ *irq = irq_find_mapping(d, gsi);
+ /*
+ * *irq == 0 means no mapping, that should
+ * be reported as a failure
+ */
+ return (*irq > 0) ? *irq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+/**
+ * acpi_register_gsi() - Map a GSI to a linux IRQ number
+ * @dev: device for which IRQ has to be mapped
+ * @gsi: GSI IRQ number
+ * @trigger: trigger type of the GSI number to be mapped
+ * @polarity: polarity of the GSI to be mapped
+ *
+ * Returns: a valid linux IRQ number on success
+ * -EINVAL on failure
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ int polarity)
+{
+ struct irq_fwspec fwspec;
+
+ if (WARN_ON(!acpi_gsi_domain_id)) {
+ pr_warn("GSI: No registered irqchip, giving up\n");
+ return -EINVAL;
+ }
+
+ fwspec.fwnode = acpi_gsi_domain_id;
+ fwspec.param[0] = gsi;
+ fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+ fwspec.param_count = 2;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+/**
+ * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
+ * @gsi: GSI IRQ number
+ */
+void acpi_unregister_gsi(u32 gsi)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+ DOMAIN_BUS_ANY);
+ int irq = irq_find_mapping(d, gsi);
+
+ irq_dispose_mapping(irq);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/**
+ * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source.
+ * @source: acpi_resource_source to use for the lookup.
+ *
+ * Description:
+ * Retrieve the fwhandle of the device referenced by the given IRQ resource
+ * source.
+ *
+ * Return:
+ * The referenced device fwhandle, the default GSI domain handle when no
+ * resource source is specified, or NULL on failure.
+ */
+static struct fwnode_handle *
+acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
+{
+ struct fwnode_handle *result;
+ struct acpi_device *device;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!source->string_length)
+ return acpi_gsi_domain_id;
+
+ status = acpi_get_handle(NULL, source->string_ptr, &handle);
+ if (WARN_ON(ACPI_FAILURE(status)))
+ return NULL;
+
+ device = acpi_bus_get_acpi_device(handle);
+ if (WARN_ON(!device))
+ return NULL;
+
+ result = &device->fwnode;
+ acpi_bus_put_acpi_device(device);
+ return result;
+}
+
+/*
+ * Context for the resource walk used to lookup IRQ resources.
+ * Contains a return code, the lookup index, and references to the flags
+ * and fwspec where the result is returned.
+ */
+struct acpi_irq_parse_one_ctx {
+ int rc;
+ unsigned int index;
+ unsigned long *res_flags;
+ struct irq_fwspec *fwspec;
+};
+
+/**
+ * acpi_irq_parse_one_match - Handle a matching IRQ resource.
+ * @fwnode: matching fwnode
+ * @hwirq: hardware IRQ number
+ * @triggering: triggering attributes of hwirq
+ * @polarity: polarity attributes of hwirq
+ * @shareable: shareable attributes of hwirq
+ * @ctx: acpi_irq_parse_one_ctx updated by this function
+ *
+ * Description:
+ * Handle a matching IRQ resource by populating the given ctx with
+ * the information passed.
+ */
+static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
+ u32 hwirq, u8 triggering,
+ u8 polarity, u8 shareable,
+ struct acpi_irq_parse_one_ctx *ctx)
+{
+ if (!fwnode)
+ return;
+ ctx->rc = 0;
+ *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ ctx->fwspec->fwnode = fwnode;
+ ctx->fwspec->param[0] = hwirq;
+ ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
+ ctx->fwspec->param_count = 2;
+}
+
+/**
+ * acpi_irq_parse_one_cb - Handle the given resource.
+ * @ares: resource to handle
+ * @context: context for the walk
+ *
+ * Description:
+ * This is called by acpi_walk_resources() for each resource returned by
+ * the _CRS method. Only IRQ resources are inspected. Since an IRQ resource
+ * may describe several interrupts, we check whether the lookup index falls
+ * within this resource's interrupt array; if not, we subtract the resource's
+ * interrupt count from the index and move on to the next resource.
+ * Once a match is found, acpi_irq_parse_one_match() populates the result
+ * and the walk ends by returning AE_CTRL_TERMINATE.
+ *
+ * Return:
+ * AE_OK if the walk should continue, AE_CTRL_TERMINATE if a matching
+ * IRQ resource was found.
+ */
+static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
+ void *context)
+{
+ struct acpi_irq_parse_one_ctx *ctx = context;
+ struct acpi_resource_irq *irq;
+ struct acpi_resource_extended_irq *eirq;
+ struct fwnode_handle *fwnode;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = &ares->data.irq;
+ if (ctx->index >= irq->interrupt_count) {
+ ctx->index -= irq->interrupt_count;
+ return AE_OK;
+ }
+ fwnode = acpi_gsi_domain_id;
+ acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
+ irq->triggering, irq->polarity,
+ irq->sharable, ctx);
+ return AE_CTRL_TERMINATE;
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ eirq = &ares->data.extended_irq;
+ if (eirq->producer_consumer == ACPI_PRODUCER)
+ return AE_OK;
+ if (ctx->index >= eirq->interrupt_count) {
+ ctx->index -= eirq->interrupt_count;
+ return AE_OK;
+ }
+ fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source);
+ acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
+ eirq->triggering, eirq->polarity,
+ eirq->sharable, ctx);
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_irq_parse_one - Resolve an interrupt for a device
+ * @handle: the device whose interrupt is to be resolved
+ * @index: index of the interrupt to resolve
+ * @fwspec: structure irq_fwspec filled by this function
+ * @flags: resource flags filled by this function
+ *
+ * Description:
+ * Resolves an interrupt for a device by walking its _CRS resources to find
+ * the matching ACPI IRQ resource and populating the given struct irq_fwspec
+ * and flags.
+ *
+ * Return:
+ * 0 if a matching IRQ resource was found, otherwise the default -EINVAL
+ * left in ctx.rc.
+ */
+static int acpi_irq_parse_one(acpi_handle handle, unsigned int index,
+ struct irq_fwspec *fwspec, unsigned long *flags)
+{
+ struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec };
+
+ acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_irq_parse_one_cb, &ctx);
+ return ctx.rc;
+}
+
+/**
+ * acpi_irq_get - Look up an ACPI IRQ resource and use it to initialize a resource.
+ * @handle: ACPI device handle
+ * @index: ACPI IRQ resource index to look up
+ * @res: Linux IRQ resource to initialize
+ *
+ * Description:
+ * Look for the ACPI IRQ resource with the given index and use it to initialize
+ * the given Linux IRQ resource.
+ *
+ * Return:
+ * 0 on success
+ * -EINVAL if an error occurs
+ * -EPROBE_DEFER if the IRQ lookup/conversion failed
+ */
+int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
+{
+ struct irq_fwspec fwspec;
+ struct irq_domain *domain;
+ unsigned long flags;
+ int rc;
+
+ rc = acpi_irq_parse_one(handle, index, &fwspec, &flags);
+ if (rc)
+ return rc;
+
+ domain = irq_find_matching_fwnode(fwspec.fwnode, DOMAIN_BUS_ANY);
+ if (!domain)
+ return -EPROBE_DEFER;
+
+ rc = irq_create_fwspec_mapping(&fwspec);
+ if (rc <= 0)
+ return -EINVAL;
+
+ res->start = rc;
+ res->end = rc;
+ res->flags = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_irq_get);
+
+/**
+ * acpi_set_irq_model - Setup the GSI irqdomain information
+ * @model: the value assigned to acpi_irq_model
+ * @fwnode: the irq_domain identifier for mapping and looking up
+ * GSI interrupts
+ */
+void __init acpi_set_irq_model(enum acpi_irq_model_id model,
+ struct fwnode_handle *fwnode)
+{
+ acpi_irq_model = model;
+ acpi_gsi_domain_id = fwnode;
+}
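A hypothetical consumer sketch (not part of this file): a platform driver could resolve its first _CRS interrupt through the new acpi_irq_get() helper and propagate the -EPROBE_DEFER result while the irqchip is not yet registered.

static int my_driver_get_irq(struct device *dev)
{
	struct resource res;
	int ret;

	ret = acpi_irq_get(ACPI_HANDLE(dev), 0, &res);
	if (ret)
		return ret;	/* includes -EPROBE_DEFER when the domain is missing */

	return res.start;	/* Linux IRQ number; trigger/polarity live in res.flags */
}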
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 2f82b8eba360..7361d00818e2 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
struct device *dev = acpi_desc->dev;
struct acpi_nfit_flush_work flush;
+ int rc;
/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
INIT_WORK_ONSTACK(&flush.work, flush_probe);
COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
queue_work(nfit_wq, &flush.work);
- return wait_for_completion_interruptible(&flush.cmp);
+
+ rc = wait_for_completion_interruptible(&flush.cmp);
+ cancel_work_sync(&flush.work);
+ return rc;
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
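The generic shape of this fix, as a standalone sketch with invented names: when waiting interruptibly on an on-stack work item, the work must be cancelled synchronously before returning, otherwise the workqueue could still dereference the stack frame after an interrupted wait.

struct my_flush {
	struct work_struct work;
	struct completion cmp;
};

static void my_flush_fn(struct work_struct *w)
{
	struct my_flush *f = container_of(w, struct my_flush, work);

	/* ... perform the actual flush ... */
	complete(&f->cmp);
}

static int my_flush_probe(struct workqueue_struct *wq)
{
	struct my_flush flush;
	int rc;

	INIT_WORK_ONSTACK(&flush.work, my_flush_fn);
	init_completion(&flush.cmp);
	queue_work(wq, &flush.work);

	rc = wait_for_completion_interruptible(&flush.cmp);
	/* The wait may have been interrupted: make sure the work has finished
	 * (or never runs) before the on-stack items go out of scope. */
	cancel_work_sync(&flush.work);
	destroy_work_on_stack(&flush.work);
	return rc;
}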
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e5ce81c38eed..3ba1c3472cf9 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -90,6 +90,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
static struct notifier_block nfit_mce_dec = {
.notifier_call = nfit_handle_mce,
+ .priority = MCE_PRIO_NFIT,
};
void nfit_mce_register(void)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 57fb5f468ac2..db78d353bab1 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1686,7 +1686,7 @@ acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
if (rc < 0)
return AE_ERROR;
else if (rc > 0)
- return AE_CTRL_SKIP;
+ return AE_CTRL_TERMINATE;
return AE_OK;
}
@@ -1697,6 +1697,7 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
__acpi_os_prepare_sleep = func;
}
+#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
u32 val_b)
{
@@ -1707,13 +1708,35 @@ acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
if (rc < 0)
return AE_ERROR;
else if (rc > 0)
- return AE_CTRL_SKIP;
+ return AE_CTRL_TERMINATE;
return AE_OK;
}
+#else
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
+ u32 val_b)
+{
+ return AE_OK;
+}
+#endif
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
u32 val_a, u32 val_b))
{
__acpi_os_prepare_extended_sleep = func;
}
+
+acpi_status acpi_os_enter_sleep(u8 sleep_state,
+ u32 reg_a_value, u32 reg_b_value)
+{
+ acpi_status status;
+
+ if (acpi_gbl_reduced_hardware)
+ status = acpi_os_prepare_extended_sleep(sleep_state,
+ reg_a_value,
+ reg_b_value);
+ else
+ status = acpi_os_prepare_sleep(sleep_state,
+ reg_a_value, reg_b_value);
+ return status;
+}
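A rough sketch of the expected call site (an assumption about the ACPICA side, which is not part of this hunk): the sleep entry code calls the single wrapper and treats AE_CTRL_TERMINATE as "the host already handled the transition", which is why the hooks above now return that code instead of AE_CTRL_SKIP.

static acpi_status my_hw_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
{
	acpi_status status;

	status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control);
	if (status == AE_CTRL_TERMINATE)
		return AE_OK;		/* e.g. a hypervisor completed the suspend */
	if (ACPI_FAILURE(status))
		return status;

	/* ... otherwise program SLP_TYP/SLP_EN as usual ... */
	return AE_OK;
}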
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index f0b4a981b8d3..18b72eec3507 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -75,10 +75,8 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
- if (event == CPUFREQ_START && ignore_ppc <= 0) {
+ if (ignore_ppc < 0)
ignore_ppc = 0;
- return 0;
- }
if (ignore_ppc)
return 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cb57962ef7c4..8b11d6d385dc 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -43,6 +43,19 @@ static inline bool
acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
+#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
+static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
+{
+ return ext_irq->resource_source.string_length == 0 &&
+ ext_irq->producer_consumer == ACPI_CONSUMER;
+}
+#else
+static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
+{
+ return true;
+}
+#endif
+
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
{
u64 reslen = end - start + 1;
@@ -470,9 +483,12 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
acpi_dev_irqresource_disabled(res, 0);
return false;
}
- acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
+ if (is_gsi(ext_irq))
+ acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
ext_irq->sharable, false);
+ else
+ acpi_dev_irqresource_disabled(res, 0);
break;
default:
res->flags = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..a4327af676fe 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -130,6 +130,12 @@ void __init acpi_nvs_nosave_s3(void)
nvs_nosave_s3 = true;
}
+static int __init init_nvs_save_s3(const struct dmi_system_id *d)
+{
+ nvs_nosave_s3 = false;
+ return 0;
+}
+
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -324,6 +330,19 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189431
+ * Lenovo G50-45 is a platform later than 2012, but it still needs NVS
+ * memory saving during S3.
+ */
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G50-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ },
+ },
{},
};
@@ -674,14 +693,6 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- /*
- * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
- * the default suspend mode was not selected from the command line.
- */
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
- mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_default = PM_SUSPEND_FREEZE;
-
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
- {
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
- /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
- .callback = video_detect_force_native,
- .ident = "HP Pavilion dv6",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
- },
- },
-
{ },
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2c8be74f401d..70b57d2229d6 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@ menuconfig ATA
tristate "Serial ATA and Parallel ATA drivers (libata)"
depends on HAS_IOMEM
depends on BLOCK
- depends on !(M32R || M68K || S390) || BROKEN
+ depends on !(M32R || S390) || BROKEN
select SCSI
select GLOB
---help---
@@ -80,6 +80,8 @@ config SATA_PMP
This option adds support for SATA Port Multipliers
(the SATA version of an ethernet hub, or SAS expander).
+if HAS_DMA
+
comment "Controllers with non-SFF native interface"
config SATA_AHCI
@@ -127,6 +129,7 @@ config AHCI_ST
config AHCI_IMX
tristate "Freescale i.MX AHCI SATA support"
depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
+ depends on (HWMON && (THERMAL || !THERMAL_OF)) || !HWMON
help
This option enables support for the Freescale i.MX SoC's
onboard AHCI SATA.
@@ -232,6 +235,8 @@ config SATA_SIL24
If unsure, say N.
+endif # HAS_DMA
+
config ATA_SFF
bool "ATA SFF support (for legacy IDE and PATA)"
default y
@@ -289,6 +294,7 @@ config SATA_SX4
config ATA_BMDMA
bool "ATA BMDMA support"
+ depends on HAS_DMA
default y
help
This option adds support for SFF ATA controllers with BMDMA
@@ -344,6 +350,7 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
+ depends on HAS_DMA
depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
@@ -353,6 +360,7 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ depends on HAS_DMA
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
@@ -895,6 +903,15 @@ config PATA_CMD640_PCI
If unsure, say N.
+config PATA_FALCON
+ tristate "Atari Falcon PATA support"
+ depends on M68K && ATARI
+ help
+ This option enables support for the on-board IDE
+ interface on the Atari Falcon.
+
+ If unsure, say N.
+
config PATA_ISAPNP
tristate "ISA Plug and Play PATA support"
depends on ISAPNP
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index a46e6b784bda..89a0a1915d36 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_FALCON) += pata_falcon.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 3f3a7db208ae..787567e840bd 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -26,6 +26,9 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/libata.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
#include "ahci.h"
#define DRV_NAME "ahci-imx"
@@ -214,6 +217,180 @@ static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
return timeout ? 0 : -ETIMEDOUT;
}
+enum {
+ /* SATA PHY Register */
+ SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT = 0x0001,
+ SATA_PHY_CR_CLOCK_DAC_CTL = 0x0008,
+ SATA_PHY_CR_CLOCK_RTUNE_CTL = 0x0009,
+ SATA_PHY_CR_CLOCK_ADC_OUT = 0x000A,
+ SATA_PHY_CR_CLOCK_MPLL_TST = 0x0017,
+};
+
+static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem *mmio)
+{
+ u16 adc_out_reg, read_sum;
+ u32 index, read_attempt;
+ const u32 attempt_limit = 100;
+
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+ imx_phy_reg_write(rtune_ctl_reg, mmio);
+
+ /* two dummy reads */
+ index = 0;
+ read_attempt = 0;
+ adc_out_reg = 0;
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_ADC_OUT, mmio);
+ while (index < 2) {
+ imx_phy_reg_read(&adc_out_reg, mmio);
+ /* check if valid */
+ if (adc_out_reg & 0x400)
+ index++;
+
+ read_attempt++;
+ if (read_attempt > attempt_limit) {
+ dev_err(dev, "Read REG more than %d times!\n",
+ attempt_limit);
+ break;
+ }
+ }
+
+ index = 0;
+ read_attempt = 0;
+ read_sum = 0;
+ while (index < 80) {
+ imx_phy_reg_read(&adc_out_reg, mmio);
+ if (adc_out_reg & 0x400) {
+ read_sum = read_sum + (adc_out_reg & 0x3FF);
+ index++;
+ }
+ read_attempt++;
+ if (read_attempt > attempt_limit) {
+ dev_err(dev, "Read REG more than %d times!\n",
+ attempt_limit);
+ break;
+ }
+ }
+
+ /* Scale by 1000 to keep precision in integer arithmetic */
+ return (read_sum * 1000) / 80;
+}
+
+/* SATA AHCI temperature monitor */
+static int sata_ahci_read_temperature(void *dev, int *temp)
+{
+ u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum;
+ u32 str1, str2, str3, str4;
+ int m1, m2, a;
+ struct ahci_host_priv *hpriv = dev_get_drvdata(dev);
+ void __iomem *mmio = hpriv->mmio;
+
+ /* check rd-wr to reg */
+ read_sum = 0;
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT, mmio);
+ imx_phy_reg_write(read_sum, mmio);
+ imx_phy_reg_read(&read_sum, mmio);
+ if ((read_sum & 0xffff) != 0)
+ dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+ imx_phy_reg_write(0x5A5A, mmio);
+ imx_phy_reg_read(&read_sum, mmio);
+ if ((read_sum & 0xffff) != 0x5A5A)
+ dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+ imx_phy_reg_write(0x1234, mmio);
+ imx_phy_reg_read(&read_sum, mmio);
+ if ((read_sum & 0xffff) != 0x1234)
+ dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+ /* start temperature test */
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+ imx_phy_reg_read(&mpll_test_reg, mmio);
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+ imx_phy_reg_read(&rtune_ctl_reg, mmio);
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+ imx_phy_reg_read(&dac_ctl_reg, mmio);
+
+ /* mpll_tst.meas_iv ([12:2]) */
+ str1 = (mpll_test_reg >> 2) & 0x7FF;
+ /* rtune_ctl.mode ([1:0]) */
+ str2 = (rtune_ctl_reg) & 0x3;
+ /* dac_ctl.dac_mode ([14:12]) */
+ str3 = (dac_ctl_reg >> 12) & 0x7;
+ /* rtune_ctl.sel_atbp ([4]) */
+ str4 = (rtune_ctl_reg >> 4);
+
+ /* Calculate the m1 */
+ /* mpll_tst.meas_iv */
+ mpll_test_reg = (mpll_test_reg & 0xE03) | (512) << 2;
+ /* rtune_ctl.mode */
+ rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (1);
+ /* dac_ctl.dac_mode */
+ dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (4) << 12;
+ /* rtune_ctl.sel_atbp */
+ rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (0) << 4;
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+ imx_phy_reg_write(mpll_test_reg, mmio);
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+ imx_phy_reg_write(dac_ctl_reg, mmio);
+ m1 = read_adc_sum(dev, rtune_ctl_reg, mmio);
+
+ /* Calculate the m2 */
+ /* rtune_ctl.sel_atbp */
+ rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (1) << 4;
+ m2 = read_adc_sum(dev, rtune_ctl_reg, mmio);
+
+ /* restore the status */
+ /* mpll_tst.meas_iv */
+ mpll_test_reg = (mpll_test_reg & 0xE03) | (str1) << 2;
+ /* rtune_ctl.mode */
+ rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (str2);
+ /* dac_ctl.dac_mode */
+ dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (str3) << 12;
+ /* rtune_ctl.sel_atbp */
+ rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (str4) << 4;
+
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+ imx_phy_reg_write(mpll_test_reg, mmio);
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+ imx_phy_reg_write(dac_ctl_reg, mmio);
+ imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+ imx_phy_reg_write(rtune_ctl_reg, mmio);
+
+ /* Compute temperature */
+ if (!(m2 / 1000))
+ m2 = 1000;
+ a = (m2 - m1) / (m2/1000);
+ *temp = ((-559) * a * a) / 1000 + (1379) * a + (-458000);
+
+ return 0;
+}
+
+static ssize_t sata_ahci_show_temp(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ int temp = 0;
+ int err;
+
+ err = sata_ahci_read_temperature(dev, &temp);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static const struct thermal_zone_of_device_ops fsl_sata_ahci_of_thermal_ops = {
+ .get_temp = sata_ahci_read_temperature,
+};
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sata_ahci_show_temp, NULL, 0);
+
+static struct attribute *fsl_sata_ahci_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(fsl_sata_ahci);
+
static int imx_sata_enable(struct ahci_host_priv *hpriv)
{
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
@@ -597,6 +774,25 @@ static int imx_ahci_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (imxpriv->type == AHCI_IMX53 &&
+ IS_ENABLED(CONFIG_HWMON)) {
+ /* Add the temperature monitor */
+ struct device *hwmon_dev;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_groups(dev,
+ "sata_ahci",
+ hpriv,
+ fsl_sata_ahci_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
+ goto disable_clk;
+ }
+ devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
+ &fsl_sata_ahci_of_thermal_ops);
+ dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev));
+ }
+
ret = imx_sata_enable(hpriv);
if (ret)
goto disable_clk;
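To make the arithmetic in sata_ahci_read_temperature() concrete, here is a standalone user-space rendering of the same formula; the m1/m2 inputs are invented and the millidegree-Celsius output unit is assumed from the hwmon temp1_input convention.

#include <stdio.h>

int main(void)
{
	int m1 = 280000, m2 = 500000;	/* hypothetical x1000-scaled ADC sums */
	int a, temp;

	if (!(m2 / 1000))		/* same divide-by-zero guard as the driver */
		m2 = 1000;
	a = (m2 - m1) / (m2 / 1000);				/* 440 */
	temp = ((-559) * a * a) / 1000 + 1379 * a + (-458000);

	printf("a=%d temp=%d millidegC (%d.%03d C)\n",
	       a, temp, temp / 1000, temp % 1000);		/* ~40.538 C */
	return 0;
}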
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 9884c8c6e934..85d833289f28 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -46,19 +46,21 @@
#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
-#define LS1046A_SATA_ECC_DIS 0x80000000
+#define ECC_DIS_ARMV8_CH2 0x80000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
AHCI_LS2080A,
AHCI_LS1046A,
+ AHCI_LS2088A,
};
struct ahci_qoriq_priv {
struct ccsr_ahci *reg_base;
enum ahci_qoriq_type type;
void __iomem *ecc_addr;
+ bool is_dmacoherent;
};
static const struct of_device_id ahci_qoriq_of_match[] = {
@@ -66,6 +68,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
+ { .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -157,6 +160,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
switch (qpriv->type) {
case AHCI_LS1021A:
+ if (!qpriv->ecc_addr)
+ return -EINVAL;
writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
@@ -164,26 +169,43 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG,
+ reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
+ if (!qpriv->ecc_addr)
+ return -EINVAL;
+ writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS1046A:
- writel(LS1046A_SATA_ECC_DIS, qpriv->ecc_addr);
+ if (!qpriv->ecc_addr)
+ return -EINVAL;
+ writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
- writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
+
+ case AHCI_LS2088A:
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
@@ -221,6 +243,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (IS_ERR(qoriq_priv->ecc_addr))
return PTR_ERR(qoriq_priv->ecc_addr);
}
+ qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
rc = ahci_platform_enable_resources(hpriv);
if (rc)
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 73b19b277138..c2b5941d9184 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -821,8 +821,10 @@ static int xgene_ahci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
- } else if (info->valid & ACPI_VALID_CID) {
- version = XGENE_AHCI_V2;
+ } else {
+ if (info->valid & ACPI_VALID_CID)
+ version = XGENE_AHCI_V2;
+ kfree(info);
}
}
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9cd0a2d41816..ca75823697dd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
+ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+ qc->result_tf.command |= ATA_SENSE;
}
/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
/*
- * Device times out with higher max sects.
+ * These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
- { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
@@ -4814,32 +4816,6 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
}
/**
- * ata_sg_clean - Unmap DMA memory associated with command
- * @qc: Command containing DMA memory to be released
- *
- * Unmap all mapped DMA memory associated with this command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sg_clean(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg = qc->sg;
- int dir = qc->dma_dir;
-
- WARN_ON_ONCE(sg == NULL);
-
- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
-
- if (qc->n_elem)
- dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
-
- qc->flags &= ~ATA_QCFLAG_DMAMAP;
- qc->sg = NULL;
-}
-
-/**
* atapi_check_dma - Check whether ATAPI DMA can be supported
* @qc: Metadata associated with taskfile to check
*
@@ -4923,6 +4899,34 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
qc->cursg = qc->sg;
}
+#ifdef CONFIG_HAS_DMA
+
+/**
+ * ata_sg_clean - Unmap DMA memory associated with command
+ * @qc: Command containing DMA memory to be released
+ *
+ * Unmap all mapped DMA memory associated with this command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg = qc->sg;
+ int dir = qc->dma_dir;
+
+ WARN_ON_ONCE(sg == NULL);
+
+ VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
+
+ qc->flags &= ~ATA_QCFLAG_DMAMAP;
+ qc->sg = NULL;
+}
+
/**
* ata_sg_setup - DMA-map the scatter-gather table associated with a command.
* @qc: Command with scatter-gather table to be mapped.
@@ -4955,6 +4959,13 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
return 0;
}
+#else /* !CONFIG_HAS_DMA */
+
+static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
+static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
+
+#endif /* !CONFIG_HAS_DMA */
+
/**
* swap_buf_le16 - swap halves of 16-bit words in place
* @buf: Buffer to swap
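For the widened LITEON blacklist entry earlier in this file, an illustrative check (assumption: libata's blacklist comparison goes through glob_match() from lib/glob.c, so the '*' spans the capacity part of the model string):

#include <linux/glob.h>

static bool cx1_is_blacklisted(const char *model)
{
	return glob_match("LITEON CX1-JB*-HP", model);
}

/* cx1_is_blacklisted("LITEON CX1-JB256-HP") and
 * cx1_is_blacklisted("LITEON CX1-JB512-HP") both return true. */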
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0e1ec37070d1..4e5bf36c5f46 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -549,6 +549,7 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
DPRINTK("EXIT, ret=%d\n", ret);
return ret;
}
+EXPORT_SYMBOL(ata_scsi_timed_out);
static void ata_eh_unload(struct ata_port *ap)
{
@@ -2606,21 +2607,39 @@ static void ata_eh_link_report(struct ata_link *link)
[DMA_TO_DEVICE] = "out",
[DMA_FROM_DEVICE] = "in",
};
- static const char *prot_str[] = {
- [ATA_PROT_UNKNOWN] = "unknown",
- [ATA_PROT_NODATA] = "nodata",
- [ATA_PROT_PIO] = "pio",
- [ATA_PROT_DMA] = "dma",
- [ATA_PROT_NCQ] = "ncq dma",
- [ATA_PROT_NCQ_NODATA] = "ncq nodata",
- [ATAPI_PROT_NODATA] = "nodata",
- [ATAPI_PROT_PIO] = "pio",
- [ATAPI_PROT_DMA] = "dma",
- };
+ const char *prot_str = NULL;
+ switch (qc->tf.protocol) {
+ case ATA_PROT_UNKNOWN:
+ prot_str = "unknown";
+ break;
+ case ATA_PROT_NODATA:
+ prot_str = "nodata";
+ break;
+ case ATA_PROT_PIO:
+ prot_str = "pio";
+ break;
+ case ATA_PROT_DMA:
+ prot_str = "dma";
+ break;
+ case ATA_PROT_NCQ:
+ prot_str = "ncq dma";
+ break;
+ case ATA_PROT_NCQ_NODATA:
+ prot_str = "ncq nodata";
+ break;
+ case ATAPI_PROT_NODATA:
+ prot_str = "nodata";
+ break;
+ case ATAPI_PROT_PIO:
+ prot_str = "pio";
+ break;
+ case ATAPI_PROT_DMA:
+ prot_str = "dma";
+ break;
+ default:
+ prot_str = "unknown";
+ break;
+ }
snprintf(data_buf, sizeof(data_buf), " %s %u %s",
- prot_str[qc->tf.protocol], qc->nbytes,
- dma_str[qc->dma_dir]);
+ prot_str, qc->nbytes, dma_str[qc->dma_dir]);
}
if (ata_is_atapi(qc->tf.protocol)) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1f863e757ee4..12d3a66600a3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -484,13 +484,6 @@ struct device_attribute *ata_common_sdev_attrs[] = {
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
-static void ata_scsi_invalid_field(struct ata_device *dev,
- struct scsi_cmnd *cmd, u16 field)
-{
- ata_scsi_set_invalid_field(dev, cmd, field, 0xff);
- cmd->scsi_done(cmd);
-}
-
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
@@ -1265,13 +1258,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
*/
static int atapi_drain_needed(struct request *rq)
{
- if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ if (likely(!blk_rq_is_passthrough(rq)))
return 0;
if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
return 0;
- return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+ return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
static int ata_scsi_dev_config(struct scsi_device *sdev,
@@ -2057,6 +2050,12 @@ defer:
return SCSI_MLQUEUE_HOST_BUSY;
}
+struct ata_scsi_args {
+ struct ata_device *dev;
+ u16 *id;
+ struct scsi_cmnd *cmd;
+};
+
/**
* ata_scsi_rbuf_get - Map response buffer.
* @cmd: SCSI command containing buffer to be mapped.
@@ -2133,7 +2132,6 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
if (rc == 0)
cmd->result = SAM_STAT_GOOD;
- args->done(cmd);
}
/**
@@ -2455,23 +2453,6 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
}
/**
- * ata_scsiop_noop - Command handler that simply returns success.
- * @args: device IDENTIFY data / SCSI command of interest.
- * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *
- * No operation. Simply returns success to caller, to indicate
- * that the caller should successfully complete this SCSI command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
-{
- VPRINTK("ENTER\n");
- return 0;
-}
-
-/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
* @src: data being copied
@@ -2873,6 +2854,26 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
DPRINTK("EXIT\n");
}
+/*
+ * ATAPI devices typically report zero for their SCSI version, and sometimes
+ * deviate from the spec WRT response data format. If the SCSI version is
+ * reported as zero, as is typical, we make the following fixups:
+ * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
+ * modern device.
+ * 2) Ensure response data format / ATAPI information are always correct.
+ */
+static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
+{
+ u8 buf[4];
+
+ sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
+ if (buf[2] == 0) {
+ buf[2] = 0x5;
+ buf[3] = 0x32;
+ }
+ sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
+}
+
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
@@ -2927,30 +2928,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
*/
ata_gen_passthru_sense(qc);
} else {
- u8 *scsicmd = cmd->cmnd;
-
- if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
- unsigned long flags;
- u8 *buf;
-
- buf = ata_scsi_rbuf_get(cmd, true, &flags);
-
- /* ATAPI devices typically report zero for their SCSI version,
- * and sometimes deviate from the spec WRT response data
- * format. If SCSI version is reported as zero like normal,
- * then we make the following fixups: 1) Fake MMC-5 version,
- * to indicate to the Linux scsi midlayer this is a modern
- * device. 2) Ensure response data format / ATAPI information
- * are always correct.
- */
- if (buf[2] == 0) {
- buf[2] = 0x5;
- buf[3] = 0x32;
- }
-
- ata_scsi_rbuf_put(cmd, true, &flags);
- }
-
+ if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
+ atapi_fixup_inquiry(cmd);
cmd->result = SAM_STAT_GOOD;
}
@@ -4352,12 +4331,11 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
args.dev = dev;
args.id = dev->id;
args.cmd = cmd;
- args.done = cmd->scsi_done;
switch(scsicmd[0]) {
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_invalid_field(dev, cmd, 1);
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else switch (scsicmd[2]) {
@@ -4389,7 +4367,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
}
/* Fallthrough */
default:
- ata_scsi_invalid_field(dev, cmd, 2);
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
}
break;
@@ -4407,7 +4385,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
- ata_scsi_invalid_field(dev, cmd, 1);
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
case REPORT_LUNS:
@@ -4417,7 +4395,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
case REQUEST_SENSE:
ata_scsi_set_sense(dev, cmd, 0, 0, 0);
cmd->result = (DRIVER_SENSE << 24);
- cmd->scsi_done(cmd);
break;
/* if we reach this, then writeback caching is disabled,
@@ -4431,31 +4408,29 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
case SEEK_6:
case SEEK_10:
case TEST_UNIT_READY:
- ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
break;
case SEND_DIAGNOSTIC:
tmp8 = scsicmd[1] & ~(1 << 3);
- if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
- ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
- else
- ata_scsi_invalid_field(dev, cmd, 1);
+ if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
case MAINTENANCE_IN:
if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
- ata_scsi_invalid_field(dev, cmd, 1);
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
break;
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
/* "Invalid command operation code" */
- cmd->scsi_done(cmd);
break;
}
+
+ cmd->scsi_done(cmd);
}
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 051b6158d1b7..2bd92dca3e62 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -542,7 +542,7 @@ static inline void ata_tf_to_host(struct ata_port *ap,
/**
* ata_sff_data_xfer - Transfer data by PIO
- * @dev: device to target
+ * @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
@@ -555,10 +555,10 @@ static inline void ata_tf_to_host(struct ata_port *ap,
* RETURNS:
* Bytes consumed.
*/
-unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
+unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
unsigned int buflen, int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
@@ -595,7 +595,7 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
/**
* ata_sff_data_xfer32 - Transfer data by PIO
- * @dev: device to target
+ * @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
@@ -610,16 +610,17 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
* Bytes consumed.
*/
-unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
+unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
unsigned int buflen, int rw)
{
+ struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 2;
int slop = buflen & 3;
if (!(ap->pflags & ATA_PFLAG_PIO32))
- return ata_sff_data_xfer(dev, buf, buflen, rw);
+ return ata_sff_data_xfer(qc, buf, buflen, rw);
/* Transfer multiple of 4 bytes */
if (rw == READ)
@@ -658,7 +659,7 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
/**
* ata_sff_data_xfer_noirq - Transfer data by PIO
- * @dev: device to target
+ * @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
@@ -672,14 +673,14 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
* RETURNS:
* Bytes consumed.
*/
-unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
+unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, unsigned char *buf,
unsigned int buflen, int rw)
{
unsigned long flags;
unsigned int consumed;
local_irq_save(flags);
- consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+ consumed = ata_sff_data_xfer32(qc, buf, buflen, rw);
local_irq_restore(flags);
return consumed;
@@ -723,14 +724,14 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
buf = kmap_atomic(page);
/* do the actual data transfer */
- ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+ ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
do_write);
kunmap_atomic(buf);
local_irq_restore(flags);
} else {
buf = page_address(page);
- ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+ ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
do_write);
}
@@ -791,7 +792,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
DPRINTK("send cdb\n");
WARN_ON_ONCE(qc->dev->cdb_len < 12);
- ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
+ ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
ata_sff_sync(ap);
/* FIXME: If the CDB is for DMA do we need to do the transition delay
or is bmdma_start guaranteed to do it ? */
@@ -868,14 +869,14 @@ next_sg:
buf = kmap_atomic(page);
/* do the actual data transfer */
- consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+ consumed = ap->ops->sff_data_xfer(qc, buf + offset,
count, rw);
kunmap_atomic(buf);
local_irq_restore(flags);
} else {
buf = page_address(page);
- consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+ consumed = ap->ops->sff_data_xfer(qc, buf + offset,
count, rw);
}
@@ -2427,11 +2428,21 @@ int ata_pci_sff_activate_host(struct ata_host *host,
return rc;
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
- u8 tmp8, mask;
+ u8 tmp8, mask = 0;
- /* TODO: What if one channel is in native mode ... */
+ /*
+ * The ATA spec says we should use legacy mode when one
+ * port is in legacy mode, but disabled ports on some
+ * PCI hosts appear as fixed legacy ports, e.g. SB600/700
+ * on which the secondary port is not wired, so
+ * ignore ports that are marked as 'dummy' during
+ * this check.
+ */
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
- mask = (1 << 2) | (1 << 0);
+ if (!ata_port_is_dummy(host->ports[0]))
+ mask |= (1 << 0);
+ if (!ata_port_is_dummy(host->ports[1]))
+ mask |= (1 << 2);
if ((tmp8 & mask) != mask)
legacy_mode = 1;
}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 7ef16c085058..46698232e6bf 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -716,7 +716,6 @@ struct scsi_transport_template *ata_attach_transport(void)
return NULL;
i->t.eh_strategy_handler = ata_scsi_error;
- i->t.eh_timed_out = ata_scsi_timed_out;
i->t.user_scan = ata_scsi_user_scan;
i->t.host_attrs.ac.attrs = &i->port_attrs[0];
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 8f3a5596dd67..120fce0befd3 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -31,13 +31,6 @@
#define DRV_NAME "libata"
#define DRV_VERSION "3.00" /* must be exactly four chars */
-struct ata_scsi_args {
- struct ata_device *dev;
- u16 *id;
- struct scsi_cmnd *cmd;
- void (*done)(struct scsi_cmnd *);
-};
-
/* libata-core.c */
enum {
/* flags for ata_dev_read_id() */
@@ -89,7 +82,6 @@ extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
u8 enable, u8 feature);
-extern void ata_sg_clean(struct ata_queued_cmd *qc);
extern void ata_qc_free(struct ata_queued_cmd *qc);
extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -159,7 +151,6 @@ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
extern void ata_eh_acquire(struct ata_port *ap);
extern void ata_eh_release(struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_eh_fastdrain_timerfn(unsigned long arg);
extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 1611e0e8d767..fd5b34f0d007 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -286,10 +286,10 @@ static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
set_smc_timing(ap->dev, adev, info, &timing);
}
-static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
+static unsigned int pata_at91_data_xfer_noirq(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
- struct at91_ide_info *info = dev->link->ap->host->private_data;
+ struct at91_ide_info *info = qc->dev->link->ap->host->private_data;
unsigned int consumed;
unsigned int mode;
unsigned long flags;
@@ -301,7 +301,7 @@ static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
regmap_fields_write(fields.mode, info->cs, (mode & ~AT91_SMC_DBW) |
AT91_SMC_DBW_16);
- consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+ consumed = ata_sff_data_xfer(qc, buf, buflen, rw);
/* restore 8bit mode after data is written */
regmap_fields_write(fields.mode, info->cs, (mode & ~AT91_SMC_DBW) |
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 49d705c9f0f7..6c9aa95a9a05 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,6 +278,11 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
+ /* SB600/700 don't have secondary port wired */
+ if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
+ (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
+ ppi[1] = &ata_dummy_port_info;
+
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index ec748d31928d..9c5780a7e1b9 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1143,7 +1143,7 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
/**
* bfin_data_xfer - Transfer data by PIO
- * @adev: device for this I/O
+ * @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @write_data: read/write
@@ -1151,10 +1151,11 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
* Note: Original code is ata_sff_data_xfer().
*/
-static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
+static unsigned int bfin_data_xfer(struct ata_queued_cmd *qc,
+ unsigned char *buf,
unsigned int buflen, int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
unsigned int words = buflen >> 1;
unsigned short *buf16 = (u16 *)buf;
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index bd6b089c67a3..bf1b910c5d69 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -474,11 +474,11 @@ static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
}
/* Note: original code is ata_sff_data_xfer */
-static unsigned int ep93xx_pata_data_xfer(struct ata_device *adev,
+static unsigned int ep93xx_pata_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
- struct ata_port *ap = adev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
struct ep93xx_pata_data *drv_data = ap->host->private_data;
u16 *data = (u16 *)buf;
unsigned int words = buflen >> 1;
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
new file mode 100644
index 000000000000..5b0c57d1c59f
--- /dev/null
+++ b/drivers/ata/pata_falcon.c
@@ -0,0 +1,184 @@
+/*
+ * Atari Falcon PATA controller driver
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Based on falconide.c:
+ *
+ * Created 12 Jul 1997 by Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/ide.h>
+
+#define DRV_NAME "pata_falcon"
+#define DRV_VERSION "0.1.0"
+
+#define ATA_HD_BASE 0xfff00000
+#define ATA_HD_CONTROL 0x39
+
+static struct scsi_host_template pata_falcon_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_device *dev = qc->dev;
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ bool swap = 1;
+
+ if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
+ !blk_rq_is_passthrough(cmd->request))
+ swap = 0;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ) {
+ if (swap)
+ raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ else
+ raw_insw((u16 *)data_addr, (u16 *)buf, words);
+ } else {
+ if (swap)
+ raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ else
+ raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+ }
+
+ /* Transfer trailing byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned char pad[2] = { };
+
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ if (rw == READ) {
+ if (swap)
+ raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ else
+ raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+ *buf = pad[0];
+ } else {
+ pad[0] = *buf;
+ if (swap)
+ raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ else
+ raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured.
+ */
+static int pata_falcon_set_mode(struct ata_link *link,
+ struct ata_device **unused)
+{
+ struct ata_device *dev;
+
+ ata_for_each_dev(dev, link, ENABLED) {
+ /* We don't really care */
+ dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ ata_dev_info(dev, "configured for PIO\n");
+ }
+ return 0;
+}
+
+static struct ata_port_operations pata_falcon_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = pata_falcon_data_xfer,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = pata_falcon_set_mode,
+};
+
+static int pata_falcon_init_one(void)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
+ return -ENODEV;
+
+ pr_info(DRV_NAME ": Atari Falcon PATA controller\n");
+
+ pdev = platform_device_register_simple(DRV_NAME, 0, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ if (!devm_request_mem_region(&pdev->dev, ATA_HD_BASE, 0x40, DRV_NAME)) {
+ pr_err(DRV_NAME ": resources busy\n");
+ return -EBUSY;
+ }
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+ ap = host->ports[0];
+
+ ap->ops = &pata_falcon_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+
+ base = (void __iomem *)ATA_HD_BASE;
+ ap->ioaddr.data_addr = base;
+ ap->ioaddr.error_addr = base + 1 + 1 * 4;
+ ap->ioaddr.feature_addr = base + 1 + 1 * 4;
+ ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
+ ap->ioaddr.lbal_addr = base + 1 + 3 * 4;
+ ap->ioaddr.lbam_addr = base + 1 + 4 * 4;
+ ap->ioaddr.lbah_addr = base + 1 + 5 * 4;
+ ap->ioaddr.device_addr = base + 1 + 6 * 4;
+ ap->ioaddr.status_addr = base + 1 + 7 * 4;
+ ap->ioaddr.command_addr = base + 1 + 7 * 4;
+
+ ap->ioaddr.altstatus_addr = base + ATA_HD_CONTROL;
+ ap->ioaddr.ctl_addr = base + ATA_HD_CONTROL;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)base,
+ (unsigned long)base + ATA_HD_CONTROL);
+
+ /* activate */
+ return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
+}
+
+module_init(pata_falcon_init_one);
+
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
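In pata_falcon_data_xfer() above, the swap flag decides whether 16-bit words are byte-swapped on their way through the Falcon's IDE data port: ordinary block-layer requests to an ATA disk are transferred unswapped, presumably to stay compatible with the on-disk layout used by the old falconide driver this is based on, while IDENTIFY data, ATAPI packets and other pass-through transfers are swapped so the CPU reads them correctly. Restated as a standalone predicate, purely for illustration and not part of the patch:

#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/libata.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative restatement of the swap decision in pata_falcon_data_xfer():
 * only a regular (non pass-through) request to an ATA disk skips the swap. */
static bool pata_falcon_swap_needed(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;

	if (qc->dev->class == ATA_DEV_ATA && cmd && cmd->request &&
	    !blk_rq_is_passthrough(cmd->request))
		return false;	/* filesystem I/O: keep on-disk byte order */

	return true;		/* identify/packet/pass-through data gets swapped */
}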
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index abda44183512..0b0d93065f5a 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -40,13 +40,13 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
return 0;
}
-static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
+static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
unsigned int i;
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *mmio = ap->ioaddr.data_addr;
struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index bce2a8ca4678..53828b6c3044 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -303,11 +303,12 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
-static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
+static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
- int slop = buflen & 3;
+ struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
+ int slop = buflen & 3;
/* 32bit I/O capable *and* we need to write a whole number of dwords */
if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
@@ -340,7 +341,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
}
local_irq_restore(flags);
} else
- buflen = ata_sff_data_xfer_noirq(dev, buf, buflen, rw);
+ buflen = ata_sff_data_xfer_noirq(qc, buf, buflen, rw);
return buflen;
}
@@ -702,9 +703,11 @@ static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
return ata_sff_qc_issue(qc);
}
-static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
- unsigned int buflen, int rw)
+static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
{
+ struct ata_device *adev = qc->dev;
struct ata_port *ap = adev->link->ap;
int slop = buflen & 3;
@@ -727,7 +730,7 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
}
return (buflen + 3) & ~3;
} else
- return ata_sff_data_xfer(adev, buf, buflen, rw);
+ return ata_sff_data_xfer(qc, buf, buflen, rw);
}
static int qdi_port(struct platform_device *dev,
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 475a00669427..f524a9099d01 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -138,9 +138,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
int trh;
int pause;
/* These names are timing parameters from the ATA spec */
- int t1;
int t2;
- int t2i;
/*
* A divisor value of four will overflow the timing fields at
@@ -154,15 +152,9 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
- t1 = timing.setup;
- if (t1)
- t1--;
t2 = timing.active;
if (t2)
t2--;
- t2i = timing.act8b;
- if (t2i)
- t2i--;
trh = ns_to_tim_reg(div, 20);
if (trh)
@@ -293,17 +285,17 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
/**
* Handle an 8 bit I/O request.
*
- * @dev: Device to access
+ * @qc: Queued command
* @buffer: Data buffer
* @buflen: Length of the buffer.
* @rw: True to write.
*/
-static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
+static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
unsigned char *buffer,
unsigned int buflen,
int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned long words;
int count;
@@ -332,17 +324,17 @@ static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
/**
* Handle a 16 bit I/O request.
*
- * @dev: Device to access
+ * @qc: Queued command
* @buffer: Data buffer
* @buflen: Length of the buffer.
* @rw: True to write.
*/
-static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
+static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
unsigned char *buffer,
unsigned int buflen,
int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned long words;
int count;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index b6b7af894d9d..201a32d0627f 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -32,7 +32,6 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
- const u32 *prop;
ret = of_address_to_resource(dn, 0, &io_res);
if (ret) {
@@ -50,13 +49,9 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
- prop = of_get_property(dn, "reg-shift", NULL);
- if (prop)
- reg_shift = be32_to_cpup(prop);
+ of_property_read_u32(dn, "reg-shift", &reg_shift);
- prop = of_get_property(dn, "pio-mode", NULL);
- if (prop) {
- pio_mode = be32_to_cpup(prop);
+ if (!of_property_read_u32(dn, "pio-mode", &pio_mode)) {
if (pio_mode > 6) {
dev_err(&ofdev->dev, "invalid pio-mode\n");
return -EINVAL;
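The hunk above replaces the of_get_property()/be32_to_cpup() pairs with of_property_read_u32(), which returns 0 on success and leaves the output variable untouched on failure, so the zero-initialised defaults for reg-shift and pio-mode survive when the properties are absent. A small sketch of that idiom, with a hypothetical helper and the same property names as the hunk:

#include <linux/kernel.h>
#include <linux/of.h>

/* Sketch of the of_property_read_u32() idiom: initialise the variable to a
 * default first; if the property is missing or invalid the call fails and
 * the default is kept. */
static void read_pata_props(struct device_node *np)
{
	u32 reg_shift = 0;	/* default when "reg-shift" is absent */
	u32 pio_mode = 0;	/* default when "pio-mode" is absent */

	of_property_read_u32(np, "reg-shift", &reg_shift);

	if (!of_property_read_u32(np, "pio-mode", &pio_mode))
		pr_info("pio-mode from DT: %u\n", pio_mode);

	pr_info("using reg_shift=%u pio_mode=%u\n", reg_shift, pio_mode);
}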
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index bcc4b968c049..a541eacc5e95 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -90,7 +90,7 @@ static int pcmcia_set_mode_8bit(struct ata_link *link,
/**
* ata_data_xfer_8bit - Transfer data by 8bit PIO
- * @dev: device to target
+ * @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
@@ -101,10 +101,10 @@ static int pcmcia_set_mode_8bit(struct ata_link *link,
* Inherited from caller.
*/
-static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
+static unsigned int ata_data_xfer_8bit(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
if (rw == READ)
ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index f6facd686f94..431c7de30ce6 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -263,10 +263,10 @@ static u8 pata_s3c_check_altstatus(struct ata_port *ap)
/*
* pata_s3c_data_xfer - Transfer data by PIO
*/
-static unsigned int pata_s3c_data_xfer(struct ata_device *dev,
+static unsigned int pata_s3c_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
struct s3c_ide_info *info = ap->host->private_data;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1, i;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 823e938c9a78..00ce26d0c047 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,6 +4067,7 @@ static int mv_platform_probe(struct platform_device *pdev)
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
+ void __iomem *mmio;
int n_ports = 0, irq = 0;
int rc;
int port;
@@ -4085,8 +4086,9 @@ static int mv_platform_probe(struct platform_device *pdev)
* Get the register base first
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -EINVAL;
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
/* allocate host */
if (pdev->dev.of_node) {
@@ -4130,9 +4132,7 @@ static int mv_platform_probe(struct platform_device *pdev)
hpriv->board_idx = chip_soc;
host->iomap = NULL;
- hpriv->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- hpriv->base -= SATAHC0_REG_BASE;
+ hpriv->base = mmio - SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
@@ -4526,7 +4526,7 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
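The mv_platform_probe() hunks above replace the separate NULL check and devm_ioremap() call with devm_ioremap_resource(), which validates the resource, requests the memory region and maps it in one step, returning an ERR_PTR() value on failure. A hedged sketch of the idiom in a generic platform probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: devm_ioremap_resource() copes with a NULL resource and also
 * request_mem_region()s the range, which plain devm_ioremap() did not. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *mmio;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	/* ... use mmio; unmapping is handled automatically on detach ... */
	return 0;
}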
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index f72d601e300a..5d38245a7a73 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -447,11 +447,11 @@ static void sata_rcar_exec_command(struct ata_port *ap,
ata_sff_pause(ap);
}
-static unsigned int sata_rcar_data_xfer(struct ata_device *dev,
+static unsigned int sata_rcar_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c53a9dd1353f..623359e407aa 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1779,7 +1779,7 @@ static int eni_do_init(struct atm_dev *dev)
printk(")\n");
printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
- media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
+ media_name[eni_in(MID_RES_ID_MCON) & DAUGHTER_ID]);
error = suni_init(dev);
if (error)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 471ddfd93ea8..5ec109533bb9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2132,12 +2132,8 @@ idt77252_init_est(struct vc_map *vc, int pcr)
est->interval = 2; /* XXX: make this configurable */
est->ewma_log = 2; /* XXX: make this configurable */
- init_timer(&est->timer);
- est->timer.data = (unsigned long)vc;
- est->timer.function = idt77252_est_timer;
-
- est->timer.expires = jiffies + ((HZ / 4) << est->interval);
- add_timer(&est->timer);
+ setup_timer(&est->timer, idt77252_est_timer, (unsigned long)vc);
+ mod_timer(&est->timer, jiffies + ((HZ / 4) << est->interval));
return est;
}
@@ -3638,9 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
spin_lock_init(&card->cmd_lock);
spin_lock_init(&card->tst_lock);
- init_timer(&card->tst_timer);
- card->tst_timer.data = (unsigned long)card;
- card->tst_timer.function = tst_timer;
+ setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
/* Do the I/O remapping... */
card->membase = ioremap(membase, 1024);
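Both idt77252 hunks above fold the open-coded init_timer()/.data/.function assignments into setup_timer(), and the first one also replaces the explicit expires assignment plus add_timer() with mod_timer(). A minimal sketch of the same conversion, assuming the pre-timer_setup() API (callback takes an unsigned long cookie) that this driver still uses:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void example_timer_fn(unsigned long data)
{
	/* hypothetical callback; 'data' carries the context cookie */
}

static struct timer_list example_timer;

static void example_timer_arm(void *ctx)
{
	/* Equivalent to: init_timer(); .data = ...; .function = ...; */
	setup_timer(&example_timer, example_timer_fn, (unsigned long)ctx);

	/* Equivalent to: .expires = jiffies + HZ; add_timer(); */
	mod_timer(&example_timer, jiffies + HZ);
}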
diff --git a/drivers/atm/midway.h b/drivers/atm/midway.h
index 432525ad5e46..d8bec0f2a71c 100644
--- a/drivers/atm/midway.h
+++ b/drivers/atm/midway.h
@@ -56,7 +56,7 @@
#define MID_CON_SUNI 0x00000040 /* 0: UTOPIA; 1: SUNI */
#define MID_CON_V6 0x00000020 /* 0: non-pipel UTOPIA (required iff
!CON_SUNI; 1: UTOPIA */
-#define DAUGTHER_ID 0x0000001f /* daugther board id */
+#define DAUGHTER_ID 0x0000001f /* daughter board id */
/*
* Interrupt Status Acknowledge, Interrupt Status & Interrupt Enable
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c28e1a09786..2c3b359b3536 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
+#include <linux/pm_qos.h>
#include "base.h"
@@ -376,6 +377,7 @@ int register_cpu(struct cpu *cpu, int num)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
+ dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
return 0;
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
struct firmware_buf *buf = fw_priv->buf;
__fw_load_abort(buf);
-
- /* avoid user action after loading abort */
- fw_priv->buf = NULL;
}
static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_lock(&fw_lock);
fw_buf = fw_priv->buf;
- if (!fw_buf)
+ if (fw_state_is_aborted(&fw_buf->fw_st))
goto out;
switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn, end_pfn;
+ unsigned long valid_start, valid_end, valid_pages;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
struct zone *zone;
int zone_shift = 0;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
end_pfn = start_pfn + nr_pages;
- first_page = pfn_to_page(start_pfn);
/* The block contains more than one zone can not be offlined. */
- if (!test_pages_in_a_zone(start_pfn, end_pfn))
+ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
return sprintf(buf, "none\n");
- zone = page_zone(first_page);
+ zone = page_zone(pfn_to_page(valid_start));
+ valid_pages = valid_end - valid_start;
/* MMOP_ONLINE_KEEP */
sprintf(buf, "%s", zone->name);
/* MMOP_ONLINE_KERNEL */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}
/* MMOP_ONLINE_MOVABLE */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index be6a599bc0c1..0fc7c4da7756 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -206,7 +206,7 @@ platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
{
struct platform_msi_priv_data *datap;
/*
- * Limit the number of interrupts to 256 per device. Should we
+ * Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c4af00385502..647e4761dbf3 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -102,6 +102,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ if (has_acpi_companion(&dev->dev)) {
+ if (r && r->flags & IORESOURCE_DISABLED) {
+ int ret;
+
+ ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+ if (ret)
+ return ret;
+ }
+ }
+
/*
* The resources may pass trigger flags to the irqs that need
* to be set up. It so happens that the trigger flags for
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2997026b4dfb..3a75fb1b4126 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -130,7 +130,7 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
- /* Warn once for each IRQ safe dev in no sleep domain */
+ /* Warn once if IRQ safe dev in no sleep domain */
if (ret)
dev_warn_once(dev, "PM domain %s will not be powered off\n",
genpd->name);
@@ -201,7 +201,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
-static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
@@ -231,7 +231,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
}
-static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
@@ -262,10 +262,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
}
/**
- * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
* @genpd: PM domain to power off.
*
- * Queue up the execution of genpd_poweroff() unless it's already been done
+ * Queue up the execution of genpd_power_off() unless it's already been done
* before.
*/
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
@@ -274,14 +274,14 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
}
/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
struct gpd_link *link;
int ret = 0;
@@ -300,7 +300,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
genpd_sd_counter_inc(master);
genpd_lock_nested(master, depth + 1);
- ret = genpd_poweron(master, depth + 1);
+ ret = genpd_power_on(master, depth + 1);
genpd_unlock(master);
if (ret) {
@@ -309,7 +309,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
}
}
- ret = genpd_power_on(genpd, true);
+ ret = _genpd_power_on(genpd, true);
if (ret)
goto err;
@@ -368,14 +368,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * genpd_poweroff - Remove power from a given PM domain.
+ * genpd_power_off - Remove power from a given PM domain.
* @genpd: PM domain to power down.
* @is_async: PM domain is powered down from a scheduled work
*
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
*/
-static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
+static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
@@ -427,13 +427,13 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_poweron() for the master yet after
- * incrementing it. In that case genpd_poweron() will wait
+ * managed to call genpd_power_on() for the master yet after
+ * incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
- * the genpd_poweron() restore power for us (this shouldn't
+ * the genpd_power_on() restore power for us (this shouldn't
* happen very often).
*/
- ret = genpd_power_off(genpd, true);
+ ret = _genpd_power_off(genpd, true);
if (ret)
return ret;
}
@@ -459,7 +459,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
genpd_lock(genpd);
- genpd_poweroff(genpd, true);
+ genpd_power_off(genpd, true);
genpd_unlock(genpd);
}
@@ -578,7 +578,7 @@ static int genpd_runtime_suspend(struct device *dev)
return 0;
genpd_lock(genpd);
- genpd_poweroff(genpd, false);
+ genpd_power_off(genpd, false);
genpd_unlock(genpd);
return 0;
@@ -618,7 +618,7 @@ static int genpd_runtime_resume(struct device *dev)
}
genpd_lock(genpd);
- ret = genpd_poweron(genpd, 0);
+ ret = genpd_power_on(genpd, 0);
genpd_unlock(genpd);
if (ret)
@@ -658,7 +658,7 @@ err_poweroff:
if (!pm_runtime_is_irq_safe(dev) ||
(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
genpd_lock(genpd);
- genpd_poweroff(genpd, 0);
+ genpd_power_off(genpd, 0);
genpd_unlock(genpd);
}
@@ -674,9 +674,9 @@ static int __init pd_ignore_unused_setup(char *__unused)
__setup("pd_ignore_unused", pd_ignore_unused_setup);
/**
- * genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
*/
-static int __init genpd_poweroff_unused(void)
+static int __init genpd_power_off_unused(void)
{
struct generic_pm_domain *genpd;
@@ -694,7 +694,7 @@ static int __init genpd_poweroff_unused(void)
return 0;
}
-late_initcall(genpd_poweroff_unused);
+late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
@@ -727,18 +727,20 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
}
/**
- * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its masters.
*
* This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
*/
-static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
{
struct gpd_link *link;
@@ -751,26 +753,35 @@ static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
- genpd_power_off(genpd, false);
+ _genpd_power_off(genpd, false);
genpd->status = GPD_STATE_POWER_OFF;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
- genpd_sync_poweroff(link->master);
+
+ if (use_lock)
+ genpd_lock_nested(link->master, depth + 1);
+
+ genpd_sync_power_off(link->master, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->master);
}
}
/**
- * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
* @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
*
* This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
*/
-static void genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
{
struct gpd_link *link;
@@ -778,11 +789,18 @@ static void genpd_sync_poweron(struct generic_pm_domain *genpd)
return;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sync_poweron(link->master);
genpd_sd_counter_inc(link->master);
+
+ if (use_lock)
+ genpd_lock_nested(link->master, depth + 1);
+
+ genpd_sync_power_on(link->master, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->master);
}
- genpd_power_on(genpd, false);
+ _genpd_power_on(genpd, false);
genpd->status = GPD_STATE_ACTIVE;
}
@@ -888,13 +906,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
return ret;
}
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
+ genpd_lock(genpd);
genpd->suspended_count++;
- genpd_sync_poweroff(genpd);
+ genpd_sync_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
return 0;
}
@@ -919,13 +934,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd_sync_poweron(genpd);
+ genpd_lock(genpd);
+ genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
+ genpd_unlock(genpd);
if (genpd->dev_ops.stop && genpd->dev_ops.start)
ret = pm_runtime_force_resume(dev);
@@ -1002,22 +1014,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
return -EINVAL;
/*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- *
* At this point suspended_count == 0 means we are being run for the
* first time for the given domain in the present cycle.
*/
+ genpd_lock(genpd);
if (genpd->suspended_count++ == 0)
/*
* The boot kernel might put the domain into arbitrary state,
- * so make it appear as powered off to genpd_sync_poweron(),
+ * so make it appear as powered off to genpd_sync_power_on(),
* so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
- genpd_sync_poweron(genpd);
+ genpd_sync_power_on(genpd, true, 0);
+ genpd_unlock(genpd);
if (genpd->dev_ops.stop && genpd->dev_ops.start)
ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1082,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
if (suspend) {
genpd->suspended_count++;
- genpd_sync_poweroff(genpd);
+ genpd_sync_power_off(genpd, false, 0);
} else {
- genpd_sync_poweron(genpd);
+ genpd_sync_power_on(genpd, false, 0);
genpd->suspended_count--;
}
}
@@ -2043,7 +2053,7 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->sync = genpd_dev_pm_sync;
genpd_lock(pd);
- ret = genpd_poweron(pd, 0);
+ ret = genpd_power_on(pd, 0);
genpd_unlock(pd);
out:
return ret ? -EPROBE_DEFER : 0;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 35ff06283738..91ec3232d630 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -32,13 +32,7 @@ LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
-#define opp_rcu_lockdep_assert() \
-do { \
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&opp_table_lock), \
- "Missing rcu_read_lock() or " \
- "opp_table_lock protection"); \
-} while (0)
+static void dev_pm_opp_get(struct dev_pm_opp *opp);
static struct opp_device *_find_opp_dev(const struct device *dev,
struct opp_table *opp_table)
@@ -52,38 +46,46 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
return NULL;
}
+static struct opp_table *_find_opp_table_unlocked(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ list_for_each_entry(opp_table, &opp_tables, node) {
+ if (_find_opp_dev(dev, opp_table)) {
+ _get_opp_table_kref(opp_table);
+
+ return opp_table;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
/**
* _find_opp_table() - find opp_table struct using device pointer
* @dev: device pointer used to lookup OPP table
*
- * Search OPP table for one containing matching device. Does a RCU reader
- * operation to grab the pointer needed.
+ * Search OPP table for one containing matching device.
*
* Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
- * Locking: For readers, this function must be called under rcu_read_lock().
- * opp_table is a RCU protected pointer, which means that opp_table is valid
- * as long as we are under RCU lock.
- *
- * For Writers, this function must be called with opp_table_lock held.
+ * The callers must call dev_pm_opp_put_opp_table() after the table is used.
*/
struct opp_table *_find_opp_table(struct device *dev)
{
struct opp_table *opp_table;
- opp_rcu_lockdep_assert();
-
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(opp_table, &opp_tables, node)
- if (_find_opp_dev(dev, opp_table))
- return opp_table;
+ mutex_lock(&opp_table_lock);
+ opp_table = _find_opp_table_unlocked(dev);
+ mutex_unlock(&opp_table_lock);
- return ERR_PTR(-ENODEV);
+ return opp_table;
}
/**
@@ -94,29 +96,15 @@ struct opp_table *_find_opp_table(struct device *dev)
* return 0
*
* This is useful only for devices with single power supply.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. This means that opp which could have been fetched by
- * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
- * under RCU lock. The pointer returned by the opp_find_freq family must be
- * used in the same section as the usage of this function with the pointer
- * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
- * pointer.
*/
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
- struct dev_pm_opp *tmp_opp;
- unsigned long v = 0;
-
- opp_rcu_lockdep_assert();
-
- tmp_opp = rcu_dereference(opp);
- if (IS_ERR_OR_NULL(tmp_opp))
+ if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__);
- else
- v = tmp_opp->supplies[0].u_volt;
+ return 0;
+ }
- return v;
+ return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
@@ -126,29 +114,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
*
* Return: frequency in hertz corresponding to the opp, else
* return 0
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. This means that opp which could have been fetched by
- * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
- * under RCU lock. The pointer returned by the opp_find_freq family must be
- * used in the same section as the usage of this function with the pointer
- * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
- * pointer.
*/
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- struct dev_pm_opp *tmp_opp;
- unsigned long f = 0;
-
- opp_rcu_lockdep_assert();
-
- tmp_opp = rcu_dereference(opp);
- if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
pr_err("%s: Invalid parameters\n", __func__);
- else
- f = tmp_opp->rate;
+ return 0;
+ }
- return f;
+ return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
@@ -161,28 +135,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* quickly. Running on them for longer times may overheat the chip.
*
* Return: true if opp is turbo opp, else false.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. This means that opp which could have been fetched by
- * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
- * under RCU lock. The pointer returned by the opp_find_freq family must be
- * used in the same section as the usage of this function with the pointer
- * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
- * pointer.
*/
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
- struct dev_pm_opp *tmp_opp;
-
- opp_rcu_lockdep_assert();
-
- tmp_opp = rcu_dereference(opp);
- if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
pr_err("%s: Invalid parameters\n", __func__);
return false;
}
- return tmp_opp->turbo;
+ return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
@@ -191,52 +152,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
* @dev: device for which we do this operation
*
* Return: This function returns the max clock latency in nanoseconds.
- *
- * Locking: This function takes rcu_read_lock().
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
struct opp_table *opp_table;
unsigned long clock_latency_ns;
- rcu_read_lock();
-
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
- clock_latency_ns = 0;
- else
- clock_latency_ns = opp_table->clock_latency_ns_max;
-
- rcu_read_unlock();
- return clock_latency_ns;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
-
-static int _get_regulator_count(struct device *dev)
-{
- struct opp_table *opp_table;
- int count;
+ return 0;
- rcu_read_lock();
+ clock_latency_ns = opp_table->clock_latency_ns_max;
- opp_table = _find_opp_table(dev);
- if (!IS_ERR(opp_table))
- count = opp_table->regulator_count;
- else
- count = 0;
+ dev_pm_opp_put_opp_table(opp_table);
- rcu_read_unlock();
-
- return count;
+ return clock_latency_ns;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
* dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
* @dev: device for which we do this operation
*
* Return: This function returns the max voltage latency in nanoseconds.
- *
- * Locking: This function takes rcu_read_lock().
*/
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
@@ -250,35 +188,33 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
unsigned long max;
} *uV;
- count = _get_regulator_count(dev);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ count = opp_table->regulator_count;
/* Regulator may not be required for the device */
if (!count)
- return 0;
+ goto put_opp_table;
regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
if (!regulators)
- return 0;
+ goto put_opp_table;
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
goto free_regulators;
- rcu_read_lock();
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- rcu_read_unlock();
- goto free_uV;
- }
-
memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
+ mutex_lock(&opp_table->lock);
+
for (i = 0; i < count; i++) {
uV[i].min = ~0;
uV[i].max = 0;
- list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (!opp->available)
continue;
@@ -289,7 +225,7 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
}
}
- rcu_read_unlock();
+ mutex_unlock(&opp_table->lock);
/*
* The caller needs to ensure that opp_table (and hence the regulator)
@@ -301,10 +237,11 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
latency_ns += ret * 1000;
}
-free_uV:
kfree(uV);
free_regulators:
kfree(regulators);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
return latency_ns;
}
@@ -317,8 +254,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
*
* Return: This function returns the max transition latency, in nanoseconds, to
* switch from one OPP to other.
- *
- * Locking: This function takes rcu_read_lock().
*/
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
@@ -328,32 +263,29 @@ unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
/**
- * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
* @dev: device for which we do this operation
*
- * Return: This function returns pointer to the suspend opp if it is
- * defined and available, otherwise it returns NULL.
- *
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * Return: This function returns the frequency of the OPP marked as suspend_opp
+ * if one is available, else returns 0.
*/
-struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
struct opp_table *opp_table;
-
- opp_rcu_lockdep_assert();
+ unsigned long freq = 0;
opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
- !opp_table->suspend_opp->available)
- return NULL;
+ if (IS_ERR(opp_table))
+ return 0;
+
+ if (opp_table->suspend_opp && opp_table->suspend_opp->available)
+ freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
- return opp_table->suspend_opp;
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return freq;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
@@ -361,8 +293,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
*
* Return: This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
- *
- * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -370,23 +300,24 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
- rcu_read_lock();
-
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
count = PTR_ERR(opp_table);
dev_err(dev, "%s: OPP table not found (%d)\n",
__func__, count);
- goto out_unlock;
+ return count;
}
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
-out_unlock:
- rcu_read_unlock();
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -411,11 +342,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* This provides a mechanism to enable an opp which is not available currently
* or the opposite as well.
*
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
@@ -424,8 +352,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
-
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
int r = PTR_ERR(opp_table);
@@ -434,14 +360,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
return ERR_PTR(r);
}
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
break;
}
}
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
@@ -451,14 +385,21 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
break;
}
}
+ mutex_unlock(&opp_table->lock);
+
return opp;
}
@@ -477,18 +418,14 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
*
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
struct opp_table *opp_table;
-
- opp_rcu_lockdep_assert();
+ struct dev_pm_opp *opp;
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -499,7 +436,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
if (IS_ERR(opp_table))
return ERR_CAST(opp_table);
- return _find_freq_ceil(opp_table, freq);
+ opp = _find_freq_ceil(opp_table, freq);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
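As the updated kerneldoc above states, the frequency-lookup helpers now take a reference on the OPP they return instead of relying on RCU, so a caller drops rcu_read_lock()/rcu_read_unlock() and instead releases the OPP with dev_pm_opp_put() once it has read the values it needs. A hedged sketch of the new caller pattern:

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Sketch of an OPP lookup after the RCU -> kref conversion: the returned
 * OPP is reference-counted and must be released with dev_pm_opp_put(). */
static int query_ceil_opp(struct device *dev, unsigned long *freq_hz,
			  unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, freq_hz);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	*volt_uv = dev_pm_opp_get_voltage(opp);	/* no rcu_read_lock() needed */
	dev_pm_opp_put(opp);			/* drop the lookup's reference */

	return 0;
}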
@@ -518,11 +459,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
* ERANGE: no match found for search
* ENODEV: if device not found in list of registered devices
*
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
@@ -530,8 +468,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
-
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -541,7 +477,9 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
if (IS_ERR(opp_table))
return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
@@ -550,6 +488,13 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
opp = temp_opp;
}
}
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp))
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
if (!IS_ERR(opp))
*freq = opp->rate;
@@ -557,34 +502,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-/*
- * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
- * while clk returned here is used.
- */
-static struct clk *_get_opp_clk(struct device *dev)
-{
- struct opp_table *opp_table;
- struct clk *clk;
-
- rcu_read_lock();
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
- clk = ERR_CAST(opp_table);
- goto unlock;
- }
-
- clk = opp_table->clk;
- if (IS_ERR(clk))
- dev_err(dev, "%s: No clock available for the device\n",
- __func__);
-
-unlock:
- rcu_read_unlock();
- return clk;
-}
-
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
struct dev_pm_opp_supply *supply)
{
@@ -680,8 +597,6 @@ restore_voltage:
*
* This configures the power-supplies and clock source to the levels specified
* by the OPP corresponding to the target_freq.
- *
- * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
@@ -700,9 +615,19 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return -EINVAL;
}
- clk = _get_opp_clk(dev);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ return PTR_ERR(opp_table);
+ }
+
+ clk = opp_table->clk;
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s: No clock available for the device\n",
+ __func__);
+ ret = PTR_ERR(clk);
+ goto put_opp_table;
+ }
freq = clk_round_rate(clk, target_freq);
if ((long)freq <= 0)
@@ -714,16 +639,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
if (old_freq == freq) {
dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
__func__, freq);
- return 0;
- }
-
- rcu_read_lock();
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "%s: device opp doesn't exist\n", __func__);
- rcu_read_unlock();
- return PTR_ERR(opp_table);
+ ret = 0;
+ goto put_opp_table;
}
old_opp = _find_freq_ceil(opp_table, &old_freq);
@@ -737,8 +654,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
__func__, freq, ret);
- rcu_read_unlock();
- return ret;
+ goto put_old_opp;
}
dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
@@ -748,8 +664,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Only frequency scaling */
if (!regulators) {
- rcu_read_unlock();
- return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ goto put_opps;
}
if (opp_table->set_opp)
@@ -773,28 +689,26 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
data->new_opp.rate = freq;
memcpy(data->new_opp.supplies, opp->supplies, size);
- rcu_read_unlock();
+ ret = set_opp(data);
- return set_opp(data);
+put_opps:
+ dev_pm_opp_put(opp);
+put_old_opp:
+ if (!IS_ERR(old_opp))
+ dev_pm_opp_put(old_opp);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */
-static void _kfree_opp_dev_rcu(struct rcu_head *head)
-{
- struct opp_device *opp_dev;
-
- opp_dev = container_of(head, struct opp_device, rcu_head);
- kfree_rcu(opp_dev, rcu_head);
-}
-
static void _remove_opp_dev(struct opp_device *opp_dev,
struct opp_table *opp_table)
{
opp_debug_unregister(opp_dev, opp_table);
list_del(&opp_dev->node);
- call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
- _kfree_opp_dev_rcu);
+ kfree(opp_dev);
}
struct opp_device *_add_opp_dev(const struct device *dev,
@@ -809,7 +723,7 @@ struct opp_device *_add_opp_dev(const struct device *dev,
/* Initialize opp-dev */
opp_dev->dev = dev;
- list_add_rcu(&opp_dev->node, &opp_table->dev_list);
+ list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
ret = opp_debug_register(opp_dev, opp_table);
@@ -820,26 +734,12 @@ struct opp_device *_add_opp_dev(const struct device *dev,
return opp_dev;
}
-/**
- * _add_opp_table() - Find OPP table or allocate a new one
- * @dev: device for which we do this operation
- *
- * It tries to find an existing table first, if it couldn't find one, it
- * allocates a new OPP table and returns that.
- *
- * Return: valid opp_table pointer if success, else NULL.
- */
-static struct opp_table *_add_opp_table(struct device *dev)
+static struct opp_table *_allocate_opp_table(struct device *dev)
{
struct opp_table *opp_table;
struct opp_device *opp_dev;
int ret;
- /* Check for existing table for 'dev' first */
- opp_table = _find_opp_table(dev);
- if (!IS_ERR(opp_table))
- return opp_table;
-
/*
* Allocate a new OPP table. In the infrequent case where a new
* device is needed to be added, we pay this penalty.
@@ -867,50 +767,45 @@ static struct opp_table *_add_opp_table(struct device *dev)
ret);
}
- srcu_init_notifier_head(&opp_table->srcu_head);
+ BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
+ mutex_init(&opp_table->lock);
+ kref_init(&opp_table->kref);
/* Secure the device table modification */
- list_add_rcu(&opp_table->node, &opp_tables);
+ list_add(&opp_table->node, &opp_tables);
return opp_table;
}
-/**
- * _kfree_device_rcu() - Free opp_table RCU handler
- * @head: RCU head
- */
-static void _kfree_device_rcu(struct rcu_head *head)
+void _get_opp_table_kref(struct opp_table *opp_table)
{
- struct opp_table *opp_table = container_of(head, struct opp_table,
- rcu_head);
-
- kfree_rcu(opp_table, rcu_head);
+ kref_get(&opp_table->kref);
}
-/**
- * _remove_opp_table() - Removes a OPP table
- * @opp_table: OPP table to be removed.
- *
- * Removes/frees OPP table if it doesn't contain any OPPs.
- */
-static void _remove_opp_table(struct opp_table *opp_table)
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- struct opp_device *opp_dev;
+ struct opp_table *opp_table;
- if (!list_empty(&opp_table->opp_list))
- return;
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- if (opp_table->supported_hw)
- return;
+ opp_table = _find_opp_table_unlocked(dev);
+ if (!IS_ERR(opp_table))
+ goto unlock;
- if (opp_table->prop_name)
- return;
+ opp_table = _allocate_opp_table(dev);
- if (opp_table->regulators)
- return;
+unlock:
+ mutex_unlock(&opp_table_lock);
- if (opp_table->set_opp)
- return;
+ return opp_table;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
+
+static void _opp_table_kref_release(struct kref *kref)
+{
+ struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
+ struct opp_device *opp_dev;
/* Release clk */
if (!IS_ERR(opp_table->clk))
@@ -924,63 +819,60 @@ static void _remove_opp_table(struct opp_table *opp_table)
/* dev_list must be empty now */
WARN_ON(!list_empty(&opp_table->dev_list));
- list_del_rcu(&opp_table->node);
- call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
- _kfree_device_rcu);
+ mutex_destroy(&opp_table->lock);
+ list_del(&opp_table->node);
+ kfree(opp_table);
+
+ mutex_unlock(&opp_table_lock);
}
-/**
- * _kfree_opp_rcu() - Free OPP RCU handler
- * @head: RCU head
- */
-static void _kfree_opp_rcu(struct rcu_head *head)
+void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
- struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+ kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
+ &opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
- kfree_rcu(opp, rcu_head);
+void _opp_free(struct dev_pm_opp *opp)
+{
+ kfree(opp);
}
-/**
- * _opp_remove() - Remove an OPP from a table definition
- * @opp_table: points back to the opp_table struct this opp belongs to
- * @opp: pointer to the OPP to remove
- * @notify: OPP_EVENT_REMOVE notification should be sent or not
- *
- * This function removes an opp definition from the opp table.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * It is assumed that the caller holds required mutex for an RCU updater
- * strategy.
- */
-void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
- bool notify)
+static void _opp_kref_release(struct kref *kref)
{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- if (notify)
- srcu_notifier_call_chain(&opp_table->srcu_head,
- OPP_EVENT_REMOVE, opp);
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
opp_debug_remove_one(opp);
- list_del_rcu(&opp->node);
- call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ list_del(&opp->node);
+ kfree(opp);
- _remove_opp_table(opp_table);
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+}
+
+static void dev_pm_opp_get(struct dev_pm_opp *opp)
+{
+ kref_get(&opp->kref);
}
+void dev_pm_opp_put(struct dev_pm_opp *opp)
+{
+ kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put);
+
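
Illustrative usage (not part of the patch): with the RCU read-side locking gone, a looked-up OPP is pinned by a reference that the caller drops with dev_pm_opp_put(), as the dev_pm_opp_init_cpufreq_table() hunk further down does. A hedged sketch, assuming the find helpers such as dev_pm_opp_find_freq_ceil() return the OPP with a reference held:

	#include <linux/err.h>
	#include <linux/pm_opp.h>

	static int example_read_ceil_voltage(struct device *dev, unsigned long *freq,
					     unsigned long *volt)
	{
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_freq_ceil(dev, freq);	/* no rcu_read_lock() needed */
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		*volt = dev_pm_opp_get_voltage(opp);	/* valid while the reference is held */
		dev_pm_opp_put(opp);			/* drop the per-OPP reference */

		return 0;
	}
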
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
* @freq: OPP to remove with matching 'freq'
*
* This function removes an opp from the opp table.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
@@ -988,12 +880,11 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
struct opp_table *opp_table;
bool found = false;
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
- goto unlock;
+ return;
+
+ mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->rate == freq) {
@@ -1002,28 +893,23 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
}
- if (!found) {
+ mutex_unlock(&opp_table->lock);
+
+ if (found) {
+ dev_pm_opp_put(opp);
+ } else {
dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
__func__, freq);
- goto unlock;
}
- _opp_remove(opp_table, opp, true);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct opp_table **opp_table)
+struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
int count, supply_size;
- struct opp_table *table;
-
- table = _add_opp_table(dev);
- if (!table)
- return NULL;
/* Allocate space for at least one supply */
count = table->regulator_count ? table->regulator_count : 1;
@@ -1031,17 +917,13 @@ struct dev_pm_opp *_allocate_opp(struct device *dev,
/* allocate new OPP node and supplies structures */
opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
- if (!opp) {
- kfree(table);
+ if (!opp)
return NULL;
- }
/* Put the supplies at the end of the OPP structure as an empty array */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
INIT_LIST_HEAD(&opp->node);
- *opp_table = table;
-
return opp;
}
@@ -1067,11 +949,21 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
+/*
+ * Returns:
+ * 0: On success. An appropriate error message is printed for duplicate OPPs.
+ * -EBUSY: For an OPP with the same freq/volt that is already available. The
+ * callers of _opp_add() must return 0 if they receive -EBUSY from it. This is
+ * to make sure we don't print error messages unnecessarily if different parts
+ * of the kernel try to initialize the OPP table.
+ * -EEXIST: For an OPP with the same freq but a different volt, or one that is
+ * unavailable. This should be considered an error by the callers of _opp_add().
+ */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- struct list_head *head = &opp_table->opp_list;
+ struct list_head *head;
int ret;
/*
@@ -1082,7 +974,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
- list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ mutex_lock(&opp_table->lock);
+ head = &opp_table->opp_list;
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
@@ -1098,12 +993,21 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
- return opp->available &&
- new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
+ ret = opp->available &&
+ new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
+
+ mutex_unlock(&opp_table->lock);
+ return ret;
}
+ list_add(&new_opp->node, head);
+ mutex_unlock(&opp_table->lock);
+
new_opp->opp_table = opp_table;
- list_add_rcu(&new_opp->node, head);
+ kref_init(&new_opp->kref);
+
+ /* Get a reference to the OPP table */
+ _get_opp_table_kref(opp_table);
ret = opp_debug_create_one(new_opp, opp_table);
if (ret)
@@ -1121,6 +1025,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
/**
* _opp_add_v1() - Allocate a OPP based on v1 bindings.
+ * @opp_table: OPP table
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
@@ -1133,12 +1038,6 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
* and freed by dev_pm_opp_of_remove_table.
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
* Return:
* 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
@@ -1146,22 +1045,16 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
- bool dynamic)
+int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
+ unsigned long freq, long u_volt, bool dynamic)
{
- struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
unsigned long tol;
int ret;
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- new_opp = _allocate_opp(dev, &opp_table);
- if (!new_opp) {
- ret = -ENOMEM;
- goto unlock;
- }
+ new_opp = _opp_allocate(opp_table);
+ if (!new_opp)
+ return -ENOMEM;
/* populate the opp table */
new_opp->rate = freq;
@@ -1173,22 +1066,23 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
new_opp->dynamic = dynamic;
ret = _opp_add(dev, new_opp, opp_table);
- if (ret)
+ if (ret) {
+ /* Don't return error for duplicate OPPs */
+ if (ret == -EBUSY)
+ ret = 0;
goto free_opp;
-
- mutex_unlock(&opp_table_lock);
+ }
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(opp_table, new_opp, false);
-unlock:
- mutex_unlock(&opp_table_lock);
+ _opp_free(new_opp);
+
return ret;
}
@@ -1202,27 +1096,16 @@ unlock:
* specify the hierarchy of versions it supports. OPP layer will then enable
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
-int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count)
+struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions, unsigned int count)
{
struct opp_table *opp_table;
- int ret = 0;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
+ int ret;
- opp_table = _add_opp_table(dev);
- if (!opp_table) {
- ret = -ENOMEM;
- goto unlock;
- }
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1243,65 +1126,40 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
}
opp_table->supported_hw_count = count;
- mutex_unlock(&opp_table_lock);
- return 0;
+
+ return opp_table;
err:
- _remove_opp_table(opp_table);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
* dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @dev: Device for which supported-hw has to be put.
+ * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
*
* This is required only for the V2 bindings, and is called for a matching
* dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
-void dev_pm_opp_put_supported_hw(struct device *dev)
+void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- /* Check for existing table for 'dev' first */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to find opp_table: %ld\n",
- PTR_ERR(opp_table));
- goto unlock;
- }
-
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
if (!opp_table->supported_hw) {
- dev_err(dev, "%s: Doesn't have supported hardware list\n",
- __func__);
- goto unlock;
+ pr_err("%s: Doesn't have supported hardware list\n",
+ __func__);
+ return;
}
kfree(opp_table->supported_hw);
opp_table->supported_hw = NULL;
opp_table->supported_hw_count = 0;
- /* Try freeing opp_table if this was the last blocking resource */
- _remove_opp_table(opp_table);
-
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
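
Illustrative usage (not part of the patch): the set/put pair now hands an opp_table pointer back to the caller, which passes it in when releasing the resource instead of passing the device. A sketch of a probe path; the version values are hypothetical.

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/pm_opp.h>

	static const u32 example_hw_versions[] = { 0x1, 0x3 };	/* hypothetical */

	static int example_probe_supported_hw(struct device *dev)
	{
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_set_supported_hw(dev, example_hw_versions,
							ARRAY_SIZE(example_hw_versions));
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* ... add OPPs / parse DT here ... */

		/* On teardown, release using the handle rather than the device */
		dev_pm_opp_put_supported_hw(opp_table);
		return 0;
	}
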
@@ -1314,26 +1172,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
* specify the extn to be used for certain property names. The properties to
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
-int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
struct opp_table *opp_table;
- int ret = 0;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
+ int ret;
- opp_table = _add_opp_table(dev);
- if (!opp_table) {
- ret = -ENOMEM;
- goto unlock;
- }
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1352,63 +1199,37 @@ int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
goto err;
}
- mutex_unlock(&opp_table_lock);
- return 0;
+ return opp_table;
err:
- _remove_opp_table(opp_table);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
* dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @dev: Device for which the prop-name has to be put.
+ * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
*
* This is required only for the V2 bindings, and is called for a matching
* dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
-void dev_pm_opp_put_prop_name(struct device *dev)
+void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- /* Check for existing table for 'dev' first */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to find opp_table: %ld\n",
- PTR_ERR(opp_table));
- goto unlock;
- }
-
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
if (!opp_table->prop_name) {
- dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
- goto unlock;
+ pr_err("%s: Doesn't have a prop-name\n", __func__);
+ return;
}
kfree(opp_table->prop_name);
opp_table->prop_name = NULL;
- /* Try freeing opp_table if this was the last blocking resource */
- _remove_opp_table(opp_table);
-
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
@@ -1455,12 +1276,6 @@ static void _free_set_opp_data(struct opp_table *opp_table)
* well.
*
* This must be called before any OPPs are initialized for the device.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
const char * const names[],
@@ -1470,13 +1285,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
struct regulator *reg;
int ret, i;
- mutex_lock(&opp_table_lock);
-
- opp_table = _add_opp_table(dev);
- if (!opp_table) {
- ret = -ENOMEM;
- goto unlock;
- }
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
@@ -1518,7 +1329,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
if (ret)
goto free_regulators;
- mutex_unlock(&opp_table_lock);
return opp_table;
free_regulators:
@@ -1529,9 +1339,7 @@ free_regulators:
opp_table->regulators = NULL;
opp_table->regulator_count = 0;
err:
- _remove_opp_table(opp_table);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
return ERR_PTR(ret);
}
@@ -1540,22 +1348,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
* dev_pm_opp_put_regulators() - Releases resources blocked for regulator
* @opp_table: OPP table returned from dev_pm_opp_set_regulators().
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
int i;
- mutex_lock(&opp_table_lock);
-
if (!opp_table->regulators) {
pr_err("%s: Doesn't have regulators set\n", __func__);
- goto unlock;
+ return;
}
/* Make sure there are no concurrent readers while updating opp_table */
@@ -1570,11 +1370,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
opp_table->regulators = NULL;
opp_table->regulator_count = 0;
- /* Try freeing opp_table if this was the last blocking resource */
- _remove_opp_table(opp_table);
-
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
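
Illustrative usage (not part of the patch): the regulator helpers follow the same handle-based pattern. A sketch; the supply name is hypothetical and must be set before any OPPs are initialized for the device.

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/pm_opp.h>

	static const char * const example_reg_names[] = { "vdd-core" };	/* hypothetical */

	static int example_probe_regulators(struct device *dev)
	{
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_set_regulators(dev, example_reg_names,
						      ARRAY_SIZE(example_reg_names));
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* ... initialize OPPs only after the regulators are set ... */

		dev_pm_opp_put_regulators(opp_table);
		return 0;
	}
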
@@ -1587,29 +1383,19 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
* regulators per device), instead of the generic OPP set rate helper.
*
* This must be called before any OPPs are initialized for the device.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
-int dev_pm_opp_register_set_opp_helper(struct device *dev,
+struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
struct opp_table *opp_table;
int ret;
if (!set_opp)
- return -EINVAL;
-
- mutex_lock(&opp_table_lock);
+ return ERR_PTR(-EINVAL);
- opp_table = _add_opp_table(dev);
- if (!opp_table) {
- ret = -ENOMEM;
- goto unlock;
- }
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
@@ -1625,47 +1411,28 @@ int dev_pm_opp_register_set_opp_helper(struct device *dev,
opp_table->set_opp = set_opp;
- mutex_unlock(&opp_table_lock);
- return 0;
+ return opp_table;
err:
- _remove_opp_table(opp_table);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
* dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
* set_opp helper
- * @dev: Device for which custom set_opp helper has to be cleared.
+ * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
+ * Release resources blocked for the platform-specific set_opp helper.
*/
-void dev_pm_opp_register_put_opp_helper(struct device *dev)
+void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- mutex_lock(&opp_table_lock);
-
- /* Check for existing table for 'dev' first */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to find opp_table: %ld\n",
- PTR_ERR(opp_table));
- goto unlock;
- }
-
if (!opp_table->set_opp) {
- dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
- __func__);
- goto unlock;
+ pr_err("%s: Doesn't have custom set_opp helper set\n",
+ __func__);
+ return;
}
/* Make sure there are no concurrent readers while updating opp_table */
@@ -1673,11 +1440,7 @@ void dev_pm_opp_register_put_opp_helper(struct device *dev)
opp_table->set_opp = NULL;
- /* Try freeing opp_table if this was the last blocking resource */
- _remove_opp_table(opp_table);
-
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
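
Illustrative usage (not part of the patch): a sketch of registering a platform-specific set_opp helper with the new handle-based API. The helper body is a placeholder; a real implementation would sequence clocks and regulators for the target OPP.

	#include <linux/err.h>
	#include <linux/pm_opp.h>

	/* Hypothetical helper: program clocks/regulators for the target OPP */
	static int example_set_opp(struct dev_pm_set_opp_data *data)
	{
		/* platform-specific sequencing would go here */
		return 0;
	}

	static int example_register_helper(struct device *dev)
	{
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_register_set_opp_helper(dev, example_set_opp);
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* ... */

		dev_pm_opp_register_put_opp_helper(opp_table);
		return 0;
	}
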
@@ -1691,12 +1454,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
* Return:
* 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
@@ -1706,7 +1463,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
- return _opp_add_v1(dev, freq, u_volt, true);
+ struct opp_table *opp_table;
+ int ret;
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return -ENOMEM;
+
+ ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
+
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
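
Illustrative usage (not part of the patch): dynamic OPPs are still added and removed through the same exported calls; only the internals changed. A sketch with purely illustrative frequency and voltage values.

	#include <linux/pm_opp.h>

	static int example_add_dynamic_opp(struct device *dev)
	{
		int ret;

		/* 1 GHz at 975000 uV -- values are purely illustrative */
		ret = dev_pm_opp_add(dev, 1000000000, 975000);
		if (ret)
			return ret;

		/* ... */

		dev_pm_opp_remove(dev, 1000000000);
		return 0;
	}
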
@@ -1716,41 +1483,30 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* @freq: OPP frequency to modify availability
* @availability_req: availability status requested for this opp
*
- * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
- * share a common logic which is isolated here.
+ * Set the availability of an OPP; opp_{enable,disable} share common logic
+ * which is isolated here.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
* copy operation, returns 0 if no modification was done OR modification was
* successful.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks to
- * keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct opp_table *opp_table;
- struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
- /* keep the node allocated */
- new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp)
- return -ENOMEM;
-
- mutex_lock(&opp_table_lock);
-
/* Find the opp_table */
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
r = PTR_ERR(opp_table);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- goto unlock;
+ return r;
}
+ mutex_lock(&opp_table->lock);
+
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
if (tmp_opp->rate == freq) {
@@ -1758,6 +1514,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
break;
}
}
+
if (IS_ERR(opp)) {
r = PTR_ERR(opp);
goto unlock;
@@ -1766,29 +1523,20 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
/* Is update really needed? */
if (opp->available == availability_req)
goto unlock;
- /* copy the old data over */
- *new_opp = *opp;
- /* plug in new node */
- new_opp->available = availability_req;
-
- list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&opp_table_lock);
- call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ opp->available = availability_req;
/* Notify the change of the OPP availability */
if (availability_req)
- srcu_notifier_call_chain(&opp_table->srcu_head,
- OPP_EVENT_ENABLE, new_opp);
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
+ opp);
else
- srcu_notifier_call_chain(&opp_table->srcu_head,
- OPP_EVENT_DISABLE, new_opp);
-
- return 0;
+ blocking_notifier_call_chain(&opp_table->head,
+ OPP_EVENT_DISABLE, opp);
unlock:
- mutex_unlock(&opp_table_lock);
- kfree(new_opp);
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
return r;
}
@@ -1801,12 +1549,6 @@ unlock:
* corresponding error value. It is meant to be used to make an OPP available
* again after it was temporarily made unavailable with dev_pm_opp_disable.
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU and mutex locks to keep the
- * integrity of the internal data structures. Callers should ensure that
- * this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- *
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
* copy operation, returns 0 if no modification was done OR modification was
* successful.
@@ -1827,12 +1569,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU and mutex locks to keep the
- * integrity of the internal data structures. Callers should ensure that
- * this function is *NOT* called under RCU protection or in contexts where
- * mutex locking or synchronize_rcu() blocking calls cannot be used.
- *
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
* copy operation, returns 0 if no modification was done OR modification was
* successful.
@@ -1844,41 +1580,78 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq)
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
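
Illustrative usage (not part of the patch): a sketch of toggling a single OPP's availability, e.g. from a thermal handler. The 1.2 GHz frequency and the trigger condition are hypothetical.

	#include <linux/pm_opp.h>

	static void example_toggle_opp(struct device *dev, bool thermal_ok)
	{
		if (thermal_ok)
			dev_pm_opp_enable(dev, 1200000000);
		else
			dev_pm_opp_disable(dev, 1200000000);
	}
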
/**
- * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup OPP table.
+ * dev_pm_opp_register_notifier() - Register OPP notifier for the device
+ * @dev: Device for which notifier needs to be registered
+ * @nb: Notifier block to be registered
*
- * Return: pointer to notifier head if found, otherwise -ENODEV or
- * -EINVAL based on type of error casted as pointer. value must be checked
- * with IS_ERR to determine valid pointer or error result.
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
+
+ ret = blocking_notifier_chain_register(&opp_table->head, nb);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL(dev_pm_opp_register_notifier);
+
+/**
+ * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
+ * @dev: Device for which notifier needs to be unregistered
+ * @nb: Notifier block to be unregistered
*
- * Locking: This function must be called under rcu_read_lock(). opp_table is a
- * RCU protected pointer. The reason for the same is that the opp pointer which
- * is returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * Return: 0 on success or a negative error value.
*/
-struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
+int dev_pm_opp_unregister_notifier(struct device *dev,
+ struct notifier_block *nb)
{
- struct opp_table *opp_table = _find_opp_table(dev);
+ struct opp_table *opp_table;
+ int ret;
+ opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
- return ERR_CAST(opp_table); /* matching type */
+ return PTR_ERR(opp_table);
+
+ ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
- return &opp_table->srcu_head;
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
+EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
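
Illustrative usage (not part of the patch): the two new calls replace fetching the srcu_notifier_head with dev_pm_opp_get_notifier() and registering against it directly. A sketch of a notifier callback and its (un)registration; names are hypothetical.

	#include <linux/notifier.h>
	#include <linux/pm_opp.h>

	static int example_opp_notifier(struct notifier_block *nb, unsigned long event,
					void *data)
	{
		/* 'data' points to the struct dev_pm_opp that changed */
		switch (event) {
		case OPP_EVENT_ADD:
		case OPP_EVENT_REMOVE:
		case OPP_EVENT_ENABLE:
		case OPP_EVENT_DISABLE:
			/* refresh any cached view of the OPP list here */
			break;
		}

		return NOTIFY_OK;
	}

	static struct notifier_block example_opp_nb = {
		.notifier_call = example_opp_notifier,
	};

	/* dev_pm_opp_register_notifier(dev, &example_opp_nb);   e.g. in probe  */
	/* dev_pm_opp_unregister_notifier(dev, &example_opp_nb); e.g. in remove */
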
/*
* Free OPPs either created using static entries present in DT or even the
* dynamically added entries based on remove_all param.
*/
-void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
+void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
+ bool remove_all)
{
- struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
+ /* Find if opp_table manages a single device */
+ if (list_is_singular(&opp_table->dev_list)) {
+ /* Free static OPPs */
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (remove_all || !opp->dynamic)
+ dev_pm_opp_put(opp);
+ }
+ } else {
+ _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
+ }
+}
+
+void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
+{
+ struct opp_table *opp_table;
/* Check for existing table for 'dev' */
opp_table = _find_opp_table(dev);
@@ -1890,22 +1663,12 @@ void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
- goto unlock;
+ return;
}
- /* Find if opp_table manages a single device */
- if (list_is_singular(&opp_table->dev_list)) {
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (remove_all || !opp->dynamic)
- _opp_remove(opp_table, opp, true);
- }
- } else {
- _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
- }
+ _dev_pm_opp_remove_table(opp_table, dev, remove_all);
-unlock:
- mutex_unlock(&opp_table_lock);
+ dev_pm_opp_put_opp_table(opp_table);
}
/**
@@ -1914,15 +1677,9 @@ unlock:
*
* Free both OPPs created using static entries present in DT and the
* dynamically added entries.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_remove_table(struct device *dev)
{
- _dev_pm_opp_remove_table(dev, true);
+ _dev_pm_opp_find_and_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 8c3434bdb26d..2d87bc1adf38 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -42,11 +42,6 @@
*
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if any of the mentioned functions have been invoked in the interim.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Since we just use the regular accessor functions to access the internal data
- * structures, we use RCU read lock inside this function. As a result, users of
- * this function DONOT need to use explicit locks for invoking.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
@@ -56,19 +51,13 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
int i, max_opps, ret = 0;
unsigned long rate;
- rcu_read_lock();
-
max_opps = dev_pm_opp_get_opp_count(dev);
- if (max_opps <= 0) {
- ret = max_opps ? max_opps : -ENODATA;
- goto out;
- }
+ if (max_opps <= 0)
+ return max_opps ? max_opps : -ENODATA;
freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
- if (!freq_table) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!freq_table)
+ return -ENOMEM;
for (i = 0, rate = 0; i < max_opps; i++, rate++) {
/* find next rate */
@@ -83,6 +72,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
/* Is Boost/turbo opp ? */
if (dev_pm_opp_is_turbo(opp))
freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+
+ dev_pm_opp_put(opp);
}
freq_table[i].driver_data = i;
@@ -91,7 +82,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
*table = &freq_table[0];
out:
- rcu_read_unlock();
if (ret)
kfree(freq_table);
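
Illustrative usage (not part of the patch): with the rcu_read_lock()/unlock() bracket removed above, a cpufreq driver simply builds and frees the table; the OPP references are handled internally now. A sketch; the call sites in a real driver would be the cpufreq ->init()/->exit() hooks.

	#include <linux/cpufreq.h>
	#include <linux/pm_opp.h>

	static int example_build_freq_table(struct device *cpu_dev)
	{
		struct cpufreq_frequency_table *freq_table;
		int ret;

		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
		if (ret)
			return ret;

		/* ... hand freq_table to the cpufreq core ... */

		/* later, typically on teardown: */
		dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
		return 0;
	}
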
@@ -147,12 +137,6 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
* This removes the OPP tables for CPUs present in the @cpumask.
* This should be used to remove all the OPP entries associated with
* the cpus in @cpumask.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
@@ -169,12 +153,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
* @cpumask.
*
* Returns -ENODEV if OPP table isn't already present.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
const struct cpumask *cpumask)
@@ -184,13 +162,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
struct device *dev;
int cpu, ret = 0;
- mutex_lock(&opp_table_lock);
-
opp_table = _find_opp_table(cpu_dev);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
- goto unlock;
- }
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
for_each_cpu(cpu, cpumask) {
if (cpu == cpu_dev->id)
@@ -213,8 +187,8 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
/* Mark opp-table as multiple CPUs are sharing it now */
opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
}
-unlock:
- mutex_unlock(&opp_table_lock);
+
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -229,12 +203,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
*
* Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
* table's status is access-unknown.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
@@ -242,17 +210,13 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
struct opp_table *opp_table;
int ret = 0;
- mutex_lock(&opp_table_lock);
-
opp_table = _find_opp_table(cpu_dev);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
- goto unlock;
- }
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
ret = -EINVAL;
- goto unlock;
+ goto put_opp_table;
}
cpumask_clear(cpumask);
@@ -264,8 +228,8 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
cpumask_set_cpu(cpu_dev->id, cpumask);
}
-unlock:
- mutex_unlock(&opp_table_lock);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 3f7d2591b173..779428676f63 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -24,9 +24,11 @@
static struct opp_table *_managed_opp(const struct device_node *np)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table, *managed_table = NULL;
+
+ mutex_lock(&opp_table_lock);
- list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ list_for_each_entry(opp_table, &opp_tables, node) {
if (opp_table->np == np) {
/*
* Multiple devices can point to the same OPP table and
@@ -35,14 +37,18 @@ static struct opp_table *_managed_opp(const struct device_node *np)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
- return opp_table;
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+ _get_opp_table_kref(opp_table);
+ managed_table = opp_table;
+ }
- return NULL;
+ break;
}
}
- return NULL;
+ mutex_unlock(&opp_table_lock);
+
+ return managed_table;
}
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
@@ -229,34 +235,28 @@ free_microvolt:
* @dev: device pointer used to lookup OPP table.
*
* Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- _dev_pm_opp_remove_table(dev, false);
+ _dev_pm_opp_find_and_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
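
Illustrative usage (not part of the patch): the DT entry points keep their signatures; only the removal path underneath changed. A sketch of the usual probe/remove pairing; the driver callbacks are hypothetical.

	#include <linux/platform_device.h>
	#include <linux/pm_opp.h>

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		/* Parse operating-points / operating-points-v2 from DT */
		ret = dev_pm_opp_of_add_table(&pdev->dev);
		if (ret)
			return ret;

		/* ... */
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		dev_pm_opp_of_remove_table(&pdev->dev);
		return 0;
	}
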
/* Returns opp descriptor node for a device, caller must do of_node_put() */
-static struct device_node *_of_get_opp_desc_node(struct device *dev)
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
/*
- * TODO: Support for multiple OPP tables.
- *
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @opp_table: OPP table
* @dev: device for which we do this operation
* @np: device node
*
@@ -264,12 +264,6 @@ static struct device_node *_of_get_opp_desc_node(struct device *dev)
* opp can be controlled using dev_pm_opp_enable/disable functions and may be
* removed by dev_pm_opp_remove.
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
* Return:
* 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
@@ -278,22 +272,17 @@ static struct device_node *_of_get_opp_desc_node(struct device *dev)
* -ENOMEM Memory allocation failure
* -EINVAL Failed parsing the OPP node
*/
-static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
+ struct device_node *np)
{
- struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
u64 rate;
u32 val;
int ret;
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- new_opp = _allocate_opp(dev, &opp_table);
- if (!new_opp) {
- ret = -ENOMEM;
- goto unlock;
- }
+ new_opp = _opp_allocate(opp_table);
+ if (!new_opp)
+ return -ENOMEM;
ret = of_property_read_u64(np, "opp-hz", &rate);
if (ret < 0) {
@@ -327,8 +316,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
goto free_opp;
ret = _opp_add(dev, new_opp, opp_table);
- if (ret)
+ if (ret) {
+ /* Don't return error for duplicate OPPs */
+ if (ret == -EBUSY)
+ ret = 0;
goto free_opp;
+ }
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
@@ -345,8 +338,6 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- mutex_unlock(&opp_table_lock);
-
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
__func__, new_opp->turbo, new_opp->rate,
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
@@ -356,13 +347,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(opp_table, new_opp, false);
-unlock:
- mutex_unlock(&opp_table_lock);
+ _opp_free(new_opp);
+
return ret;
}
@@ -373,41 +363,35 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
struct opp_table *opp_table;
int ret = 0, count = 0;
- mutex_lock(&opp_table_lock);
-
opp_table = _managed_opp(opp_np);
if (opp_table) {
/* OPPs are already managed */
if (!_add_opp_dev(dev, opp_table))
ret = -ENOMEM;
- mutex_unlock(&opp_table_lock);
- return ret;
+ goto put_opp_table;
}
- mutex_unlock(&opp_table_lock);
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return -ENOMEM;
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
count++;
- ret = _opp_add_static_v2(dev, np);
+ ret = _opp_add_static_v2(opp_table, dev, np);
if (ret) {
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
- goto free_table;
+ _dev_pm_opp_remove_table(opp_table, dev, false);
+ goto put_opp_table;
}
}
/* There should be one or more OPPs defined */
- if (WARN_ON(!count))
- return -ENOENT;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _find_opp_table(dev);
- if (WARN_ON(IS_ERR(opp_table))) {
- ret = PTR_ERR(opp_table);
- mutex_unlock(&opp_table_lock);
- goto free_table;
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto put_opp_table;
}
opp_table->np = opp_np;
@@ -416,12 +400,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
else
opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
- mutex_unlock(&opp_table_lock);
-
- return 0;
-
-free_table:
- dev_pm_opp_of_remove_table(dev);
+put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -429,9 +409,10 @@ free_table:
/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
+ struct opp_table *opp_table;
const struct property *prop;
const __be32 *val;
- int nr;
+ int nr, ret = 0;
prop = of_find_property(dev->of_node, "operating-points", NULL);
if (!prop)
@@ -449,18 +430,27 @@ static int _of_add_opp_table_v1(struct device *dev)
return -EINVAL;
}
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return -ENOMEM;
+
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_v1(dev, freq, volt, false))
- dev_warn(dev, "%s: Failed to add OPP %ld\n",
- __func__, freq);
+ ret = _opp_add_v1(opp_table, dev, freq, volt, false);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
+ __func__, freq, ret);
+ _dev_pm_opp_remove_table(opp_table, dev, false);
+ break;
+ }
nr -= 2;
}
- return 0;
+ dev_pm_opp_put_opp_table(opp_table);
+ return ret;
}
/**
@@ -469,12 +459,6 @@ static int _of_add_opp_table_v1(struct device *dev)
*
* Register the initial OPP table with the OPP library for given device.
*
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
* Return:
* 0 On success OR
* Duplicate OPPs (both freq and volt are same) and opp->available
@@ -495,7 +479,7 @@ int dev_pm_opp_of_add_table(struct device *dev)
* OPPs have two version of bindings now. The older one is deprecated,
* try for the new binding first.
*/
- opp_np = _of_get_opp_desc_node(dev);
+ opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
if (!opp_np) {
/*
* Try old-deprecated bindings for backward compatibility with
@@ -519,12 +503,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*
* This removes the OPP tables for CPUs present in the @cpumask.
* This should be used only to remove static entries created from DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
@@ -537,12 +515,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
* @cpumask: cpumask for which OPP table needs to be added.
*
* This adds the OPP tables for CPUs present in the @cpumask.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
@@ -590,12 +562,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
* This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
*
* Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
*/
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
@@ -605,7 +571,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
int cpu, ret = 0;
/* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np) {
dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
return -ENOENT;
@@ -630,7 +596,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
}
/* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
if (!tmp_np) {
dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
__func__);
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index af9f2b849a66..166eef990599 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -16,11 +16,11 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/pm_opp.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
+#include <linux/notifier.h>
struct clk;
struct regulator;
@@ -51,11 +51,9 @@ extern struct list_head opp_tables;
* @node: opp table node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
- * RCU usage: opp table is traversed with RCU locks. node
- * modification is possible realtime, hence the modifications
- * are protected by the opp_table_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
+ * @kref: for reference count of the OPP.
* @available: true/false - marks if this OPP as available or not
* @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
@@ -65,7 +63,6 @@ extern struct list_head opp_tables;
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @opp_table: points back to the opp_table struct this opp belongs to
- * @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
* @dentry: debugfs dentry pointer (per opp)
*
@@ -73,6 +70,7 @@ extern struct list_head opp_tables;
*/
struct dev_pm_opp {
struct list_head node;
+ struct kref kref;
bool available;
bool dynamic;
@@ -85,7 +83,6 @@ struct dev_pm_opp {
unsigned long clock_latency_ns;
struct opp_table *opp_table;
- struct rcu_head rcu_head;
struct device_node *np;
@@ -98,7 +95,6 @@ struct dev_pm_opp {
* struct opp_device - devices managed by 'struct opp_table'
* @node: list node
* @dev: device to which the struct object belongs
- * @rcu_head: RCU callback head used for deferred freeing
* @dentry: debugfs dentry pointer (per device)
*
* This is an internal data structure maintaining the devices that are managed
@@ -107,7 +103,6 @@ struct dev_pm_opp {
struct opp_device {
struct list_head node;
const struct device *dev;
- struct rcu_head rcu_head;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -125,12 +120,11 @@ enum opp_table_access {
* @node: table node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
* table.
- * RCU usage: nodes are not modified in the table of opp_table,
- * however addition is possible and is secured by opp_table_lock
- * @srcu_head: notifier head to notify the OPP availability changes.
- * @rcu_head: RCU callback head used for deferred freeing
+ * @head: notifier head to notify the OPP availability changes.
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
+ * @kref: for reference count of the table.
+ * @lock: mutex protecting the opp_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
@@ -151,18 +145,15 @@ enum opp_table_access {
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared with users as it is
* meant for bookkeeping and is private to the OPP library.
- *
- * Because the opp structures can be used from both rcu and srcu readers, we
- * need to wait for the grace period of both of them before freeing any
- * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
struct opp_table {
struct list_head node;
- struct srcu_notifier_head srcu_head;
- struct rcu_head rcu_head;
+ struct blocking_notifier_head head;
struct list_head dev_list;
struct list_head opp_list;
+ struct kref kref;
+ struct mutex lock;
struct device_node *np;
unsigned long clock_latency_ns_max;
@@ -190,14 +181,17 @@ struct opp_table {
};
/* Routines internal to opp core */
+void _get_opp_table_kref(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
-struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
+void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
+void _opp_free(struct dev_pm_opp *opp);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
-void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
-int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+struct opp_table *_add_opp_table(struct device *dev);
#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 58fcc758334e..d888d9869b6a 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -281,7 +281,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
- kfree(c->notifiers);
+ kfree(qos->resume_latency.notifiers);
kfree(qos);
out:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 872eac4cb1df..a14fac6a01d3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 404d94c6c8bc..ae0429827f31 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
struct wake_irq *wirq = _wirq;
int res;
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
@@ -183,6 +190,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->irq = irq;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
/*
* Consumer device may need to power up and restore state
* so we use a threaded irq.
@@ -312,8 +322,12 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (!wirq)
return;
- if (device_may_wakeup(wirq->dev))
+ if (device_may_wakeup(wirq->dev)) {
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ enable_irq(wirq->irq);
+
enable_irq_wake(wirq->irq);
+ }
}
/**
@@ -328,6 +342,10 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
if (!wirq)
return;
- if (device_may_wakeup(wirq->dev))
+ if (device_may_wakeup(wirq->dev)) {
disable_irq_wake(wirq->irq);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ disable_irq_nosync(wirq->irq);
+ }
}
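
Illustrative usage (not part of the patch): for context on the wake IRQ changes above, a sketch of how a driver sets up a dedicated wake IRQ. The probe wiring is hypothetical; the IRQ is requested disabled (IRQ_NOAUTOEN) and is enabled/armed by the PM core as shown in the hunks above.

	#include <linux/pm_wakeirq.h>
	#include <linux/pm_wakeup.h>

	static int example_setup_wakeirq(struct device *dev, int irq)
	{
		int ret;

		device_init_wakeup(dev, true);

		ret = dev_pm_set_dedicated_wake_irq(dev, irq);
		if (ret)
			device_init_wakeup(dev, false);

		return ret;
	}
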
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 43a36d68c3fd..c458c63e353f 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
struct property_set {
struct fwnode_handle fwnode;
- struct property_entry *properties;
+ const struct property_entry *properties;
};
static inline bool is_pset_node(struct fwnode_handle *fwnode)
@@ -35,10 +35,10 @@ static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
container_of(fwnode, struct property_set, fwnode) : NULL;
}
-static struct property_entry *pset_prop_get(struct property_set *pset,
- const char *name)
+static const struct property_entry *pset_prop_get(struct property_set *pset,
+ const char *name)
{
- struct property_entry *prop;
+ const struct property_entry *prop;
if (!pset || !pset->properties)
return NULL;
@@ -50,11 +50,11 @@ static struct property_entry *pset_prop_get(struct property_set *pset,
return NULL;
}
-static void *pset_prop_find(struct property_set *pset, const char *propname,
- size_t length)
+static const void *pset_prop_find(struct property_set *pset,
+ const char *propname, size_t length)
{
- struct property_entry *prop;
- void *pointer;
+ const struct property_entry *prop;
+ const void *pointer;
prop = pset_prop_get(pset, propname);
if (!prop)
@@ -74,7 +74,7 @@ static int pset_prop_read_u8_array(struct property_set *pset,
const char *propname,
u8 *values, size_t nval)
{
- void *pointer;
+ const void *pointer;
size_t length = nval * sizeof(*values);
pointer = pset_prop_find(pset, propname, length);
@@ -89,7 +89,7 @@ static int pset_prop_read_u16_array(struct property_set *pset,
const char *propname,
u16 *values, size_t nval)
{
- void *pointer;
+ const void *pointer;
size_t length = nval * sizeof(*values);
pointer = pset_prop_find(pset, propname, length);
@@ -104,7 +104,7 @@ static int pset_prop_read_u32_array(struct property_set *pset,
const char *propname,
u32 *values, size_t nval)
{
- void *pointer;
+ const void *pointer;
size_t length = nval * sizeof(*values);
pointer = pset_prop_find(pset, propname, length);
@@ -119,7 +119,7 @@ static int pset_prop_read_u64_array(struct property_set *pset,
const char *propname,
u64 *values, size_t nval)
{
- void *pointer;
+ const void *pointer;
size_t length = nval * sizeof(*values);
pointer = pset_prop_find(pset, propname, length);
@@ -133,7 +133,7 @@ static int pset_prop_read_u64_array(struct property_set *pset,
static int pset_prop_count_elems_of_size(struct property_set *pset,
const char *propname, size_t length)
{
- struct property_entry *prop;
+ const struct property_entry *prop;
prop = pset_prop_get(pset, propname);
if (!prop)
@@ -146,7 +146,7 @@ static int pset_prop_read_string_array(struct property_set *pset,
const char *propname,
const char **strings, size_t nval)
{
- void *pointer;
+ const void *pointer;
size_t length = nval * sizeof(*strings);
pointer = pset_prop_find(pset, propname, length);
@@ -160,8 +160,8 @@ static int pset_prop_read_string_array(struct property_set *pset,
static int pset_prop_read_string(struct property_set *pset,
const char *propname, const char **strings)
{
- struct property_entry *prop;
- const char **pointer;
+ const struct property_entry *prop;
+ const char * const *pointer;
prop = pset_prop_get(pset, propname);
if (!prop)
@@ -682,77 +682,64 @@ out:
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
-/**
- * pset_free_set - releases memory allocated for copied property set
- * @pset: Property set to release
- *
- * Function takes previously copied property set and releases all the
- * memory allocated to it.
- */
-static void pset_free_set(struct property_set *pset)
+static int property_copy_string_array(struct property_entry *dst,
+ const struct property_entry *src)
{
- const struct property_entry *prop;
- size_t i, nval;
+ char **d;
+ size_t nval = src->length / sizeof(*d);
+ int i;
- if (!pset)
- return;
+ d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
- for (prop = pset->properties; prop->name; prop++) {
- if (prop->is_array) {
- if (prop->is_string && prop->pointer.str) {
- nval = prop->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(prop->pointer.str[i]);
- }
- kfree(prop->pointer.raw_data);
- } else if (prop->is_string) {
- kfree(prop->value.str);
+ for (i = 0; i < nval; i++) {
+ d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
+ if (!d[i] && src->pointer.str[i]) {
+ while (--i >= 0)
+ kfree(d[i]);
+ kfree(d);
+ return -ENOMEM;
}
- kfree(prop->name);
}
- kfree(pset->properties);
- kfree(pset);
+ dst->pointer.raw_data = d;
+ return 0;
}
-static int pset_copy_entry(struct property_entry *dst,
- const struct property_entry *src)
+static int property_entry_copy_data(struct property_entry *dst,
+ const struct property_entry *src)
{
- const char **d, **s;
- size_t i, nval;
+ int error;
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name)
return -ENOMEM;
if (src->is_array) {
- if (!src->length)
- return -ENODATA;
+ if (!src->length) {
+ error = -ENODATA;
+ goto out_free_name;
+ }
if (src->is_string) {
- nval = src->length / sizeof(const char *);
- dst->pointer.str = kcalloc(nval, sizeof(const char *),
- GFP_KERNEL);
- if (!dst->pointer.str)
- return -ENOMEM;
-
- d = dst->pointer.str;
- s = src->pointer.str;
- for (i = 0; i < nval; i++) {
- d[i] = kstrdup(s[i], GFP_KERNEL);
- if (!d[i] && s[i])
- return -ENOMEM;
- }
+ error = property_copy_string_array(dst, src);
+ if (error)
+ goto out_free_name;
} else {
dst->pointer.raw_data = kmemdup(src->pointer.raw_data,
src->length, GFP_KERNEL);
- if (!dst->pointer.raw_data)
- return -ENOMEM;
+ if (!dst->pointer.raw_data) {
+ error = -ENOMEM;
+ goto out_free_name;
+ }
}
} else if (src->is_string) {
dst->value.str = kstrdup(src->value.str, GFP_KERNEL);
- if (!dst->value.str && src->value.str)
- return -ENOMEM;
+ if (!dst->value.str && src->value.str) {
+ error = -ENOMEM;
+ goto out_free_name;
+ }
} else {
dst->value.raw_data = src->value.raw_data;
}
@@ -762,6 +749,95 @@ static int pset_copy_entry(struct property_entry *dst,
dst->is_string = src->is_string;
return 0;
+
+out_free_name:
+ kfree(dst->name);
+ return error;
+}
+
+static void property_entry_free_data(const struct property_entry *p)
+{
+ size_t i, nval;
+
+ if (p->is_array) {
+ if (p->is_string && p->pointer.str) {
+ nval = p->length / sizeof(const char *);
+ for (i = 0; i < nval; i++)
+ kfree(p->pointer.str[i]);
+ }
+ kfree(p->pointer.raw_data);
+ } else if (p->is_string) {
+ kfree(p->value.str);
+ }
+ kfree(p->name);
+}
+
+/**
+ * property_entries_dup - duplicate array of properties
+ * @properties: array of properties to copy
+ *
+ * This function creates a deep copy of the given NULL-terminated array
+ * of property entries.
+ */
+struct property_entry *
+property_entries_dup(const struct property_entry *properties)
+{
+ struct property_entry *p;
+ int i, n = 0;
+
+ while (properties[n].name)
+ n++;
+
+ p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < n; i++) {
+ int ret = property_entry_copy_data(&p[i], &properties[i]);
+ if (ret) {
+ while (--i >= 0)
+ property_entry_free_data(&p[i]);
+ kfree(p);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(property_entries_dup);
+
+/**
+ * property_entries_free - free previously allocated array of properties
+ * @properties: array of properties to destroy
+ *
+ * This function frees given NULL-terminated array of property entries,
+ * along with their data.
+ */
+void property_entries_free(const struct property_entry *properties)
+{
+ const struct property_entry *p;
+
+ for (p = properties; p->name; p++)
+ property_entry_free_data(p);
+
+ kfree(properties);
+}
+EXPORT_SYMBOL_GPL(property_entries_free);
+
+/**
+ * pset_free_set - releases memory allocated for copied property set
+ * @pset: Property set to release
+ *
+ * Function takes previously copied property set and releases all the
+ * memory allocated to it.
+ */
+static void pset_free_set(struct property_set *pset)
+{
+ if (!pset)
+ return;
+
+ property_entries_free(pset->properties);
+ kfree(pset);
}
/**
@@ -776,32 +852,20 @@ static int pset_copy_entry(struct property_entry *dst,
*/
static struct property_set *pset_copy_set(const struct property_set *pset)
{
- const struct property_entry *entry;
+ struct property_entry *properties;
struct property_set *p;
- size_t i, n = 0;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
- while (pset->properties[n].name)
- n++;
-
- p->properties = kcalloc(n + 1, sizeof(*entry), GFP_KERNEL);
- if (!p->properties) {
+ properties = property_entries_dup(pset->properties);
+ if (IS_ERR(properties)) {
kfree(p);
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < n; i++) {
- int ret = pset_copy_entry(&p->properties[i],
- &pset->properties[i]);
- if (ret) {
- pset_free_set(p);
- return ERR_PTR(ret);
- }
+ return ERR_CAST(properties);
}
+ p->properties = properties;
return p;
}
@@ -847,7 +911,8 @@ EXPORT_SYMBOL_GPL(device_remove_properties);
* @dev as its secondary firmware node. The function takes a copy of
* @properties.
*/
-int device_add_properties(struct device *dev, struct property_entry *properties)
+int device_add_properties(struct device *dev,
+ const struct property_entry *properties)
{
struct property_set *p, pset;
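A short usage sketch for the new helpers, assuming a build-time property table (names and values are hypothetical):
/* Sketch: duplicating a static property table with the new helpers. */
#include <linux/err.h>
#include <linux/property.h>

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("assumed-clock-frequency", 400000),
	PROPERTY_ENTRY_STRING("label", "example"),
	{ }	/* the duplicator expects a NULL-terminated array */
};

static int foo_clone_props(void)
{
	struct property_entry *copy;

	copy = property_entries_dup(foo_props);
	if (IS_ERR(copy))
		return PTR_ERR(copy);

	/* ... hand the deep copy over to whoever owns it ... */

	property_entries_free(copy);
	return 0;
}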
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index b11af3f2c1db..b1e9aae9a5d0 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -81,7 +81,7 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
node = rbtree_ctx->root.rb_node;
while (node) {
- rbnode = container_of(node, struct regcache_rbtree_node, node);
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
&top_reg);
if (reg >= base_reg && reg <= top_reg) {
@@ -108,8 +108,7 @@ static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
parent = NULL;
new = &root->rb_node;
while (*new) {
- rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
- node);
+ rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
/* base and top registers of the current rbnode */
regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
&top_reg_tmp);
@@ -152,7 +151,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
- n = container_of(node, struct regcache_rbtree_node, node);
+ n = rb_entry(node, struct regcache_rbtree_node, node);
mem_size += sizeof(*n);
mem_size += (n->blklen * map->cache_word_size);
mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
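rb_entry() is just container_of() specialised for struct rb_node members, so the conversions above are behaviour-neutral; a minimal sketch:
/* Sketch: rb_entry() is container_of() for rb_node members. */
#include <linux/rbtree.h>

struct foo_node {
	struct rb_node node;
	unsigned int reg;
};

static struct foo_node *foo_first(struct rb_root *root)
{
	struct rb_node *n = rb_first(root);

	/* Identical to container_of(n, struct foo_node, node). */
	return n ? rb_entry(n, struct foo_node, node) : NULL;
}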
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4e582561e1e7..b0a0dcf32fb7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -224,7 +224,7 @@ void regcache_exit(struct regmap *map)
}
/**
- * regcache_read: Fetch the value of a given register from the cache.
+ * regcache_read - Fetch the value of a given register from the cache.
*
* @map: map to configure.
* @reg: The register index.
@@ -255,7 +255,7 @@ int regcache_read(struct regmap *map,
}
/**
- * regcache_write: Set the value of a given register in the cache.
+ * regcache_write - Set the value of a given register in the cache.
*
* @map: map to configure.
* @reg: The register index.
@@ -328,7 +328,7 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
}
/**
- * regcache_sync: Sync the register cache with the hardware.
+ * regcache_sync - Sync the register cache with the hardware.
*
* @map: map to configure.
*
@@ -396,7 +396,7 @@ out:
EXPORT_SYMBOL_GPL(regcache_sync);
/**
- * regcache_sync_region: Sync part of the register cache with the hardware.
+ * regcache_sync_region - Sync part of the register cache with the hardware.
*
* @map: map to sync.
* @min: first register to sync
@@ -452,7 +452,7 @@ out:
EXPORT_SYMBOL_GPL(regcache_sync_region);
/**
- * regcache_drop_region: Discard part of the register cache
+ * regcache_drop_region - Discard part of the register cache
*
* @map: map to operate on
* @min: first register to discard
@@ -483,10 +483,10 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
EXPORT_SYMBOL_GPL(regcache_drop_region);
/**
- * regcache_cache_only: Put a register map into cache only mode
+ * regcache_cache_only - Put a register map into cache only mode
*
* @map: map to configure
- * @cache_only: flag if changes should be written to the hardware
+ * @enable: flag if changes should be written to the hardware
*
* When a register map is marked as cache only writes to the register
* map API will only update the register cache, they will not cause
@@ -505,7 +505,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
EXPORT_SYMBOL_GPL(regcache_cache_only);
/**
- * regcache_mark_dirty: Indicate that HW registers were reset to default values
+ * regcache_mark_dirty - Indicate that HW registers were reset to default values
*
* @map: map to mark
*
@@ -527,10 +527,10 @@ void regcache_mark_dirty(struct regmap *map)
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
/**
- * regcache_cache_bypass: Put a register map into cache bypass mode
+ * regcache_cache_bypass - Put a register map into cache bypass mode
*
* @map: map to configure
- * @cache_bypass: flag if changes should not be written to the cache
+ * @enable: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
* to the register map API will only update the hardware and not the
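A common pattern built on the cache-only and sync calls documented above, assuming a regmap-backed device that loses register state when powered down:
/* Sketch: cache-only around power-down, full sync on power-up. */
#include <linux/regmap.h>

static void foo_power_off(struct regmap *map)
{
	/* Further writes land only in the cache while the device is off. */
	regcache_cache_only(map, true);
	regcache_mark_dirty(map);
}

static int foo_power_on(struct regmap *map)
{
	regcache_cache_only(map, false);
	/* Replay the dirty registers to the freshly powered hardware. */
	return regcache_sync(map);
}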
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index ec262476d043..cd54189f2b1d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -398,13 +398,14 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
- * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
*
- * map: The regmap for the device.
- * irq: The IRQ the device uses to signal interrupts
- * irq_flags: The IRQF_ flags to use for the primary interrupt.
- * chip: Configuration for the interrupt controller.
- * data: Runtime data structure for the controller, allocated on success
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
*
* Returns 0 on success or an errno on failure.
*
@@ -659,12 +660,12 @@ err_alloc:
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
- * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
*
* @irq: Primary IRQ for the device
- * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
*
- * This function also dispose all mapped irq on chip.
+ * This function also disposes of all mapped IRQs on the chip.
*/
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
@@ -723,18 +724,19 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_regmap_add_irq_chip(): Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
*
- * @dev: The device pointer on which irq_chip belongs to.
- * @map: The regmap for the device.
- * @irq: The IRQ the device uses to signal interrupts
+ * @dev:	The device to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
- * @chip: Configuration for the interrupt controller.
- * @data: Runtime data structure for the controller, allocated on success
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
*
* Returns 0 on success or an errno on failure.
*
- * The regmap_irq_chip data automatically be released when the device is
+ * The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
@@ -765,11 +767,13 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
- * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
* @dev: Device for which the resource was allocated.
- * @irq: Primary IRQ for the device
- * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
*/
void devm_regmap_del_irq_chip(struct device *dev, int irq,
struct regmap_irq_chip_data *data)
@@ -786,11 +790,11 @@ void devm_regmap_del_irq_chip(struct device *dev, int irq,
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
/**
- * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
*
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
*
- * @data: regmap_irq controller to operate on.
+ * Useful for drivers to request their own IRQs.
*/
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
@@ -800,12 +804,12 @@ int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
- * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
*
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
*
- * @data: regmap_irq controller to operate on.
- * @irq: index of the interrupt requested in the chip IRQs
+ * Useful for drivers to request their own IRQs.
*/
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
@@ -818,14 +822,14 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
/**
- * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
*
* Useful for drivers to request their own IRQs and for integration
* with subsystems. For ease of integration NULL is accepted as a
* domain, allowing devices to just call this even if no domain is
* allocated.
- *
- * @data: regmap_irq controller to operate on.
*/
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
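A hedged sketch of registering a regmap IRQ chip with the devm variant documented above; the register layout and interrupt wiring are assumptions for illustration only:
/* Sketch: devm regmap IRQ chip registration (hypothetical register layout). */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),		/* one source in one status register */
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x10,			/* assumed addresses */
	.mask_base	= 0x11,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

static int foo_add_irq_chip(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *data;
	int ret;

	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
				       &foo_irq_chip, &data);
	if (ret)
		return ret;

	/* Map chip IRQ 0 to a Linux virq, e.g. for request_threaded_irq(). */
	return regmap_irq_get_virq(data, 0) > 0 ? 0 : -ENXIO;
}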
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ae63bb0875ea..b9a779a4a739 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -459,7 +459,7 @@ static bool _regmap_range_add(struct regmap *map,
while (*new) {
struct regmap_range_node *this =
- container_of(*new, struct regmap_range_node, node);
+ rb_entry(*new, struct regmap_range_node, node);
parent = *new;
if (data->range_max < this->range_min)
@@ -483,7 +483,7 @@ static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
while (node) {
struct regmap_range_node *this =
- container_of(node, struct regmap_range_node, node);
+ rb_entry(node, struct regmap_range_node, node);
if (reg < this->range_min)
node = node->rb_left;
@@ -1091,8 +1091,7 @@ static void regmap_field_init(struct regmap_field *rm_field,
}
/**
- * devm_regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * devm_regmap_field_alloc() - Allocate and initialise a register field.
*
* @dev: Device that will be interacted with
* @regmap: regmap bank in which this register field is located.
@@ -1118,13 +1117,15 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
/**
- * devm_regmap_field_free(): Free register field allocated using
- * devm_regmap_field_alloc. Usally drivers need not call this function,
- * as the memory allocated via devm will be freed as per device-driver
- * life-cyle.
+ * devm_regmap_field_free() - Free a register field allocated using
+ * devm_regmap_field_alloc.
*
* @dev: Device that will be interacted with
* @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per the device-driver life-cycle.
*/
void devm_regmap_field_free(struct device *dev,
struct regmap_field *field)
@@ -1134,8 +1135,7 @@ void devm_regmap_field_free(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
/**
- * regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * regmap_field_alloc() - Allocate and initialise a register field.
*
* @regmap: regmap bank in which this register field is located.
* @reg_field: Register field with in the bank.
@@ -1159,7 +1159,8 @@ struct regmap_field *regmap_field_alloc(struct regmap *regmap,
EXPORT_SYMBOL_GPL(regmap_field_alloc);
/**
- * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ * regmap_field_free() - Free register field allocated using
+ * regmap_field_alloc.
*
* @field: regmap field which should be freed.
*/
@@ -1170,7 +1171,7 @@ void regmap_field_free(struct regmap_field *field)
EXPORT_SYMBOL_GPL(regmap_field_free);
/**
- * regmap_reinit_cache(): Reinitialise the current register cache
+ * regmap_reinit_cache() - Reinitialise the current register cache
*
* @map: Register map to operate on.
* @config: New configuration. Only the cache data will be used.
@@ -1205,7 +1206,9 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
- * regmap_exit(): Free a previously allocated register map
+ * regmap_exit() - Free a previously allocated register map
+ *
+ * @map: Register map to operate on.
*/
void regmap_exit(struct regmap *map)
{
@@ -1245,7 +1248,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
}
/**
- * dev_get_regmap(): Obtain the regmap (if any) for a device
+ * dev_get_regmap() - Obtain the regmap (if any) for a device
*
* @dev: Device to retrieve the map for
* @name: Optional name for the register map, usually NULL.
@@ -1268,7 +1271,7 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(dev_get_regmap);
/**
- * regmap_get_device(): Obtain the device from a regmap
+ * regmap_get_device() - Obtain the device from a regmap
*
* @map: Register map to operate on.
*
@@ -1654,7 +1657,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
/**
- * regmap_write(): Write a value to a single register
+ * regmap_write() - Write a value to a single register
*
* @map: Register map to write to
* @reg: Register to write to
@@ -1681,7 +1684,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
EXPORT_SYMBOL_GPL(regmap_write);
/**
- * regmap_write_async(): Write a value to a single register asynchronously
+ * regmap_write_async() - Write a value to a single register asynchronously
*
* @map: Register map to write to
* @reg: Register to write to
@@ -1712,7 +1715,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
EXPORT_SYMBOL_GPL(regmap_write_async);
/**
- * regmap_raw_write(): Write raw values to one or more registers
+ * regmap_raw_write() - Write raw values to one or more registers
*
* @map: Register map to write to
* @reg: Initial register to write to
@@ -1750,9 +1753,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_raw_write);
/**
- * regmap_field_update_bits_base():
- * Perform a read/modify/write cycle on the register field
- * with change, async, force option
+ * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
+ *                                    register field.
*
* @field: Register field to write to
* @mask: Bitmask to change
@@ -1761,6 +1763,9 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
* @async: Boolean indicating asynchronously
* @force: Boolean indicating use force update
*
+ * Perform a read/modify/write cycle on the register field with change,
+ * async, force options.
+ *
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
@@ -1777,9 +1782,8 @@ int regmap_field_update_bits_base(struct regmap_field *field,
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
/**
- * regmap_fields_update_bits_base():
- * Perform a read/modify/write cycle on the register field
- * with change, async, force option
+ * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
+ *                                     register field with port ID
*
* @field: Register field to write to
* @id: port ID
@@ -1808,8 +1812,8 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
-/*
- * regmap_bulk_write(): Write multiple registers to the device
+/**
+ * regmap_bulk_write() - Write multiple registers to the device
*
* @map: Register map to write to
* @reg: First register to be write from
@@ -2174,18 +2178,18 @@ static int _regmap_multi_reg_write(struct regmap *map,
return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
-/*
- * regmap_multi_reg_write(): Write multiple registers to the device
- *
- * where the set of register,value pairs are supplied in any order,
- * possibly not all in a single range.
+/**
+ * regmap_multi_reg_write() - Write multiple registers to the device
*
* @map: Register map to write to
* @regs: Array of structures containing register,value to be written
* @num_regs: Number of registers to write
*
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
* The 'normal' block write mode will ultimately send data on the
- * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
* addressed. However, this alternative block multi write mode will send
* the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
* must of course support the mode.
@@ -2208,16 +2212,17 @@ int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
-/*
- * regmap_multi_reg_write_bypassed(): Write multiple registers to the
- * device but not the cache
- *
- * where the set of register are supplied in any order
+/**
+ * regmap_multi_reg_write_bypassed() - Write multiple registers to the
+ * device but not the cache
*
* @map: Register map to write to
* @regs: Array of structures containing register,value to be written
* @num_regs: Number of registers to write
*
+ * Write multiple registers to the device but not the cache where the set
+ * of registers is supplied in any order.
+ *
* This function is intended to be used for writing a large block of data
* atomically to the device in a single transfer for those I2C client devices
* that implement this alternative block write mode.
@@ -2248,8 +2253,8 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
- * regmap_raw_write_async(): Write raw values to one or more registers
- * asynchronously
+ * regmap_raw_write_async() - Write raw values to one or more registers
+ * asynchronously
*
* @map: Register map to write to
* @reg: Initial register to write to
@@ -2385,7 +2390,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
}
/**
- * regmap_read(): Read a value from a single register
+ * regmap_read() - Read a value from a single register
*
* @map: Register map to read from
* @reg: Register to be read from
@@ -2412,7 +2417,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_read);
/**
- * regmap_raw_read(): Read raw data from the device
+ * regmap_raw_read() - Read raw data from the device
*
* @map: Register map to read from
* @reg: First register to be read from
@@ -2477,7 +2482,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
- * regmap_field_read(): Read a value to a single register field
+ * regmap_field_read() - Read a value to a single register field
*
* @field: Register field to read from
* @val: Pointer to store read value
@@ -2502,7 +2507,7 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_field_read);
/**
- * regmap_fields_read(): Read a value to a single register field with port ID
+ * regmap_fields_read() - Read a value to a single register field with port ID
*
* @field: Register field to read from
* @id: port ID
@@ -2535,7 +2540,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
- * regmap_bulk_read(): Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple registers from the device
*
* @map: Register map to read from
* @reg: First register to be read from
@@ -2692,9 +2697,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
}
/**
- * regmap_update_bits_base:
- * Perform a read/modify/write cycle on the
- * register map with change, async, force option
+ * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
*
* @map: Register map to update
* @reg: Register to update
@@ -2704,10 +2707,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
* @async: Boolean indicating asynchronously
* @force: Boolean indicating use force update
*
- * if async was true,
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
+ * Perform a read/modify/write cycle on a register map with change, async, force
+ * options.
+ *
+ * If async is true:
+ *
+ * With most buses the read must be done synchronously so this is most useful
+ * for devices with a cache which do not need to interact with the hardware to
+ * determine the current register value.
*
* Returns zero for success, a negative number on error.
*/
@@ -2765,7 +2772,7 @@ static int regmap_async_is_done(struct regmap *map)
}
/**
- * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ * regmap_async_complete - Ensure all asynchronous I/O has completed.
*
* @map: Map to operate on.
*
@@ -2797,8 +2804,8 @@ int regmap_async_complete(struct regmap *map)
EXPORT_SYMBOL_GPL(regmap_async_complete);
/**
- * regmap_register_patch: Register and apply register updates to be applied
- * on device initialistion
+ * regmap_register_patch - Register and apply register updates to be applied
+ *                         on device initialisation
*
* @map: Register map to apply updates to.
* @regs: Values to update.
@@ -2855,8 +2862,10 @@ int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
-/*
- * regmap_get_val_bytes(): Report the size of a register value
+/**
+ * regmap_get_val_bytes() - Report the size of a register value
+ *
+ * @map: Register map to operate on.
*
* Report the size of a register value, mainly intended for use by
* generic infrastructure built on top of regmap.
@@ -2871,7 +2880,9 @@ int regmap_get_val_bytes(struct regmap *map)
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
/**
- * regmap_get_max_register(): Report the max register value
+ * regmap_get_max_register() - Report the max register value
+ *
+ * @map: Register map to operate on.
*
* Report the max register value, mainly intended for use by
* generic infrastructure built on top of regmap.
@@ -2883,7 +2894,9 @@ int regmap_get_max_register(struct regmap *map)
EXPORT_SYMBOL_GPL(regmap_get_max_register);
/**
- * regmap_get_reg_stride(): Report the register address stride
+ * regmap_get_reg_stride() - Report the register address stride
+ *
+ * @map: Register map to operate on.
*
* Report the register address stride, mainly intended for use by
* generic infrastructure built on top of regmap.
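A minimal sketch of the update-bits and multi-register write calls documented above, with made-up register addresses:
/* Sketch: read/modify/write and multi-register writes (made-up registers). */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#define FOO_CTRL	0x00
#define FOO_CTRL_EN	BIT(0)

static const struct reg_sequence foo_init_seq[] = {
	{ 0x10, 0x01 },
	{ 0x11, 0xff },
	{ 0x12, 0x80 },
};

static int foo_enable(struct regmap *map)
{
	int ret;

	/* Wraps regmap_update_bits_base(map, reg, mask, val, NULL, false, false). */
	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);
	if (ret)
		return ret;

	/* Register/value pairs may come in any order, per the kerneldoc above. */
	return regmap_multi_reg_write(map, foo_init_seq, ARRAY_SIZE(foo_init_seq));
}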
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_b.c */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_early_init(cc);
- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
- bcma_chipco_serial_init(cc);
-
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
bcma_core_chipcommon_flash_detect(cc);
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
return res;
}
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
-#if IS_BUILTIN(CONFIG_BCM47XX)
unsigned int irq;
u32 baud_base;
u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
-#endif /* CONFIG_BCM47XX */
}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
+ struct bcma_bus *bus = mcore->core->bus;
+
if (mcore->early_setup_done)
return;
+ bcma_chipco_serial_init(&bus->drv_cc);
bcma_core_mips_nvram_init(mcore);
mcore->early_setup_done = true;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 2c1798e38abd..12da68ec48ba 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,17 +136,17 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
+static struct device_node *bcma_of_find_child_device(struct device *parent,
struct bcma_device *core)
{
struct device_node *node;
u64 size;
const __be32 *reg;
- if (!parent || !parent->dev.of_node)
+ if (!parent->of_node)
return NULL;
- for_each_child_of_node(parent->dev.of_node, node) {
+ for_each_child_of_node(parent->of_node, node) {
reg = of_get_address(node, 0, &size, NULL);
if (!reg)
continue;
@@ -156,7 +156,7 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par
return NULL;
}
-static int bcma_of_irq_parse(struct platform_device *parent,
+static int bcma_of_irq_parse(struct device *parent,
struct bcma_device *core,
struct of_phandle_args *out_irq, int num)
{
@@ -169,7 +169,7 @@ static int bcma_of_irq_parse(struct platform_device *parent,
return rc;
}
- out_irq->np = parent->dev.of_node;
+ out_irq->np = parent->of_node;
out_irq->args_count = 1;
out_irq->args[0] = num;
@@ -177,13 +177,13 @@ static int bcma_of_irq_parse(struct platform_device *parent,
return of_irq_parse_raw(laddr, out_irq);
}
-static unsigned int bcma_of_get_irq(struct platform_device *parent,
+static unsigned int bcma_of_get_irq(struct device *parent,
struct bcma_device *core, int num)
{
struct of_phandle_args out_irq;
int ret;
- if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -196,7 +196,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
return irq_create_of_mapping(&out_irq);
}
-static void bcma_of_fill_device(struct platform_device *parent,
+static void bcma_of_fill_device(struct device *parent,
struct bcma_device *core)
{
struct device_node *node;
@@ -227,7 +227,7 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
return mips_irq <= 4 ? mips_irq + 2 : 0;
}
if (bus->host_pdev)
- return bcma_of_get_irq(bus->host_pdev, core, num);
+ return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
return 0;
case BCMA_HOSTTYPE_SDIO:
return 0;
@@ -253,7 +253,8 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
core->dma_dev = &bus->host_pdev->dev;
core->dev.parent = &bus->host_pdev->dev;
- bcma_of_fill_device(bus->host_pdev, core);
+ if (core->dev.parent)
+ bcma_of_fill_device(core->dev.parent, core);
} else {
core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
@@ -633,8 +634,11 @@ static int bcma_device_probe(struct device *dev)
drv);
int err = 0;
+ get_device(dev);
if (adrv->probe)
err = adrv->probe(core);
+ if (err)
+ put_device(dev);
return err;
}
@@ -647,6 +651,7 @@ static int bcma_device_remove(struct device *dev)
if (adrv->remove)
adrv->remove(core);
+ put_device(dev);
return 0;
}
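The get_device()/put_device() pairing added to bcma_device_probe()/bcma_device_remove() follows the usual bus refcounting pattern; roughly:
/* Sketch: the probe/remove device-refcount pattern used above. */
#include <linux/device.h>

static int foo_driver_probe(struct device *dev)
{
	return 0;			/* stands in for adrv->probe(core) */
}

static int foo_bus_probe(struct device *dev)
{
	int err;

	get_device(dev);		/* pin the device while it is bound */
	err = foo_driver_probe(dev);
	if (err)
		put_device(dev);	/* drop the reference again if probe fails */
	return err;
}

static int foo_bus_remove(struct device *dev)
{
	/* adrv->remove(core) would run here */
	put_device(dev);		/* balance the reference taken at probe time */
	return 0;
}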
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 223ff2fcae7e..f744de7a0f9b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -69,6 +69,7 @@ config AMIGA_Z2RAM
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
+ select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks
@@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
select CHECK_SIGNATURE
+ select BLK_SCSI_REQUEST
help
This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here.
@@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ select BLK_SCSI_REQUEST
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
@@ -501,6 +504,16 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+config VIRTIO_BLK_SCSI
+ bool "SCSI passthrough request for the Virtio block driver"
+ depends on VIRTIO_BLK
+ select BLK_SCSI_REQUEST
+ ---help---
+ Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
+ virtio-blk devices. This is only supported for the legacy
+ virtio protocol and not enabled by default by any hypervisor.
+ Your probably want to virtio-scsi instead.
+
config BLK_DEV_HD
bool "Very old hard disk (MFM/RLL/IDE) driver"
depends on HAVE_IDE
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index ec9d8610b25f..027b876370bc 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+ q->backing_dev_info->name = "aoe";
+ q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e5c5b8eb14a9..27d613795653 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -52,6 +52,7 @@
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_request.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#include <linux/kthread.h>
@@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq)
dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
/* set the residual count for pc requests */
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- rq->resid_len = c->err_info->ResidualCnt;
+ if (blk_rq_is_passthrough(rq))
+ scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
@@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
int drv_index)
{
- disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+ disk->queue = blk_alloc_queue(GFP_KERNEL);
if (!disk->queue)
goto init_queue_failure;
+
+ disk->queue->cmd_size = sizeof(struct scsi_request);
+ disk->queue->request_fn = do_cciss_request;
+ disk->queue->queue_lock = &h->lock;
+ if (blk_init_allocated_queue(disk->queue) < 0)
+ goto cleanup_queue;
+
sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
disk->major = h->major;
disk->first_minor = drv_index << NWD_SHIFT;
@@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
driver_byte = DRIVER_OK;
msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
- if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (blk_rq_is_passthrough(cmd->rq))
host_byte = DID_PASSTHROUGH;
else
host_byte = DID_OK;
@@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
host_byte, driver_byte);
if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+ if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cmd %p "
"has SCSI Status 0x%x\n",
cmd, cmd->err_info->ScsiStatus);
@@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h,
sense_key = 0xf & cmd->err_info->SenseInfo[2];
/* no status or recovered error */
if (((sense_key == 0x0) || (sense_key == 0x1)) &&
- (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ !blk_rq_is_passthrough(cmd->rq))
error_value = 0;
if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
+ *retry_cmd = !blk_rq_is_passthrough(cmd->rq);
return 0;
}
/* Not SG_IO or similar? */
- if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(cmd->rq)) {
if (error_value != 0)
dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
" sense key = 0x%x\n", cmd, sense_key);
return error_value;
}
- /* SG_IO or similar, copy sense data back */
- if (cmd->rq->sense) {
- if (cmd->rq->sense_len > cmd->err_info->SenseLen)
- cmd->rq->sense_len = cmd->err_info->SenseLen;
- memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
- cmd->rq->sense_len);
- } else
- cmd->rq->sense_len = 0;
-
+ scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
return error_value;
}
@@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
break;
case CMD_DATA_UNDERRUN:
- if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(cmd->rq)) {
dev_warn(&h->pdev->dev, "cmd %p has"
" completed with data underrun "
"reported\n", cmd);
- cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
- if (cmd->rq->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(cmd->rq))
dev_warn(&h->pdev->dev, "cciss: cmd %p has"
" completed with data overrun "
"reported\n", cmd);
@@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"reported invalid\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_PROTOCOL_ERR:
@@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"protocol error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_HARDWARE_ERR:
@@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
" hardware error\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_CONNECTION_LOST:
@@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"connection lost\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_ABORTED:
@@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"aborted\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_ABORT_FAILED:
@@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"abort failed\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNSOLICITED_ABORT:
@@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
"%p retried too many times\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ABORT);
break;
case CMD_TIMEOUT:
dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
case CMD_UNABORTABLE:
dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
break;
default:
@@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
cmd->err_info->CommandStatus);
rq->errors = make_status_bytes(SAM_STAT_GOOD,
cmd->err_info->CommandStatus, DRIVER_OK,
- (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_is_passthrough(cmd->rq) ?
DID_PASSTHROUGH : DID_ERROR);
}
@@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q)
c->Header.SGList = h->max_cmd_sgentries;
set_performant_mode(h, c);
- if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+ switch (req_op(creq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
- } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
- c->Request.CDBLen = creq->cmd_len;
- memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
- } else {
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ c->Request.CDBLen = scsi_req(creq)->cmd_len;
+ memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
+ scsi_req(creq)->sense = c->err_info->SenseInfo;
+ break;
+ default:
dev_warn(&h->pdev->dev, "bad request type %d\n",
- creq->cmd_type);
+ creq->cmd_flags);
BUG();
}
@@ -4074,41 +4079,27 @@ clean_up:
static void cciss_interrupt_mode(ctlr_info_t *h)
{
-#ifdef CONFIG_PCI_MSI
- int err;
- struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
- {0, 2}, {0, 3}
- };
+ int ret;
/* Some boards advertise MSI but don't really support it */
if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
- err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4);
- if (!err) {
- h->intr[0] = cciss_msix_entries[0].vector;
- h->intr[1] = cciss_msix_entries[1].vector;
- h->intr[2] = cciss_msix_entries[2].vector;
- h->intr[3] = cciss_msix_entries[3].vector;
- h->msix_vector = 1;
- return;
- } else {
- dev_warn(&h->pdev->dev,
- "MSI-X init failed %d\n", err);
- }
- }
- if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
- if (!pci_enable_msi(h->pdev))
- h->msi_vector = 1;
- else
- dev_warn(&h->pdev->dev, "MSI init failed\n");
+ ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
+ if (ret >= 0) {
+ h->intr[0] = pci_irq_vector(h->pdev, 0);
+ h->intr[1] = pci_irq_vector(h->pdev, 1);
+ h->intr[2] = pci_irq_vector(h->pdev, 2);
+ h->intr[3] = pci_irq_vector(h->pdev, 3);
+ return;
}
+
+ ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
+
default_int_mode:
-#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = h->pdev->irq;
+ h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
return;
}
@@ -4888,7 +4879,7 @@ static int cciss_request_irq(ctlr_info_t *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
{
- if (h->msix_vector || h->msi_vector) {
+ if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
0, h->devname, h))
return 0;
@@ -4934,12 +4925,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
int ctlr = h->ctlr;
free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
- if (h->msix_vector)
- pci_disable_msix(h->pdev);
- else if (h->msi_vector)
- pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+ pci_free_irq_vectors(h->pdev);
cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
cciss_free_scatterlists(h);
cciss_free_cmd_pool(h);
@@ -5295,12 +5281,7 @@ static void cciss_remove_one(struct pci_dev *pdev)
cciss_shutdown(pdev);
-#ifdef CONFIG_PCI_MSI
- if (h->msix_vector)
- pci_disable_msix(h->pdev);
- else if (h->msi_vector)
- pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+ pci_free_irq_vectors(h->pdev);
iounmap(h->transtable);
iounmap(h->cfgtable);
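The interrupt setup above now uses the generic vector allocator; a simplified sketch of that pattern (the vector counts mirror the cciss case, and the fallback order is an assumption):
/* Sketch: pci_alloc_irq_vectors() with MSI-X first, then MSI/legacy fallback. */
#include <linux/pci.h>

static int foo_setup_irqs(struct pci_dev *pdev, unsigned int vecs[4])
{
	int ret, i;

	ret = pci_alloc_irq_vectors(pdev, 4, 4, PCI_IRQ_MSIX);
	if (ret < 0)
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++)
		vecs[i] = pci_irq_vector(pdev, i);
	return ret;			/* number of vectors actually allocated */
}

static void foo_teardown_irqs(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);	/* replaces pci_disable_msi{,x}() */
}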
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 7fda30e4a241..24b5fd75501a 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -90,8 +90,6 @@ struct ctlr_info
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
unsigned int intr[4];
- unsigned int msix_vector;
- unsigned int msi_vector;
int intr_mode;
int cciss_max_sectors;
BYTE cciss_read;
@@ -333,7 +331,7 @@ static unsigned long SA5_performant_completed(ctlr_info_t *h)
*/
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
/* msi auto clears the interrupt pending bit. */
- if (!(h->msi_vector || h->msix_vector)) {
+ if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
/* Do a read in order to flush the write to the controller
* (as per spec.)
@@ -393,7 +391,7 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
if (!register_value)
return false;
- if (h->msi_vector || h->msix_vector)
+ if (h->pdev->msi_enabled || h->pdev->msix_enabled)
return true;
/* Read outbound doorbell to flush */
@@ -402,27 +400,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
}
static struct access_method SA5_access = {
- SA5_submit_command,
- SA5_intr_mask,
- SA5_fifo_full,
- SA5_intr_pending,
- SA5_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5_intr_mask,
+ .fifo_full = SA5_fifo_full,
+ .intr_pending = SA5_intr_pending,
+ .command_completed = SA5_completed,
};
static struct access_method SA5B_access = {
- SA5_submit_command,
- SA5B_intr_mask,
- SA5_fifo_full,
- SA5B_intr_pending,
- SA5_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5B_intr_mask,
+ .fifo_full = SA5_fifo_full,
+ .intr_pending = SA5B_intr_pending,
+ .command_completed = SA5_completed,
};
static struct access_method SA5_performant_access = {
- SA5_submit_command,
- SA5_performant_intr_mask,
- SA5_fifo_full,
- SA5_performant_intr_pending,
- SA5_performant_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5_performant_intr_mask,
+ .fifo_full = SA5_fifo_full,
+ .intr_pending = SA5_performant_intr_pending,
+ .command_completed = SA5_performant_completed,
};
struct board_type {
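The SA5 tables above switch from positional to designated initializers; a minimal illustration of why that is preferred (the ops struct here is hypothetical):
/* Sketch: positional vs. designated initialization of an ops table. */
struct foo_ops {
	void (*submit)(void *ctx);
	int (*pending)(void *ctx);
};

static void foo_submit(void *ctx) { }
static int foo_pending(void *ctx) { return 0; }

/* Positional form: silently breaks if members are ever reordered. */
static struct foo_ops foo_ops_positional = { foo_submit, foo_pending };

/* Designated form, as adopted above: order-independent and self-documenting. */
static struct foo_ops foo_ops_designated = {
	.submit		= foo_submit,
	.pending	= foo_pending,
};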
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index ab62b81c2ca7..dece26f119d4 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1070,7 +1070,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
.done = 0,
.flags = flags,
.error = 0,
- .kref = { ATOMIC_INIT(2) },
+ .kref = KREF_INIT(2),
};
if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 83482721bc01..615e5b5178a0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
@@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = device;
+ q->backing_dev_info->congested_fn = drbd_congested;
+ q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_write_cache(q, true, true);
@@ -2948,7 +2948,6 @@ void drbd_delete_device(struct drbd_device *device)
struct drbd_resource *resource = device->resource;
struct drbd_connection *connection;
struct drbd_peer_device *peer_device;
- int refs = 3;
/* move to free_peer_device() */
for_each_peer_device(peer_device, device)
@@ -2956,13 +2955,15 @@ void drbd_delete_device(struct drbd_device *device)
drbd_debugfs_device_cleanup(device);
for_each_connection(connection, resource) {
idr_remove(&connection->peer_devices, device->vnr);
- refs++;
+ kref_put(&device->kref, drbd_destroy_device);
}
idr_remove(&resource->devices, device->vnr);
+ kref_put(&device->kref, drbd_destroy_device);
idr_remove(&drbd_devices, device_to_minor(device));
+ kref_put(&device->kref, drbd_destroy_device);
del_gendisk(device->vdisk);
synchronize_rcu();
- kref_sub(&device->kref, refs, drbd_destroy_device);
+ kref_put(&device->kref, drbd_destroy_device);
}
static int __init drbd_init(void)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f35db29cac76..908c704e20aa 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) {
blk_queue_stack_limits(q, b);
- if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+ if (q->backing_dev_info->ra_pages !=
+ b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info.ra_pages,
- b->backing_dev_info.ra_pages);
- q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+ q->backing_dev_info->ra_pages,
+ b->backing_dev_info->ra_pages);
+ q->backing_dev_info->ra_pages =
+ b->backing_dev_info->ra_pages;
}
}
fixup_discard_if_not_supported(q);
@@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
s->dev_disk_flags = md->flags;
q = bdev_get_queue(device->ldev->backing_bdev);
s->dev_lower_blocked =
- bdi_congested(&q->backing_dev_info,
+ bdi_congested(q->backing_dev_info,
(1 << WB_async_congested) |
(1 << WB_sync_congested));
put_ldev(device);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be2b93fd2c11..8378142f7a55 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(&device->rq_queue->backing_dev_info);
+ bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de279fe4e4fd..652114ae1a8a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -421,7 +421,6 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
struct drbd_peer_device *peer_device = first_peer_device(device);
unsigned s = req->rq_state;
int c_put = 0;
- int k_put = 0;
if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
@@ -437,6 +436,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
/* intent: get references */
+ kref_get(&req->kref);
+
if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
atomic_inc(&req->completion_ref);
@@ -473,15 +474,12 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
- /* local completion may still come in later,
- * we need to keep the req object around. */
- kref_get(&req->kref);
++c_put;
}
if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
if (req->rq_state & RQ_LOCAL_ABORTED)
- ++k_put;
+ kref_put(&req->kref, drbd_req_destroy);
else
++c_put;
list_del_init(&req->req_pending_local);
@@ -503,7 +501,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if (s & RQ_NET_SENT)
atomic_sub(req->i.size >> 9, &device->ap_in_flight);
if (s & RQ_EXP_BARR_ACK)
- ++k_put;
+ kref_put(&req->kref, drbd_req_destroy);
req->net_done_jif = jiffies;
/* in ahead/behind mode, or just in case,
@@ -516,25 +514,16 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
/* potentially complete and destroy */
- if (k_put || c_put) {
- /* Completion does it's own kref_put. If we are going to
- * kref_sub below, we need req to be still around then. */
- int at_least = k_put + !!c_put;
- int refcount = atomic_read(&req->kref.refcount);
- if (refcount < at_least)
- drbd_err(device,
- "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
- s, req->rq_state, refcount, at_least);
- }
-
/* If we made progress, retry conflicting peer requests, if any. */
if (req->i.waiting)
wake_up(&device->misc_wait);
- if (c_put)
- k_put += drbd_req_put_completion_ref(req, m, c_put);
- if (k_put)
- kref_sub(&req->kref, k_put, drbd_req_destroy);
+ if (c_put) {
+ if (drbd_req_put_completion_ref(req, m, c_put))
+ kref_put(&req->kref, drbd_req_destroy);
+ } else {
+ kref_put(&req->kref, drbd_req_destroy);
+ }
}
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@@ -938,7 +927,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
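
The drbd changes above retire kref_sub(): instead of tallying k_put/refs and dropping several references in one call, each reference is now released with its own kref_put() at the point it logically goes away. A minimal userspace sketch of the same idiom, using a hypothetical ref type and release callback rather than the kernel's struct kref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct kref: an atomic counter plus a
 * release callback invoked when the count drops to zero. */
struct ref {
	atomic_int count;
};

static void ref_init(struct ref *r) { atomic_store(&r->count, 1); }
static void ref_get(struct ref *r)  { atomic_fetch_add(&r->count, 1); }

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);		/* last reference gone */
}

static void destroy(struct ref *r)
{
	printf("released\n");
	free(r);
}

int main(void)
{
	struct ref *r = malloc(sizeof(*r));

	ref_init(r);
	ref_get(r);
	ref_get(r);

	/* Instead of one ref_sub(r, 3, destroy), drop each reference
	 * where it is logically released, as the patch does: */
	ref_put(r, destroy);
	ref_put(r, destroy);
	ref_put(r, destroy);	/* count hits zero here -> destroy() */
	return 0;
}
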
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a391a3cfb3fe..45b4384f650c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q)
return;
if (WARN(atomic_read(&usage_count) == 0,
- "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
- current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+ "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
+ current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return;
@@ -3119,7 +3119,7 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+ ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index a9b48ed7a3cd..6043648da1e8 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -626,30 +626,29 @@ repeat:
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, bio_data(req->bio));
#endif
- if (req->cmd_type == REQ_TYPE_FS) {
- switch (rq_data_dir(req)) {
- case READ:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
- &read_intr);
- if (reset)
- goto repeat;
- break;
- case WRITE:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
- &write_intr);
- if (reset)
- goto repeat;
- if (wait_DRQ()) {
- bad_rw_intr();
- goto repeat;
- }
- outsw(HD_DATA, bio_data(req->bio), 256);
- break;
- default:
- printk("unknown hd-command\n");
- hd_end_request_cur(-EIO);
- break;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+ &read_intr);
+ if (reset)
+ goto repeat;
+ break;
+ case REQ_OP_WRITE:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
+ &write_intr);
+ if (reset)
+ goto repeat;
+ if (wait_DRQ()) {
+ bad_rw_intr();
+ goto repeat;
}
+ outsw(HD_DATA, bio_data(req->bio), 256);
+ break;
+ default:
+ printk("unknown hd-command\n");
+ hd_end_request_cur(-EIO);
+ break;
}
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f347285c67ec..304377182c1a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
+	/* I/O needs to be drained during the transfer transition */
+ blk_mq_freeze_queue(lo->lo_queue);
+
err = loop_release_xfer(lo);
if (err)
- return err;
+ goto exit;
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
@@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = loop_init_xfer(lo, xfer, info);
if (err)
- return err;
+ goto exit;
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit)
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
- return -EFBIG;
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+ err = -EFBIG;
+ goto exit;
+ }
loop_config_discard(lo);
@@ -1156,7 +1161,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
- return 0;
+ exit:
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ return err;
}
static int
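
In loop_set_status() above, the whole transfer/size reconfiguration is now bracketed by blk_mq_freeze_queue()/blk_mq_unfreeze_queue(), and every early return becomes goto exit so the unfreeze cannot be skipped. A small sketch of that single-exit cleanup shape, with hypothetical freeze()/unfreeze()/step() helpers standing in for the block-layer calls:

#include <stdio.h>

/* Hypothetical stand-ins for blk_mq_freeze_queue()/unfreeze(). */
static void freeze(void)   { puts("frozen");   }
static void unfreeze(void) { puts("unfrozen"); }

static int step(int fail)  { return fail ? -1 : 0; }

static int reconfigure(int fail_early)
{
	int err;

	freeze();			/* drain I/O before changing state */

	err = step(fail_early);
	if (err)
		goto exit;		/* no early return: unfreeze must run */

	err = step(0);
exit:
	unfreeze();			/* runs on success and on every error path */
	return err;
}

int main(void)
{
	printf("ok=%d\n", reconfigure(0));
	printf("err=%d\n", reconfigure(1));
	return 0;
}
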
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e937fcf71769..286f276f586e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q)
break;
}
- if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
- if (rq_data_dir(host->req) == READ)
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
mg_read(host->req);
- else
+ break;
+ case REQ_OP_WRITE:
mg_write(host->req);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
+ }
}
}
@@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num,
unsigned int sect_cnt)
{
- if (rq_data_dir(req) == READ) {
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
- } else {
+ break;
+ case REQ_OP_WRITE:
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
}
return MG_ERR_NONE;
}
@@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q)
continue;
}
- if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
}
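
hd and mg_disk above (and later ps3disk and rbd) drop the REQ_TYPE_FS test and instead switch on req_op(), with a default arm that fails any operation the driver does not implement with -EIO, which is what the removed cmd_type check used to guarantee. The shape, as a hedged sketch with hypothetical op names:

#include <errno.h>
#include <stdio.h>

enum my_op { MY_READ, MY_WRITE, MY_FLUSH };

/* Only read and write are implemented; everything else ends with -EIO,
 * matching what the removed REQ_TYPE_FS check used to filter out. */
static int handle(enum my_op op)
{
	switch (op) {
	case MY_READ:
		return 0;	/* issue the read */
	case MY_WRITE:
		return 0;	/* issue the write */
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("read=%d flush=%d\n", handle(MY_READ), handle(MY_FLUSH));
	return 0;
}
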
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 50a2020b5b72..0be84a3cb6d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,6 +41,9 @@
#include <linux/nbd.h>
+static DEFINE_IDR(nbd_index_idr);
+static DEFINE_MUTEX(nbd_index_mutex);
+
struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;
@@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static struct nbd_device *nbd_dev;
static int max_part;
+static struct workqueue_struct *recv_workqueue;
+static int part_shift;
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
@@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
req->errors++;
- /*
- * If our disconnect packet times out then we're already holding the
- * config_lock and could deadlock here, so just set an error and return,
- * we'll handle shutting everything down later.
- */
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- return BLK_EH_HANDLED;
mutex_lock(&nbd->config_lock);
sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock);
@@ -271,21 +268,36 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
- int result, flags;
+ int result;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u32 type;
u32 tag = blk_mq_unique_tag(req);
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
type = NBD_CMD_TRIM;
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH;
- else if (rq_data_dir(req) == WRITE)
+ break;
+ case REQ_OP_WRITE:
type = NBD_CMD_WRITE;
- else
+ break;
+ case REQ_OP_READ:
type = NBD_CMD_READ;
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (rq_data_dir(req) == WRITE &&
+ (nbd->flags & NBD_FLAG_READ_ONLY)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
+ return -EIO;
+ }
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
@@ -310,7 +322,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (type != NBD_CMD_WRITE)
return 0;
- flags = 0;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
@@ -319,9 +330,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
bio_for_each_segment(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
+ int flags = is_last ? 0 : MSG_MORE;
- if (is_last)
- flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -512,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
goto error_out;
}
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_DRV_PRIV)
- goto error_out;
-
- if (req->cmd_type == REQ_TYPE_FS &&
- rq_data_dir(req) == WRITE &&
- (nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Write on read-only\n");
- goto error_out;
- }
-
req->errors = 0;
nsock = nbd->socks[index];
@@ -787,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
INIT_WORK(&args[i].work, recv_work);
args[i].nbd = nbd;
args[i].index = i;
- queue_work(system_long_wq, &args[i].work);
+ queue_work(recv_workqueue, &args[i].work);
}
wait_event_interruptible(nbd->recv_wq,
atomic_read(&nbd->recv_threads) == 0);
@@ -998,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
.timeout = nbd_xmit_timeout,
};
+static void nbd_dev_remove(struct nbd_device *nbd)
+{
+ struct gendisk *disk = nbd->disk;
+ nbd->magic = 0;
+ if (disk) {
+ del_gendisk(disk);
+ blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ put_disk(disk);
+ }
+ kfree(nbd);
+}
+
+static int nbd_dev_add(int index)
+{
+ struct nbd_device *nbd;
+ struct gendisk *disk;
+ struct request_queue *q;
+ int err = -ENOMEM;
+
+ nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
+ if (!nbd)
+ goto out;
+
+ disk = alloc_disk(1 << part_shift);
+ if (!disk)
+ goto out_free_nbd;
+
+ if (index >= 0) {
+ err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
+ GFP_KERNEL);
+ if (err == -ENOSPC)
+ err = -EEXIST;
+ } else {
+ err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+ if (err >= 0)
+ index = err;
+ }
+ if (err < 0)
+ goto out_free_disk;
+
+ nbd->disk = disk;
+ nbd->tag_set.ops = &nbd_mq_ops;
+ nbd->tag_set.nr_hw_queues = 1;
+ nbd->tag_set.queue_depth = 128;
+ nbd->tag_set.numa_node = NUMA_NO_NODE;
+ nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+ nbd->tag_set.driver_data = nbd;
+
+ err = blk_mq_alloc_tag_set(&nbd->tag_set);
+ if (err)
+ goto out_free_idr;
+
+ q = blk_mq_init_queue(&nbd->tag_set);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_free_tags;
+ }
+ disk->queue = q;
+
+ /*
+ * Tell the block layer that we are not a rotational device
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ disk->queue->limits.discard_granularity = 512;
+ blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+ disk->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_hw_sectors(disk->queue, 65536);
+ disk->queue->limits.max_sectors = 256;
+
+ nbd->magic = NBD_MAGIC;
+ mutex_init(&nbd->config_lock);
+ disk->major = NBD_MAJOR;
+ disk->first_minor = index << part_shift;
+ disk->fops = &nbd_fops;
+ disk->private_data = nbd;
+ sprintf(disk->disk_name, "nbd%d", index);
+ init_waitqueue_head(&nbd->recv_wq);
+ nbd_reset(nbd);
+ add_disk(disk);
+ return index;
+
+out_free_tags:
+ blk_mq_free_tag_set(&nbd->tag_set);
+out_free_idr:
+ idr_remove(&nbd_index_idr, index);
+out_free_disk:
+ put_disk(disk);
+out_free_nbd:
+ kfree(nbd);
+out:
+ return err;
+}
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
@@ -1005,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {
static int __init nbd_init(void)
{
- int err = -ENOMEM;
int i;
- int part_shift;
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
@@ -1036,111 +1129,38 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
-
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
+ recv_workqueue = alloc_workqueue("knbd-recv",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!recv_workqueue)
return -ENOMEM;
- for (i = 0; i < nbds_max; i++) {
- struct request_queue *q;
- struct gendisk *disk = alloc_disk(1 << part_shift);
- if (!disk)
- goto out;
- nbd_dev[i].disk = disk;
-
- nbd_dev[i].tag_set.ops = &nbd_mq_ops;
- nbd_dev[i].tag_set.nr_hw_queues = 1;
- nbd_dev[i].tag_set.queue_depth = 128;
- nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
- nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
- nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
-
- err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
- if (err) {
- put_disk(disk);
- goto out;
- }
-
- /*
- * The new linux 2.5 block layer implementation requires
- * every gendisk to have its very own request_queue struct.
- * These structs are big so we dynamically allocate them.
- */
- q = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- goto out;
- }
- disk->queue = q;
-
- /*
- * Tell the block layer that we are not a rotational device
- */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
- disk->queue->limits.discard_granularity = 512;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
- disk->queue->limits.discard_zeroes_data = 0;
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
- }
-
- if (register_blkdev(NBD_MAJOR, "nbd")) {
- err = -EIO;
- goto out;
- }
-
- printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+ if (register_blkdev(NBD_MAJOR, "nbd"))
+ return -EIO;
nbd_dbg_init();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = NBD_MAGIC;
- mutex_init(&nbd_dev[i].config_lock);
- disk->major = NBD_MAJOR;
- disk->first_minor = i << part_shift;
- disk->fops = &nbd_fops;
- disk->private_data = &nbd_dev[i];
- sprintf(disk->disk_name, "nbd%d", i);
- init_waitqueue_head(&nbd_dev[i].recv_wq);
- nbd_reset(&nbd_dev[i]);
- add_disk(disk);
- }
+ mutex_lock(&nbd_index_mutex);
+ for (i = 0; i < nbds_max; i++)
+ nbd_dev_add(i);
+ mutex_unlock(&nbd_index_mutex);
+ return 0;
+}
+static int nbd_exit_cb(int id, void *ptr, void *data)
+{
+ struct nbd_device *nbd = ptr;
+ nbd_dev_remove(nbd);
return 0;
-out:
- while (i--) {
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- blk_cleanup_queue(nbd_dev[i].disk->queue);
- put_disk(nbd_dev[i].disk);
- }
- kfree(nbd_dev);
- return err;
}
static void __exit nbd_cleanup(void)
{
- int i;
-
nbd_dbg_close();
- for (i = 0; i < nbds_max; i++) {
- struct gendisk *disk = nbd_dev[i].disk;
- nbd_dev[i].magic = 0;
- if (disk) {
- del_gendisk(disk);
- blk_cleanup_queue(disk->queue);
- blk_mq_free_tag_set(&nbd_dev[i].tag_set);
- put_disk(disk);
- }
- }
+ idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
+ idr_destroy(&nbd_index_idr);
+ destroy_workqueue(recv_workqueue);
unregister_blkdev(NBD_MAJOR, "nbd");
- kfree(nbd_dev);
- printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
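
nbd_dev_add() above allocates the device index through an IDR instead of the fixed nbd_dev[] array: with index >= 0 it demands exactly that slot (turning idr_alloc()'s -ENOSPC into -EEXIST), and with a negative index it takes the first free one and returns it. A userspace sketch of that allocation policy, with a hypothetical fixed-size table standing in for the IDR:

#include <errno.h>
#include <stdio.h>

#define NSLOTS 16
static void *slots[NSLOTS];

/* index >= 0: claim exactly that slot; index < 0: first free slot. */
static int slot_alloc(void *obj, int index)
{
	int i;

	if (index >= 0) {
		if (index >= NSLOTS || slots[index])
			return -EEXIST;	/* like mapping -ENOSPC to -EEXIST */
		slots[index] = obj;
		return index;
	}
	for (i = 0; i < NSLOTS; i++) {
		if (!slots[i]) {
			slots[i] = obj;
			return i;
		}
	}
	return -ENOSPC;
}

int main(void)
{
	static char dev_a, dev_b;

	printf("%d\n", slot_alloc(&dev_a, 3));	/* 3: requested slot */
	printf("%d\n", slot_alloc(&dev_b, 3));	/* -EEXIST: already taken */
	printf("%d\n", slot_alloc(&dev_b, -1));	/* 0: first free slot */
	return 0;
}
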
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index c0e14e54909b..6f2e565bccc5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -420,7 +420,8 @@ static void null_lnvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
- nvm_end_io(rqd, error);
+ rqd->error = error;
+ nvm_end_io(rqd);
blk_put_request(rq);
}
@@ -431,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
+ rq = blk_mq_alloc_request(q,
+ op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return -ENOMEM;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);
@@ -460,7 +461,6 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
id->ver_id = 0x1;
id->vmnt = 0;
- id->cgrps = 1;
id->cap = 0x2;
id->dom = 0x1;
@@ -479,7 +479,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
sector_div(size, bs); /* convert size to pages */
size >>= 8; /* convert size to pgs per blk */
- grp = &id->groups[0];
+ grp = &id->grp;
grp->mtype = 0;
grp->fmtype = 0;
grp->num_ch = 1;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 92900f5f0b47..8127b8201a01 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q)
if (!rq)
break;
- /* filter out block requests we don't understand */
- if (rq->cmd_type != REQ_TYPE_FS) {
- blk_end_request_all(rq, 0);
- continue;
- }
-
/* deduce our operation (read, write, flush) */
/* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
* into a clearly defined set of RPC commands:
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index efefb5ac3004..3a15247942e4 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -25,6 +25,7 @@ config PARIDE_PD
config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
+ select BLK_SCSI_REQUEST # only for the generic cdrom code
---help---
This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 5fd2d0e25567..10aed84244f5 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -273,7 +273,7 @@ static const struct block_device_operations pcd_bdops = {
.check_events = pcd_block_check_events,
};
-static struct cdrom_device_ops pcd_dops = {
+static const struct cdrom_device_ops pcd_dops = {
.open = pcd_open,
.release = pcd_release,
.drive_status = pcd_drive_status,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c3ed2fc72daa..644ba0888bd4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */
static int pd_block; /* address of next requested block */
static int pd_count; /* number of blocks still to do */
static int pd_run; /* sectors in current cluster */
-static int pd_cmd; /* current command READ/WRITE */
static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void)
{
- if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
+ switch (req_op(pd_req)) {
+ case REQ_OP_DRV_IN:
phase = pd_special;
return pd_special();
- }
-
- pd_cmd = rq_data_dir(pd_req);
- if (pd_cmd == READ || pd_cmd == WRITE) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
@@ -458,7 +456,7 @@ static enum action do_pd_io_start(void)
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
- if (pd_cmd == READ)
+ if (req_op(pd_req) == REQ_OP_READ)
return do_pd_read_start();
else
return do_pd_write_start();
@@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
int err = 0;
- rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->special = func;
err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1b94c1ca5c5f..66d846ba85a9 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- WRITE : READ, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
goto out;
}
- rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+ scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+ memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
if (cgc->quiet)
@@ -1243,7 +1243,7 @@ try_next_bio:
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
- clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}
@@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 76f33c84ce3d..a809e3e9feb8 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req))
- break;
- } else if (req->cmd_type == REQ_TYPE_FS) {
+ return;
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req))
- break;
- } else {
+ return;
+ break;
+ default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
- continue;
}
}
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 36d2b9f4e836..362cecc77130 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1535,7 +1535,7 @@ static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p (was %d)\n", __func__, obj_request,
- atomic_read(&obj_request->kref.refcount));
+ kref_read(&obj_request->kref));
kref_get(&obj_request->kref);
}
@@ -1544,14 +1544,14 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request != NULL);
dout("%s: obj %p (was %d)\n", __func__, obj_request,
- atomic_read(&obj_request->kref.refcount));
+ kref_read(&obj_request->kref));
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
dout("%s: img %p (was %d)\n", __func__, img_request,
- atomic_read(&img_request->kref.refcount));
+ kref_read(&img_request->kref));
kref_get(&img_request->kref);
}
@@ -1562,7 +1562,7 @@ static void rbd_img_request_put(struct rbd_img_request *img_request)
{
rbd_assert(img_request != NULL);
dout("%s: img %p (was %d)\n", __func__, img_request,
- atomic_read(&img_request->kref.refcount));
+ kref_read(&img_request->kref));
if (img_request_child_test(img_request))
kref_put(&img_request->kref, rbd_parent_request_destroy);
else
@@ -4099,19 +4099,21 @@ static void rbd_queue_workfn(struct work_struct *work)
bool must_be_locked;
int result;
- if (rq->cmd_type != REQ_TYPE_FS) {
- dout("%s: non-fs request type %d\n", __func__,
- (int) rq->cmd_type);
- result = -EIO;
- goto err;
- }
-
- if (req_op(rq) == REQ_OP_DISCARD)
+ switch (req_op(rq)) {
+ case REQ_OP_DISCARD:
op_type = OBJ_OP_DISCARD;
- else if (req_op(rq) == REQ_OP_WRITE)
+ break;
+ case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
- else
+ break;
+ case REQ_OP_READ:
op_type = OBJ_OP_READ;
+ break;
+ default:
+ dout("%s: non-fs request type %d\n", __func__, req_op(rq));
+ result = -EIO;
+ goto err;
+ }
/* Ignore/skip any zero-length requests */
@@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index abf805e332e2..27833e4dae2a 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev,
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
uint cmd_in, ulong arg)
{
- int rc = 0;
+ static const int sg_version_num = 30527;
+ int rc = 0, timeout;
struct gendisk *disk = bdev->bd_disk;
struct skd_device *skdev = disk->private_data;
- void __user *p = (void *)arg;
+ int __user *p = (int __user *)arg;
pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
skdev->name, __func__, __LINE__,
@@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd_in) {
case SG_SET_TIMEOUT:
+ rc = get_user(timeout, p);
+ if (!rc)
+ disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
+ break;
case SG_GET_TIMEOUT:
+ rc = jiffies_to_clock_t(disk->queue->sg_timeout);
+ break;
case SG_GET_VERSION_NUM:
- rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+ rc = put_user(sg_version_num, p);
break;
case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, p);
+ rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
break;
default:
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 0e93ad7b8511..c8e072caf56f 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
if (!crq)
return NULL;
- rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+ rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq);
@@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
@@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 10332c24f961..024b473524c0 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -52,11 +52,13 @@ struct virtio_blk {
};
struct virtblk_req {
- struct request *req;
- struct virtio_blk_outhdr out_hdr;
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ struct scsi_request sreq; /* for SCSI passthrough, must be first */
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
struct virtio_scsi_inhdr in_hdr;
+#endif
+ struct virtio_blk_outhdr out_hdr;
u8 status;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
struct scatterlist sg[];
};
@@ -72,28 +74,88 @@ static inline int virtblk_result(struct virtblk_req *vbr)
}
}
-static int __virtblk_add_req(struct virtqueue *vq,
- struct virtblk_req *vbr,
- struct scatterlist *data_sg,
- bool have_data)
+/*
+ * If this is a packet command we need a couple of additional headers. Behind
+ * the normal outhdr we put a segment with the scsi command block, and before
+ * the normal inhdr we put the sense data and the inhdr with additional status
+ * information.
+ */
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
+ struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
- __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
+ sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
+ sgs[num_out++] = &cmd;
+
+ if (have_data) {
+ if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
+ sgs[num_out++] = data_sg;
+ else
+ sgs[num_out + num_in++] = data_sg;
+ }
+
+ sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
+ sgs[num_out + num_in++] = &sense;
+ sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+ sgs[num_out + num_in++] = &inhdr;
+ sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+ sgs[num_out + num_in++] = &status;
+
+ return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
+}
+
+static inline void virtblk_scsi_reques_done(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtio_blk *vblk = req->q->queuedata;
+ struct scsi_request *sreq = &vbr->sreq;
+
+ sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
+ sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
+ req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
+}
+
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long data)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct virtio_blk *vblk = disk->private_data;
/*
- * If this is a packet command we need a couple of additional headers.
- * Behind the normal outhdr we put a segment with the scsi command
- * block, and before the normal inhdr we put the sense data and the
- * inhdr with additional status information.
+ * Only allow the generic SCSI ioctls if the host can support it.
*/
- if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
- sgs[num_out++] = &cmd;
- }
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOTTY;
+
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
+ (void __user *)data);
+}
+#else
+static inline int virtblk_add_req_scsi(struct virtqueue *vq,
+ struct virtblk_req *vbr, struct scatterlist *data_sg,
+ bool have_data)
+{
+ return -EIO;
+}
+static inline void virtblk_scsi_reques_done(struct request *req)
+{
+}
+#define virtblk_ioctl NULL
+#endif /* CONFIG_VIRTIO_BLK_SCSI */
+
+static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
+ struct scatterlist *data_sg, bool have_data)
+{
+ struct scatterlist hdr, status, *sgs[3];
+ unsigned int num_out = 0, num_in = 0;
+
+ sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sgs[num_out++] = &hdr;
if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
@@ -102,14 +164,6 @@ static int __virtblk_add_req(struct virtqueue *vq,
sgs[num_out + num_in++] = data_sg;
}
- if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
- sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
- sgs[num_out + num_in++] = &sense;
- sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
- sgs[num_out + num_in++] = &inhdr;
- }
-
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
sgs[num_out + num_in++] = &status;
@@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr);
- if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
- req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
- req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
- req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
- } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
+ switch (req_op(req)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ virtblk_scsi_reques_done(req);
+ break;
+ case REQ_OP_DRV_IN:
req->errors = (error != 0);
+ break;
}
blk_mq_end_request(req, error);
@@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- blk_mq_complete_request(vbr->req, vbr->req->errors);
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ blk_mq_complete_request(req, req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
int qid = hctx->queue_num;
int err;
bool notify = false;
+ u32 type;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
- vbr->req = req;
- if (req_op(req) == REQ_OP_FLUSH) {
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- } else {
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
- vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- case REQ_TYPE_DRV_PRIV:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
- break;
- default:
- /* We don't put anything else in the queue. */
- BUG();
- }
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ type = 0;
+ break;
+ case REQ_OP_FLUSH:
+ type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ type = VIRTIO_BLK_T_SCSI_CMD;
+ break;
+ case REQ_OP_DRV_IN:
+ type = VIRTIO_BLK_T_GET_ID;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
+ vbr->out_hdr.sector = type ?
+ 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
+
blk_mq_start_request(req);
- num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
+ num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) {
- if (rq_data_dir(vbr->req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
+ err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ else
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
@@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, READ, GFP_KERNEL);
+ req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
if (IS_ERR(req))
return PTR_ERR(req);
- req->cmd_type = REQ_TYPE_DRV_PRIV;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
@@ -257,22 +314,6 @@ out:
return err;
}
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long data)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct virtio_blk *vblk = disk->private_data;
-
- /*
- * Only allow the generic SCSI ioctls if the host can support it.
- */
- if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOTTY;
-
- return scsi_cmd_blk_ioctl(bdev, mode, cmd,
- (void __user *)data);
-}
-
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
@@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq,
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ vbr->sreq.sense = vbr->sense;
+#endif
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
@@ -770,7 +814,7 @@ static void virtblk_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
+ refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
- VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
+ VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ VIRTIO_BLK_F_SCSI,
+#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
}
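
With the vbr->req back-pointer removed above, virtio_blk recovers the request from its per-command data via blk_mq_rq_from_pdu()/blk_mq_rq_to_pdu(); blk-mq lays the driver pdu out directly behind the request, so both helpers reduce to constant-offset pointer arithmetic. A hedged sketch of that layout trick with hypothetical struct names (not the blk-mq definitions themselves):

#include <stdio.h>
#include <stdlib.h>

struct request { int tag; };
struct my_pdu  { int status; };

/* The pdu is allocated directly after the request, so the two
 * conversions are constant-offset pointer arithmetic. */
static struct my_pdu *rq_to_pdu(struct request *rq)
{
	return (struct my_pdu *)(rq + 1);
}

static struct request *rq_from_pdu(struct my_pdu *pdu)
{
	return (struct request *)pdu - 1;
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq) + sizeof(struct my_pdu));

	rq->tag = 42;
	rq_to_pdu(rq)->status = 0;
	printf("tag via pdu: %d\n", rq_from_pdu(rq_to_pdu(rq))->tag);
	free(rq);
	return 0;
}
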
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 415e79b69d34..8fe61b5dc5a6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -38,8 +38,8 @@ struct backend_info {
static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
-static void backend_changed(struct xenbus_watch *, const char **,
- unsigned int);
+static void backend_changed(struct xenbus_watch *, const char *,
+ const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);
@@ -661,7 +661,7 @@ fail:
* ready, connect.
*/
static void backend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
int err;
unsigned major;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..5067a0a952cb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
- unsigned int feature_flush;
- unsigned int feature_fua;
+ unsigned int feature_flush:1;
+ unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ unsigned int feature_persistent:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int feature_persistent:1;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
@@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
- return ((req->cmd_type != REQ_TYPE_FS) ||
+ return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
}
else
grants = info->max_indirect_segments;
- psegs = grants / GRANTS_PER_PSEG;
+ psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
blkfront_setup_discard(info);
info->feature_persistent =
- xenbus_read_unsigned(info->xbdev->otherend,
- "feature-persistent", 0);
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ if (indirect_segments > xen_blkif_max_segments)
+ indirect_segments = xen_blkif_max_segments;
+ if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ indirect_segments = 0;
+ info->max_indirect_segments = indirect_segments;
}
/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
+ if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c4328d9d9981..757dce2147e0 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
struct request *req;
while ((req = blk_peek_request(q)) != NULL) {
- if (req->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(req))
break;
blk_start_request(req);
__blk_end_request_all(req, -EIO);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e5ab7d9e8c45..3cd7856156b4 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram)
{
revalidate_disk(zram->disk);
/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
- zram->disk->queue->backing_dev_info.capabilities |=
+ zram->disk->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
}
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 3cc9bff9d99d..c2c14a12713b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -344,7 +344,7 @@ config BT_WILINK
config BT_QCOMSMD
tristate "Qualcomm SMD based HCI support"
- depends on QCOM_SMD && QCOM_WCNSS_CTRL
+ depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
select BT_QCA
help
Qualcomm SMD based HCI driver.
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fadba88745dc..b793853ff05f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x04CA, 0x3014) },
+ { USB_DEVICE(0x04CA, 0x3018) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index fdb44829ab6f..ba3dd2eafc09 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -178,6 +178,9 @@ static int btbcm_reset(struct hci_dev *hdev)
}
kfree_skb(skb);
+ /* 100 msec delay for module to complete reset process */
+ msleep(100);
+
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index e6a85f0e6309..c38cb5b91291 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -502,7 +502,7 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv,
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
BT_CAL_HDR_LEN + len);
if (ret)
- BT_ERR("Failed to download caibration data");
+ BT_ERR("Failed to download calibration data");
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d02f2c14df32..08e01f002bad 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -97,11 +97,11 @@ static int btmrvl_sdio_probe_of(struct device *dev,
cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
if (!cfg->irq_bt) {
dev_err(dev, "fail to parse irq_bt from device tree");
+ cfg->irq_bt = -1;
} else {
ret = devm_request_irq(dev, cfg->irq_bt,
btmrvl_wake_irq_bt,
- IRQF_TRIGGER_LOW,
- "bt_wake", cfg);
+ 0, "bt_wake", cfg);
if (ret) {
dev_err(dev,
"Failed to request irq_bt %d (%d)\n",
@@ -1624,7 +1624,7 @@ static int btmrvl_sdio_suspend(struct device *dev)
if (priv->adapter->hs_state != HS_ACTIVATED) {
if (btmrvl_enable_hs(priv)) {
- BT_ERR("HS not actived, suspend failed!");
+ BT_ERR("HS not activated, suspend failed!");
priv->adapter->is_suspending = false;
return -EBUSY;
}
@@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev)
/* Disable platform specific wakeup interrupt */
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
- if (!card->plt_wake_cfg->wake_by_bt)
- disable_irq(card->plt_wake_cfg->irq_bt);
+ disable_irq(card->plt_wake_cfg->irq_bt);
+ if (card->plt_wake_cfg->wake_by_bt)
+ /* Undo our disable, since interrupt handler already
+ * did this.
+ */
+ enable_irq(card->plt_wake_cfg->irq_bt);
}
return 0;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 08c2c93887c1..8d4868af9bbd 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -165,6 +165,7 @@ static const struct of_device_id btqcomsmd_of_match[] = {
{ .compatible = "qcom,wcnss-bt", },
{ },
};
+MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
static struct platform_driver btqcomsmd_driver = {
.probe = btqcomsmd_probe,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2f633df9f4e6..1c8094ef3f22 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/firmware.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -130,6 +132,10 @@ static const struct usb_device_id btusb_table[] = {
/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
{ USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+ /* Broadcom BCM920703 (HTC Vive) */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
/* Foxconn - Hon Hai */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
@@ -154,6 +160,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Dell Computer - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x413c, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
/* Toshiba Corp - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
@@ -209,6 +219,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -369,6 +380,7 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_BOOTING 9
#define BTUSB_RESET_RESUME 10
#define BTUSB_DIAG_RUNNING 11
+#define BTUSB_OOB_WAKE_ENABLED 12
struct btusb_data {
struct hci_dev *hdev;
@@ -416,6 +428,8 @@ struct btusb_data {
int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
int (*setup_on_usb)(struct hci_dev *hdev);
+
+ int oob_wake_irq; /* irq for out-of-band wake-on-bt */
};
static inline void btusb_free_frags(struct btusb_data *data)
@@ -2338,6 +2352,50 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
return 0;
}
+#ifdef CONFIG_PM
+/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
+static int marvell_config_oob_wake(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct device *dev = &data->udev->dev;
+ u16 pin, gap, opcode;
+ int ret;
+ u8 cmd[5];
+
+ /* Move on if no wakeup pin specified */
+ if (of_property_read_u16(dev->of_node, "marvell,wakeup-pin", &pin) ||
+ of_property_read_u16(dev->of_node, "marvell,wakeup-gap-ms", &gap))
+ return 0;
+
+ /* Vendor specific command to configure a GPIO as wake-up pin */
+ opcode = hci_opcode_pack(0x3F, 0x59);
+ cmd[0] = opcode & 0xFF;
+ cmd[1] = opcode >> 8;
+ cmd[2] = 2; /* length of parameters that follow */
+ cmd[3] = pin;
+ cmd[4] = gap; /* time in ms, for which wakeup pin should be asserted */
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hdev, "%s: No memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+
+ ret = btusb_send_frame(hdev, skb);
+ if (ret) {
+ bt_dev_err(hdev, "%s: configuration failed\n", __func__);
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
const bdaddr_t *bdaddr)
{
@@ -2728,6 +2786,66 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
}
#endif
+#ifdef CONFIG_PM
+static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
+{
+ struct btusb_data *data = priv;
+
+ pm_wakeup_event(&data->udev->dev, 0);
+
+ /* Disable only if not already disabled (keep it balanced) */
+ if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) {
+ disable_irq_nosync(irq);
+ disable_irq_wake(irq);
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id btusb_match_table[] = {
+ { .compatible = "usb1286,204e" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, btusb_match_table);
+
+/* Use an oob wakeup pin? */
+static int btusb_config_oob_wake(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct device *dev = &data->udev->dev;
+ int irq, ret;
+
+ clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags);
+
+ if (!of_match_device(btusb_match_table, dev))
+ return 0;
+
+ /* Move on if no IRQ specified */
+ irq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (irq <= 0) {
+ bt_dev_dbg(hdev, "%s: no OOB Wakeup IRQ in DT", __func__);
+ return 0;
+ }
+
+ ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
+ 0, "OOB Wake-on-BT", data);
+ if (ret) {
+ bt_dev_err(hdev, "%s: IRQ request failed", __func__);
+ return ret;
+ }
+
+ ret = device_init_wakeup(dev, true);
+ if (ret) {
+ bt_dev_err(hdev, "%s: failed to init_wakeup", __func__);
+ return ret;
+ }
+
+ data->oob_wake_irq = irq;
+ disable_irq(irq);
+ bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
+ return 0;
+}
+#endif
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2849,6 +2967,18 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+#ifdef CONFIG_PM
+ err = btusb_config_oob_wake(hdev);
+ if (err)
+ goto out_free_dev;
+
+ /* Marvell devices may need a specific chip configuration */
+ if (id->driver_info & BTUSB_MARVELL && data->oob_wake_irq) {
+ err = marvell_config_oob_wake(hdev);
+ if (err)
+ goto out_free_dev;
+ }
+#endif
if (id->driver_info & BTUSB_CW6622)
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
@@ -2991,18 +3121,15 @@ static int btusb_probe(struct usb_interface *intf,
err = usb_set_interface(data->udev, 0, 0);
if (err < 0) {
BT_ERR("failed to set interface 0, alt 0 %d", err);
- hci_free_dev(hdev);
- return err;
+ goto out_free_dev;
}
}
if (data->isoc) {
err = usb_driver_claim_interface(&btusb_driver,
data->isoc, data);
- if (err < 0) {
- hci_free_dev(hdev);
- return err;
- }
+ if (err < 0)
+ goto out_free_dev;
}
#ifdef CONFIG_BT_HCIBTUSB_BCM
@@ -3016,14 +3143,16 @@ static int btusb_probe(struct usb_interface *intf,
#endif
err = hci_register_dev(hdev);
- if (err < 0) {
- hci_free_dev(hdev);
- return err;
- }
+ if (err < 0)
+ goto out_free_dev;
usb_set_intfdata(intf, data);
return 0;
+
+out_free_dev:
+ hci_free_dev(hdev);
+ return err;
}
static void btusb_disconnect(struct usb_interface *intf)
@@ -3062,6 +3191,9 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->isoc);
}
+ if (data->oob_wake_irq)
+ device_init_wakeup(&data->udev->dev, false);
+
hci_free_dev(hdev);
}
@@ -3090,6 +3222,12 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
+ if (data->oob_wake_irq && device_may_wakeup(&data->udev->dev)) {
+ set_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags);
+ enable_irq_wake(data->oob_wake_irq);
+ enable_irq(data->oob_wake_irq);
+ }
+
/* Optionally request a device reset on resume, but only when
* wakeups are disabled. If wakeups are enabled we assume the
* device will stay powered up throughout suspend.
@@ -3127,6 +3265,12 @@ static int btusb_resume(struct usb_interface *intf)
if (--data->suspend_count)
return 0;
+ /* Disable only if not already disabled (keep it balanced) */
+ if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) {
+ disable_irq(data->oob_wake_irq);
+ disable_irq_wake(data->oob_wake_irq);
+ }
+
if (!test_bit(HCI_RUNNING, &hdev->flags))
goto done;
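
The btusb suspend/resume paths and the OOB wake handler above keep enable_irq()/disable_irq() balanced by guarding the disable with test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, ...), so whichever of the wake interrupt or resume runs first performs the disable and the other skips it. A sketch of that idiom, using a C11 atomic flag and a plain depth counter in place of the kernel bitops and IRQ core:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool wake_enabled;
static int irq_depth = 1;	/* stand-in for the IRQ core's disable depth:
				 * 1 = disabled once at probe, 0 = enabled */

static void suspend_arm(void)
{
	atomic_store(&wake_enabled, true);
	irq_depth--;				/* enable_irq() */
}

static void disarm(const char *who)
{
	/* Disable only if still armed, so the wake IRQ handler and
	 * resume cannot both disable and unbalance the depth. */
	if (atomic_exchange(&wake_enabled, false)) {
		irq_depth++;			/* disable_irq() */
		printf("%s disabled the wake irq\n", who);
	}
}

int main(void)
{
	suspend_arm();
	disarm("irq handler");	/* first caller wins */
	disarm("resume");	/* skipped: already disarmed */
	printf("depth=%d\n", irq_depth);
	return 0;
}
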
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8f6c23c20c52..5262a2077d7a 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -618,14 +618,25 @@ unlock:
}
#endif
-static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
-static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
-static const struct acpi_gpio_params host_wakeup_gpios = { 2, 0, false };
-
-static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
- { "device-wakeup-gpios", &device_wakeup_gpios, 1 },
- { "shutdown-gpios", &shutdown_gpios, 1 },
- { "host-wakeup-gpios", &host_wakeup_gpios, 1 },
+static const struct acpi_gpio_params int_last_device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params int_last_shutdown_gpios = { 1, 0, false };
+static const struct acpi_gpio_params int_last_host_wakeup_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_int_last_gpios[] = {
+ { "device-wakeup-gpios", &int_last_device_wakeup_gpios, 1 },
+ { "shutdown-gpios", &int_last_shutdown_gpios, 1 },
+ { "host-wakeup-gpios", &int_last_host_wakeup_gpios, 1 },
+ { },
+};
+
+static const struct acpi_gpio_params int_first_host_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params int_first_device_wakeup_gpios = { 1, 0, false };
+static const struct acpi_gpio_params int_first_shutdown_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
+ { "device-wakeup-gpios", &int_first_device_wakeup_gpios, 1 },
+ { "shutdown-gpios", &int_first_shutdown_gpios, 1 },
+ { "host-wakeup-gpios", &int_first_host_wakeup_gpios, 1 },
{ },
};
@@ -692,12 +703,19 @@ static int bcm_acpi_probe(struct bcm_device *dev)
struct platform_device *pdev = dev->pdev;
LIST_HEAD(resources);
const struct dmi_system_id *dmi_id;
+ const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios;
+ const struct acpi_device_id *id;
int ret;
- /* Retrieve GPIO data */
dev->name = dev_name(&pdev->dev);
+
+ /* Retrieve GPIO data */
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (id)
+ gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data;
+
ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
- acpi_bcm_default_gpios);
+ gpio_mapping);
if (ret)
return ret;
@@ -822,20 +840,22 @@ static const struct hci_uart_proto bcm_proto = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id bcm_acpi_match[] = {
- { "BCM2E1A", 0 },
- { "BCM2E39", 0 },
- { "BCM2E3A", 0 },
- { "BCM2E3D", 0 },
- { "BCM2E3F", 0 },
- { "BCM2E40", 0 },
- { "BCM2E54", 0 },
- { "BCM2E55", 0 },
- { "BCM2E64", 0 },
- { "BCM2E65", 0 },
- { "BCM2E67", 0 },
- { "BCM2E71", 0 },
- { "BCM2E7B", 0 },
- { "BCM2E7C", 0 },
+ { "BCM2E1A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E39", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E3A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E3D", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E3F", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E40", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E54", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E55", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E64", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E95", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+ { "BCM2E96", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
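
For reference, a condensed sketch of the lookup the hci_bcm changes above perform: the matched ACPI ID's driver_data selects which GPIO mapping to register, falling back to the "interrupt last" table. Only the helper name is hypothetical; the tables and calls are the ones added by the patch.

static int example_pick_gpio_mapping(struct platform_device *pdev)
{
	const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios;
	const struct acpi_device_id *id;

	/* driver_data of the matched entry points at the per-ID mapping */
	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
	if (id && id->driver_data)
		gpio_mapping = (const struct acpi_gpio_mapping *)id->driver_data;

	return acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
					 gpio_mapping);
}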
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 05c230719a47..f242dfd0c2e2 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -335,7 +335,7 @@ static void hci_ibs_tx_idle_timeout(unsigned long arg)
/* Fall through */
default:
- BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
}
@@ -373,7 +373,7 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg)
/* Fall through */
default:
- BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 59cca72647a6..87739649eac2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -281,8 +281,8 @@
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <linux/times.h>
-
#include <linux/uaccess.h>
+#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
@@ -342,8 +342,8 @@ static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
-static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc)
+int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
+ struct packet_command *cgc)
{
if (cgc->sense) {
cgc->sense->sense_key = 0x05;
@@ -354,6 +354,7 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
cgc->stat = -EIO;
return -EIO;
}
+EXPORT_SYMBOL(cdrom_dummy_generic_packet);
static int cdrom_flush_cache(struct cdrom_device_info *cdi)
{
@@ -371,7 +372,7 @@ static int cdrom_flush_cache(struct cdrom_device_info *cdi)
static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
disc_information *di)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
@@ -586,7 +587,7 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
int *change_capability = (int *)&cdo->capability; /* hack */
cd_dbg(CD_OPEN, "entering register_cdrom\n");
@@ -610,7 +611,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
ENSURE(reset, CDC_RESET);
ENSURE(generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
- cdo->n_minors = 0;
cdi->options = CDO_USE_FFLAGS;
if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
@@ -630,8 +630,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
else
cdi->cdda_method = CDDA_OLD;
- if (!cdo->generic_packet)
- cdo->generic_packet = cdrom_dummy_generic_packet;
+ WARN_ON(!cdo->generic_packet);
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
mutex_lock(&cdrom_mutex);
@@ -652,7 +651,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
if (cdi->exit)
cdi->exit(cdi);
- cdi->ops->n_minors--;
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
@@ -1036,7 +1034,7 @@ static
int open_for_data(struct cdrom_device_info *cdi)
{
int ret;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
tracktype tracks;
cd_dbg(CD_OPEN, "entering open_for_data\n");
/* Check if the driver can report drive status. If it can, we
@@ -1198,8 +1196,8 @@ err:
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
*/
-static int check_for_audio_disc(struct cdrom_device_info * cdi,
- struct cdrom_device_ops * cdo)
+static int check_for_audio_disc(struct cdrom_device_info *cdi,
+ const struct cdrom_device_ops *cdo)
{
int ret;
tracktype tracks;
@@ -1254,7 +1252,7 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
cd_dbg(CD_CLOSE, "entering cdrom_release\n");
@@ -1294,7 +1292,7 @@ static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
struct cdrom_changer_info *buf)
{
struct packet_command cgc;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
int length;
/*
@@ -1643,7 +1641,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
int ret;
u_char buf[20];
struct packet_command cgc;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
rpc_state_t rpc_state;
memset(buf, 0, sizeof(buf));
@@ -1791,7 +1789,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
{
unsigned char buf[21], *base;
struct dvd_layer *layer;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
int ret, layer_num = s->physical.layer_num;
if (layer_num >= DVD_LAYERS)
@@ -1842,7 +1840,7 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
{
int ret;
u_char buf[8];
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
@@ -1866,7 +1864,7 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
{
int ret, size;
u_char *buf;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->disckey.value) + 4;
@@ -1894,7 +1892,7 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
{
int ret, size = 4 + 188;
u_char *buf;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
@@ -1928,7 +1926,7 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
{
int ret = 0, size;
u_char *buf;
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->manufact.value) + 4;
@@ -1995,7 +1993,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int page_code, int page_control)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
@@ -2010,7 +2008,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi,
int cdrom_mode_select(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
memset(cgc->cmd, 0, sizeof(cgc->cmd));
memset(cgc->buffer, 0, 2);
@@ -2025,7 +2023,7 @@ int cdrom_mode_select(struct cdrom_device_info *cdi,
static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
struct cdrom_subchnl *subchnl, int mcn)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
char buffer[32];
int ret;
@@ -2073,7 +2071,7 @@ static int cdrom_read_cd(struct cdrom_device_info *cdi,
struct packet_command *cgc, int lba,
int blocksize, int nblocks)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_10;
@@ -2093,7 +2091,7 @@ static int cdrom_read_block(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int lba, int nblocks, int format, int blksize)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
memset(&cgc->cmd, 0, sizeof(cgc->cmd));
cgc->cmd[0] = GPCMD_READ_CD;
@@ -2172,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
{
struct request_queue *q = cdi->disk->queue;
struct request *rq;
+ struct scsi_request *req;
struct bio *bio;
unsigned int len;
int nr, ret = 0;
@@ -2190,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
- rq = blk_get_request(q, READ, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
}
- blk_rq_set_block_pc(rq);
+ req = scsi_req(rq);
+ scsi_req_init(rq);
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
@@ -2203,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
break;
}
- rq->cmd[0] = GPCMD_READ_CD;
- rq->cmd[1] = 1 << 2;
- rq->cmd[2] = (lba >> 24) & 0xff;
- rq->cmd[3] = (lba >> 16) & 0xff;
- rq->cmd[4] = (lba >> 8) & 0xff;
- rq->cmd[5] = lba & 0xff;
- rq->cmd[6] = (nr >> 16) & 0xff;
- rq->cmd[7] = (nr >> 8) & 0xff;
- rq->cmd[8] = nr & 0xff;
- rq->cmd[9] = 0xf8;
-
- rq->cmd_len = 12;
+ req->cmd[0] = GPCMD_READ_CD;
+ req->cmd[1] = 1 << 2;
+ req->cmd[2] = (lba >> 24) & 0xff;
+ req->cmd[3] = (lba >> 16) & 0xff;
+ req->cmd[4] = (lba >> 8) & 0xff;
+ req->cmd[5] = lba & 0xff;
+ req->cmd[6] = (nr >> 16) & 0xff;
+ req->cmd[7] = (nr >> 8) & 0xff;
+ req->cmd[8] = nr & 0xff;
+ req->cmd[9] = 0xf8;
+
+ req->cmd_len = 12;
rq->timeout = 60 * HZ;
bio = rq->bio;
if (blk_execute_rq(q, cdi->disk, rq, 0)) {
- struct request_sense *s = rq->sense;
+ struct request_sense *s = req->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
@@ -2764,7 +2764,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
*/
static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
struct modesel_head mh;
@@ -2790,7 +2790,7 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
static int cdrom_get_track_info(struct cdrom_device_info *cdi,
__u16 track, __u8 type, track_information *ti)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
int ret, buflen;
@@ -3049,7 +3049,7 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_msf msf;
cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf)))
@@ -3069,7 +3069,7 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
void __user *arg,
struct packet_command *cgc)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
struct cdrom_blk blk;
cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk)))
@@ -3164,7 +3164,7 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
cgc->cmd[0] = GPCMD_START_STOP_UNIT;
cgc->cmd[1] = 1;
@@ -3177,7 +3177,7 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
- struct cdrom_device_ops *cdo = cdi->ops;
+ const struct cdrom_device_ops *cdo = cdi->ops;
cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
cgc->cmd[0] = GPCMD_PAUSE_RESUME;
cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
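
The cdrom_read_cdda_bpc() conversion above follows the generic BLOCK_PC to scsi_request migration: the CDB and sense buffer now live in struct scsi_request rather than struct request. A trimmed sketch of the pattern, with a hypothetical helper name and the data-buffer mapping and error handling reduced to the essentials:

/*
 * Sketch of the scsi_request pattern used above.  Assumes
 * <scsi/scsi_request.h> and <linux/blkdev.h>.
 */
static int example_read_cd(struct request_queue *q, struct gendisk *disk,
			   u32 lba, u32 nr)
{
	struct request *rq;
	struct scsi_request *req;
	int ret = 0;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	req = scsi_req(rq);
	scsi_req_init(rq);

	/* The CDB now lives in struct scsi_request, not struct request */
	req->cmd[0] = GPCMD_READ_CD;
	req->cmd[1] = 1 << 2;
	req->cmd[2] = (lba >> 24) & 0xff;
	req->cmd[3] = (lba >> 16) & 0xff;
	req->cmd[4] = (lba >> 8) & 0xff;
	req->cmd[5] = lba & 0xff;
	req->cmd[6] = (nr >> 16) & 0xff;
	req->cmd[7] = (nr >> 8) & 0xff;
	req->cmd[8] = nr & 0xff;
	req->cmd[9] = 0xf8;
	req->cmd_len = 12;
	rq->timeout = 60 * HZ;

	/* blk_rq_map_user() for the data buffer is omitted in this sketch */
	if (blk_execute_rq(q, disk, rq, 0))
		ret = -EIO;	/* sense data is now found in req->sense */

	blk_put_request(rq);
	return ret;
}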
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 584bc3126403..1372763a948f 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -481,7 +481,7 @@ static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
return -EINVAL;
}
-static struct cdrom_device_ops gdrom_ops = {
+static const struct cdrom_device_ops gdrom_ops = {
.open = gdrom_open,
.release = gdrom_release,
.drive_status = gdrom_drivestatus,
@@ -489,9 +489,9 @@ static struct cdrom_device_ops gdrom_ops = {
.get_last_session = gdrom_get_last_session,
.reset = gdrom_hardreset,
.audio_ioctl = gdrom_audio_ioctl,
+ .generic_packet = cdrom_dummy_generic_packet,
.capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
- .n_minors = 1,
};
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
@@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq)
struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) {
- if (req->cmd_type != REQ_TYPE_FS) {
- printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
- __blk_end_request_all(req, -EIO);
- continue;
- }
- if (rq_data_dir(req) != READ) {
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
+ break;
+ case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
- continue;
+ break;
+ default:
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+ __blk_end_request_all(req, -EIO);
+ break;
}
-
- /*
- * Add to list of deferred work and then schedule
- * workqueue.
- */
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
}
}
@@ -807,16 +808,20 @@ static int probe_gdrom(struct platform_device *devptr)
if (err)
goto probe_fail_cmdirq_register;
gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
- if (!gd.gdrom_rq)
+ if (!gd.gdrom_rq) {
+ err = -ENOMEM;
goto probe_fail_requestq;
+ }
err = probe_gdrom_setupqueue();
if (err)
goto probe_fail_toc;
gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
- if (!gd.toc)
+ if (!gd.toc) {
+ err = -ENOMEM;
goto probe_fail_toc;
+ }
add_disk(gd.disk);
return 0;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 6ce5ce8be2f2..87fba424817e 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
- memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
- memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
- memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 277186d3b668..af985cca413c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -6,6 +6,7 @@ menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
select SECURITYFS
+ select CRYPTO_HASH_INFO
---help---
If you have a TPM security chip in your system, which
implements the Trusted Computing Group's specification,
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a05b1ebd0b26..3d386a8c579f 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
- tpm_eventlog.o
+ tpm1_eventlog.o tpm2_eventlog.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
tpm-$(CONFIG_OF) += tpm_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 6f060c76217b..e8e0f7c02686 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/wait.h>
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index a77262d31911..c406343848da 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -141,7 +141,7 @@ static void tpm_dev_release(struct device *dev)
* Allocates a new struct tpm_chip instance and assigns a free
* device number for it. Must be paired with put_device(&chip->dev).
*/
-struct tpm_chip *tpm_chip_alloc(struct device *dev,
+struct tpm_chip *tpm_chip_alloc(struct device *pdev,
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
@@ -160,7 +160,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *dev,
rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
mutex_unlock(&idr_lock);
if (rc < 0) {
- dev_err(dev, "No available tpm device numbers\n");
+ dev_err(pdev, "No available tpm device numbers\n");
kfree(chip);
return ERR_PTR(rc);
}
@@ -170,7 +170,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *dev,
chip->dev.class = tpm_class;
chip->dev.release = tpm_dev_release;
- chip->dev.parent = dev;
+ chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
if (chip->dev_num == 0)
@@ -182,7 +182,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *dev,
if (rc)
goto out;
- if (!dev)
+ if (!pdev)
chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..02a8850d3a69 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -38,6 +38,9 @@ static void user_reader_timeout(unsigned long ptr)
{
struct file_priv *priv = (struct file_priv *)ptr;
+ pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
+ task_tgid_nr(current));
+
schedule_work(&priv->work);
}
@@ -157,7 +160,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
- mod_timer(&priv->user_read_timer, jiffies + (60 * HZ));
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
return in_size;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index a2688ac2b48f..bd2128e0b56c 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -47,7 +47,7 @@
static int tpm_suspend_pcr;
module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
- "PCR to use for dummy writes to faciltate flush on suspend.");
+ "PCR to use for dummy writes to facilitate flush on suspend.");
/*
* Array with one entry per ordinal defining the maximum amount
@@ -328,8 +328,17 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-/*
- * Internal kernel interface to transmit TPM commands
+/**
+ * tpm_transmit - Internal kernel interface to transmit TPM commands.
+ *
+ * @chip: TPM chip to use
+ * @buf: TPM command buffer
+ * @bufsiz: length of the TPM command buffer
+ * @flags: tpm transmit flags - bitmap
+ *
+ * Return:
+ * 0 when the operation is successful.
+ * A negative number for system errors (errno).
*/
ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags)
@@ -409,31 +418,55 @@ out:
return rc;
}
-#define TPM_DIGEST_SIZE 20
-#define TPM_RET_CODE_IDX 6
-
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
- int len, unsigned int flags, const char *desc)
+/**
+ * tpm_transmit_cmd - send a tpm command to the device
+ * The function extracts the TPM return code from the output header.
+ *
+ * @chip: TPM chip to use
+ * @buf: TPM command buffer
+ * @bufsiz: length of the buffer
+ * @min_rsp_body_length: minimum expected length of response body
+ * @flags: tpm transmit flags - bitmap
+ * @desc: command description used in the error message
+ *
+ * Return:
+ * 0 when the operation is successful.
+ * A negative number for system errors (errno).
+ * A positive number for a TPM error.
+ */
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *buf,
+ size_t bufsiz, size_t min_rsp_body_length,
+ unsigned int flags, const char *desc)
{
const struct tpm_output_header *header;
int err;
+ ssize_t len;
- len = tpm_transmit(chip, (const u8 *)cmd, len, flags);
+ len = tpm_transmit(chip, (const u8 *)buf, bufsiz, flags);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
return -EFAULT;
- header = cmd;
+ header = buf;
+ if (len != be32_to_cpu(header->length))
+ return -EFAULT;
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
+ if (err)
+ return err;
+
+ if (len < min_rsp_body_length + TPM_HEADER_SIZE)
+ return -EFAULT;
- return err;
+ return 0;
}
+#define TPM_DIGEST_SIZE 20
+#define TPM_RET_CODE_IDX 6
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_ORD_GET_CAP cpu_to_be32(101)
#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
@@ -445,7 +478,7 @@ static const struct tpm_input_header tpm_getcap_header = {
};
ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
- const char *desc)
+ const char *desc, size_t min_cap_length)
{
struct tpm_cmd_t tpm_cmd;
int rc;
@@ -468,8 +501,8 @@ ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
}
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- desc);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ min_cap_length, 0, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
@@ -493,14 +526,13 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
start_cmd.params.startup_in.startup_type = startup_type;
return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- "attempting to start the TPM");
+ 0, "attempting to start the TPM");
}
int tpm_get_timeouts(struct tpm_chip *chip)
{
cap_t cap;
- unsigned long new_timeout[4];
- unsigned long old_timeout[4];
+ unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
ssize_t rc;
if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
@@ -523,8 +555,8 @@ int tpm_get_timeouts(struct tpm_chip *chip)
return 0;
}
- rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
- "attempting to determine the timeouts");
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
+ sizeof(cap.timeout));
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
@@ -533,16 +565,26 @@ int tpm_get_timeouts(struct tpm_chip *chip)
return rc;
rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
- "attempting to determine the timeouts");
+ "attempting to determine the timeouts",
+ sizeof(cap.timeout));
}
- if (rc)
+
+ if (rc) {
+ dev_err(&chip->dev,
+ "A TPM error (%zd) occurred attempting to determine the timeouts\n",
+ rc);
return rc;
+ }
- old_timeout[0] = be32_to_cpu(cap.timeout.a);
- old_timeout[1] = be32_to_cpu(cap.timeout.b);
- old_timeout[2] = be32_to_cpu(cap.timeout.c);
- old_timeout[3] = be32_to_cpu(cap.timeout.d);
- memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+ timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+ timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+ timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+ timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+ timeout_chip[0] = be32_to_cpu(cap.timeout.a);
+ timeout_chip[1] = be32_to_cpu(cap.timeout.b);
+ timeout_chip[2] = be32_to_cpu(cap.timeout.c);
+ timeout_chip[3] = be32_to_cpu(cap.timeout.d);
+ memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
/*
* Provide ability for vendor overrides of timeout values in case
@@ -550,16 +592,24 @@ int tpm_get_timeouts(struct tpm_chip *chip)
*/
if (chip->ops->update_timeouts != NULL)
chip->timeout_adjusted =
- chip->ops->update_timeouts(chip, new_timeout);
+ chip->ops->update_timeouts(chip, timeout_eff);
if (!chip->timeout_adjusted) {
- /* Don't overwrite default if value is 0 */
- if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
- int i;
+ /* Restore default if chip reported 0 */
+ int i;
+ for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+ if (timeout_eff[i])
+ continue;
+
+ timeout_eff[i] = timeout_old[i];
+ chip->timeout_adjusted = true;
+ }
+
+ if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
/* timeouts in msec rather usec */
- for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
- new_timeout[i] *= 1000;
+ for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+ timeout_eff[i] *= 1000;
chip->timeout_adjusted = true;
}
}
@@ -568,19 +618,20 @@ int tpm_get_timeouts(struct tpm_chip *chip)
if (chip->timeout_adjusted) {
dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
- old_timeout[0], new_timeout[0],
- old_timeout[1], new_timeout[1],
- old_timeout[2], new_timeout[2],
- old_timeout[3], new_timeout[3]);
+ timeout_chip[0], timeout_eff[0],
+ timeout_chip[1], timeout_eff[1],
+ timeout_chip[2], timeout_eff[2],
+ timeout_chip[3], timeout_eff[3]);
}
- chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
- chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
- chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
- chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
+ chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+ chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+ chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+ chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
- "attempting to determine the durations");
+ "attempting to determine the durations",
+ sizeof(cap.duration));
if (rc)
return rc;
@@ -631,13 +682,14 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
struct tpm_cmd_t cmd;
cmd.header.in = continue_selftest_header;
- rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, 0, 0,
"continue selftest");
return rc;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
+#define READ_PCR_RESULT_BODY_SIZE 20
static const struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
@@ -651,7 +703,8 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
- rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
+ READ_PCR_RESULT_BODY_SIZE, 0,
"attempting to read a pcr value");
if (rc == 0)
@@ -714,6 +767,7 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_RESULT_SIZE 34
+#define EXTEND_PCR_RESULT_BODY_SIZE 20
static const struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
@@ -735,13 +789,25 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
struct tpm_cmd_t cmd;
int rc;
struct tpm_chip *chip;
+ struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
+ u32 count = 0;
+ int i;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = tpm2_pcr_extend(chip, pcr_idx, hash);
+ memset(digest_list, 0, sizeof(digest_list));
+
+ for (i = 0; i < ARRAY_SIZE(chip->active_banks) &&
+ chip->active_banks[i] != TPM2_ALG_ERROR; i++) {
+ digest_list[i].alg_id = chip->active_banks[i];
+ memcpy(digest_list[i].digest, hash, TPM_DIGEST_SIZE);
+ count++;
+ }
+
+ rc = tpm2_pcr_extend(chip, pcr_idx, count, digest_list);
tpm_put_ops(chip);
return rc;
}
@@ -749,7 +815,8 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ EXTEND_PCR_RESULT_BODY_SIZE, 0,
"attempting extend a PCR value");
tpm_put_ops(chip);
@@ -853,7 +920,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
if (chip == NULL)
return -ENODEV;
- rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
+ rc = tpm_transmit_cmd(chip, cmd, buflen, 0, 0, "attempting tpm_cmd");
tpm_put_ops(chip);
return rc;
@@ -955,7 +1022,8 @@ int tpm_pm_suspend(struct device *dev)
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ EXTEND_PCR_RESULT_BODY_SIZE, 0,
"extending dummy pcr before suspend");
}
@@ -963,7 +1031,7 @@ int tpm_pm_suspend(struct device *dev)
for (try = 0; try < TPM_RETRY; try++) {
cmd.header.in = savestate_header;
rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, 0,
- NULL);
+ 0, NULL);
/*
* If the TPM indicates that it is too busy to respond to
@@ -1025,7 +1093,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
{
struct tpm_chip *chip;
struct tpm_cmd_t tpm_cmd;
- u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
+ u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
int err, total = 0, retries = 5;
u8 *dest = out;
@@ -1048,11 +1116,20 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
err = tpm_transmit_cmd(chip, &tpm_cmd,
TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+ offsetof(struct tpm_getrandom_out,
+ rng_data),
0, "attempting get random");
if (err)
break;
recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+
+ rlength = be32_to_cpu(tpm_cmd.header.out.length);
+ if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
+ recd) {
+ total = -EFAULT;
+ break;
+ }
memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
dest += recd;
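
With the extended tpm_transmit_cmd() above, each caller passes the minimum acceptable response body size so short or truncated replies are rejected centrally (with -EFAULT) before the payload is parsed. A sketch of the calling convention, mirroring the PCR-read path; the constants and field names follow this driver's tpm.h, the wrapper name is illustrative:

static int example_read_pcr(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
	struct tpm_cmd_t cmd;
	int rc;

	cmd.header.in = pcrread_header;
	cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);

	/* READ_PCR_RESULT_BODY_SIZE guards the 20-byte memcpy below: a
	 * shorter reply is rejected inside tpm_transmit_cmd() itself.
	 */
	rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
			      READ_PCR_RESULT_BODY_SIZE, 0,
			      "attempting to read a pcr value");
	if (rc == 0)
		memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
		       TPM_DIGEST_SIZE);
	return rc;
}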
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 848ad6580b46..2f596d74f80c 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -21,6 +21,7 @@
#include "tpm.h"
#define READ_PUBEK_RESULT_SIZE 314
+#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
#define TPM_ORD_READPUBEK cpu_to_be32(124)
static const struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
@@ -39,7 +40,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
struct tpm_chip *chip = to_tpm_chip(dev);
tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
+ err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
"attempting to read the PUBEK");
if (err)
goto out;
@@ -95,7 +97,8 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
struct tpm_chip *chip = to_tpm_chip(dev);
rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
- "attempting to determine the number of PCRS");
+ "attempting to determine the number of PCRS",
+ sizeof(cap.num_pcrs));
if (rc)
return 0;
@@ -120,7 +123,8 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent enabled state");
+ "attempting to determine the permanent enabled state",
+ sizeof(cap.perm_flags));
if (rc)
return 0;
@@ -136,7 +140,8 @@ static ssize_t active_show(struct device *dev, struct device_attribute *attr,
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent active state");
+ "attempting to determine the permanent active state",
+ sizeof(cap.perm_flags));
if (rc)
return 0;
@@ -152,7 +157,8 @@ static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
- "attempting to determine the owner state");
+ "attempting to determine the owner state",
+ sizeof(cap.owned));
if (rc)
return 0;
@@ -168,7 +174,8 @@ static ssize_t temp_deactivated_show(struct device *dev,
ssize_t rc;
rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
- "attempting to determine the temporary state");
+ "attempting to determine the temporary state",
+ sizeof(cap.stclear_flags));
if (rc)
return 0;
@@ -186,7 +193,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *str = buf;
rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer");
+ "attempting to determine the manufacturer",
+ sizeof(cap.manufacturer_id));
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
@@ -194,7 +202,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
- "attempting to determine the 1.2 version");
+ "attempting to determine the 1.2 version",
+ sizeof(cap.tpm_version_1_2));
if (!rc) {
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
@@ -205,7 +214,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
} else {
/* Otherwise just use TPM_STRUCT_VER */
rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version");
+ "attempting to determine the 1.1 version",
+ sizeof(cap.tpm_version));
if (rc)
return 0;
str += sprintf(str,
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 1ae976894257..4937b56a275c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -34,8 +34,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/highmem.h>
-
-#include "tpm_eventlog.h"
+#include <crypto/hash_info.h>
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
@@ -97,6 +96,7 @@ enum tpm2_return_codes {
};
enum tpm2_algorithms {
+ TPM2_ALG_ERROR = 0x0000,
TPM2_ALG_SHA1 = 0x0004,
TPM2_ALG_KEYEDHASH = 0x0008,
TPM2_ALG_SHA256 = 0x000B,
@@ -127,6 +127,7 @@ enum tpm2_permanent_handles {
};
enum tpm2_capabilities {
+ TPM2_CAP_PCRS = 5,
TPM2_CAP_TPM_PROPERTIES = 6,
};
@@ -148,6 +149,11 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
};
+struct tpm_bios_log {
+ void *bios_event_log;
+ void *bios_event_log_end;
+};
+
struct tpm_chip_seqops {
struct tpm_chip *chip;
const struct seq_operations *seqops;
@@ -187,6 +193,8 @@ struct tpm_chip {
const struct attribute_group *groups[3];
unsigned int groups_cnt;
+
+ u16 active_banks[7];
#ifdef CONFIG_ACPI
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
@@ -195,17 +203,6 @@ struct tpm_chip {
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-static inline int tpm_read_index(int base, int index)
-{
- outb(index, base);
- return inb(base+1) & 0xFF;
-}
-
-static inline void tpm_write_index(int base, int index, int value)
-{
- outb(index, base);
- outb(value & 0xFF, base+1);
-}
struct tpm_input_header {
__be16 tag;
__be32 length;
@@ -284,7 +281,7 @@ struct permanent_flags_t {
typedef union {
struct permanent_flags_t perm_flags;
struct stclear_flags_t stclear_flags;
- bool owned;
+ __u8 owned;
__be32 num_pcrs;
struct tpm_version_t tpm_version;
struct tpm_version_1_2_t tpm_version_1_2;
@@ -387,6 +384,11 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
+struct tpm2_digest {
+ u16 alg_id;
+ u8 digest[SHA512_DIGEST_SIZE];
+} __packed;
+
/* A string buffer type for constructing TPM commands. This is based on the
* ideas of string buffer code in security/keys/trusted.h but is heap based
* in order to keep the stack usage minimal.
@@ -493,10 +495,11 @@ enum tpm_transmit_flags {
ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
- unsigned int flags, const char *desc);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *buf, size_t bufsiz,
+ size_t min_rsp_body_len, unsigned int flags,
+ const char *desc);
ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
- const char *desc);
+ const char *desc, size_t min_cap_length);
int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
int tpm_do_selftest(struct tpm_chip *chip);
@@ -529,8 +532,14 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
+static inline u32 tpm2_rc_value(u32 rc)
+{
+ return (rc & BIT(7)) ? rc & 0xff : rc;
+}
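/* Sketch (illustrative helper, not part of the patch): tpm2_rc_value()
 * strips the parameter/session/handle number from a format-one TPM2
 * return code, so callers can compare against bare RC values such as
 * TPM2_RC_HASH, as the sealing path in tpm2-cmd.c now does.
 */
static inline int example_map_tpm2_rc(u32 rc)
{
	if (!rc)
		return 0;
	if (tpm2_rc_value(rc) == TPM2_RC_HASH)
		return -EINVAL;	/* requested hash algorithm not supported */
	return -EPERM;		/* any other TPM error status */
}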
+
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
-int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
+ struct tpm2_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm1_eventlog.c
index 11bb1138a828..9a8605e500b5 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm1_eventlog.c
@@ -390,9 +390,6 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
unsigned int cnt;
int rc = 0;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return 0;
-
rc = tpm_read_log(chip);
if (rc)
return rc;
@@ -407,7 +404,13 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
chip->bin_log_seqops.chip = chip;
- chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->bin_log_seqops.seqops =
+ &tpm2_binary_b_measurements_seqops;
+ else
+ chip->bin_log_seqops.seqops =
+ &tpm_binary_b_measurements_seqops;
+
chip->bios_dir[cnt] =
securityfs_create_file("binary_bios_measurements",
@@ -418,17 +421,21 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
goto err;
cnt++;
- chip->ascii_log_seqops.chip = chip;
- chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- chip->bios_dir[cnt] =
- securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
- (void *)&chip->ascii_log_seqops,
- &tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops =
+ &tpm_ascii_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("ascii_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+ }
return 0;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index da5b782a9731..881aea9732bf 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -53,22 +53,6 @@ struct tpm2_pcr_read_out {
u8 digest[TPM_DIGEST_SIZE];
} __packed;
-struct tpm2_null_auth_area {
- __be32 handle;
- __be16 nonce_size;
- u8 attributes;
- __be16 auth_size;
-} __packed;
-
-struct tpm2_pcr_extend_in {
- __be32 pcr_idx;
- __be32 auth_area_size;
- struct tpm2_null_auth_area auth_area;
- __be32 digest_cnt;
- __be16 hash_alg;
- u8 digest[TPM_DIGEST_SIZE];
-} __packed;
-
struct tpm2_get_tpm_pt_in {
__be32 cap_id;
__be32 property_id;
@@ -97,7 +81,6 @@ union tpm2_cmd_params {
struct tpm2_self_test_in selftest_in;
struct tpm2_pcr_read_in pcrread_in;
struct tpm2_pcr_read_out pcrread_out;
- struct tpm2_pcr_extend_in pcrextend_in;
struct tpm2_get_tpm_pt_in get_tpm_pt_in;
struct tpm2_get_tpm_pt_out get_tpm_pt_out;
struct tpm2_get_random_in getrandom_in;
@@ -248,6 +231,9 @@ static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = {
(sizeof(struct tpm_input_header) + \
sizeof(struct tpm2_pcr_read_in))
+#define TPM2_PCR_READ_RESP_BODY_SIZE \
+ sizeof(struct tpm2_pcr_read_out)
+
static const struct tpm_input_header tpm2_pcrread_header = {
.tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
.length = cpu_to_be32(TPM2_PCR_READ_IN_SIZE),
@@ -258,11 +244,9 @@ static const struct tpm_input_header tpm2_pcrread_header = {
* tpm2_pcr_read() - read a PCR value
* @chip: TPM chip to use.
* @pcr_idx: index of the PCR to read.
- * @ref_buf: buffer to store the resulting hash,
+ * @res_buf: buffer to store the resulting hash.
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: Same as with tpm_transmit_cmd.
*/
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
@@ -282,8 +266,9 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
sizeof(cmd.params.pcrread_in.pcr_select));
cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
- "attempting to read a pcr value");
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ TPM2_PCR_READ_RESP_BODY_SIZE,
+ 0, "attempting to read a pcr value");
if (rc == 0) {
buf = cmd.params.pcrread_out.digest;
memcpy(res_buf, buf, TPM_DIGEST_SIZE);
@@ -292,50 +277,71 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
return rc;
}
-#define TPM2_GET_PCREXTEND_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_pcr_extend_in))
-
-static const struct tpm_input_header tpm2_pcrextend_header = {
- .tag = cpu_to_be16(TPM2_ST_SESSIONS),
- .length = cpu_to_be32(TPM2_GET_PCREXTEND_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_PCR_EXTEND)
-};
+struct tpm2_null_auth_area {
+ __be32 handle;
+ __be16 nonce_size;
+ u8 attributes;
+ __be16 auth_size;
+} __packed;
/**
* tpm2_pcr_extend() - extend a PCR value
+ *
* @chip: TPM chip to use.
* @pcr_idx: index of the PCR.
- * @hash: hash value to use for the extend operation.
+ * @count: number of digests passed.
+ * @digests: list of pcr banks and corresponding digest values to extend.
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: Same as with tpm_transmit_cmd.
*/
-int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
+int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
+ struct tpm2_digest *digests)
{
- struct tpm2_cmd cmd;
+ struct tpm_buf buf;
+ struct tpm2_null_auth_area auth_area;
int rc;
+ int i;
+ int j;
+
+ if (count > ARRAY_SIZE(chip->active_banks))
+ return -EINVAL;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+
+ auth_area.handle = cpu_to_be32(TPM2_RS_PW);
+ auth_area.nonce_size = 0;
+ auth_area.attributes = 0;
+ auth_area.auth_size = 0;
+
+ tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
+ tpm_buf_append(&buf, (const unsigned char *)&auth_area,
+ sizeof(auth_area));
+ tpm_buf_append_u32(&buf, count);
+
+ for (i = 0; i < count; i++) {
+ for (j = 0; j < ARRAY_SIZE(tpm2_hash_map); j++) {
+ if (digests[i].alg_id != tpm2_hash_map[j].tpm_id)
+ continue;
+ tpm_buf_append_u16(&buf, digests[i].alg_id);
+ tpm_buf_append(&buf, (const unsigned char
+ *)&digests[i].digest,
+ hash_digest_size[tpm2_hash_map[j].crypto_id]);
+ }
+ }
- cmd.header.in = tpm2_pcrextend_header;
- cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
- cmd.params.pcrextend_in.auth_area_size =
- cpu_to_be32(sizeof(struct tpm2_null_auth_area));
- cmd.params.pcrextend_in.auth_area.handle =
- cpu_to_be32(TPM2_RS_PW);
- cmd.params.pcrextend_in.auth_area.nonce_size = 0;
- cmd.params.pcrextend_in.auth_area.attributes = 0;
- cmd.params.pcrextend_in.auth_area.auth_size = 0;
- cmd.params.pcrextend_in.digest_cnt = cpu_to_be32(1);
- cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
- memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE);
-
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, 0,
"attempting extend a PCR value");
+ tpm_buf_destroy(&buf);
+
return rc;
}
+
#define TPM2_GETRANDOM_IN_SIZE \
(sizeof(struct tpm_input_header) + \
sizeof(struct tpm2_get_random_in))
@@ -348,18 +354,18 @@ static const struct tpm_input_header tpm2_getrandom_header = {
/**
* tpm2_get_random() - get random bytes from the TPM RNG
+ *
* @chip: TPM chip to use
* @out: destination buffer for the random bytes
* @max: the max number of bytes to write to @out
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return:
+ * Size of the output buffer, or -EIO on error.
*/
int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
{
struct tpm2_cmd cmd;
- u32 recd;
+ u32 recd, rlength;
u32 num_bytes;
int err;
int total = 0;
@@ -376,13 +382,19 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
cmd.header.in = tpm2_getrandom_header;
cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
- err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
- "attempting get random");
+ err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ offsetof(struct tpm2_get_random_out,
+ buffer),
+ 0, "attempting get random");
if (err)
break;
recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size),
num_bytes);
+ rlength = be32_to_cpu(cmd.header.out.length);
+ if (rlength < offsetof(struct tpm2_get_random_out, buffer) +
+ recd)
+ return -EFAULT;
memcpy(dest, cmd.params.getrandom_out.buffer, recd);
dest += recd;
@@ -397,6 +409,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
(sizeof(struct tpm_input_header) + \
sizeof(struct tpm2_get_tpm_pt_in))
+#define TPM2_GET_TPM_PT_OUT_BODY_SIZE \
+ sizeof(struct tpm2_get_tpm_pt_out)
+
static const struct tpm_input_header tpm2_get_tpm_pt_header = {
.tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
.length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE),
@@ -404,15 +419,15 @@ static const struct tpm_input_header tpm2_get_tpm_pt_header = {
};
/**
- * Append TPMS_AUTH_COMMAND to the buffer. The buffer must be allocated with
- * tpm_buf_alloc().
- *
- * @param buf: an allocated tpm_buf instance
- * @param nonce: the session nonce, may be NULL if not used
- * @param nonce_len: the session nonce length, may be 0 if not used
- * @param attributes: the session attributes
- * @param hmac: the session HMAC or password, may be NULL if not used
- * @param hmac_len: the session HMAC or password length, maybe 0 if not used
+ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
*/
static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
const u8 *nonce, u16 nonce_len,
@@ -435,7 +450,8 @@ static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
/**
* tpm2_seal_trusted() - seal the payload of a trusted key
- * @chip_num: TPM chip to use
+ *
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
*
@@ -447,7 +463,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
{
unsigned int blob_len;
struct tpm_buf buf;
- u32 hash;
+ u32 hash, rlength;
int i;
int rc;
@@ -512,7 +528,8 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, "sealing data");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 4, 0,
+ "sealing data");
if (rc)
goto out;
@@ -521,6 +538,11 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
rc = -E2BIG;
goto out;
}
+ rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)->header.out.length);
+ if (rlength < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
payload->blob_len = blob_len;
@@ -529,7 +551,7 @@ out:
tpm_buf_destroy(&buf);
if (rc > 0) {
- if ((rc & TPM2_RC_HASH) == TPM2_RC_HASH)
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
rc = -EINVAL;
else
rc = -EPERM;
@@ -540,11 +562,17 @@ out:
/**
* tpm2_load_cmd() - execute a TPM2_Load command
- * @chip_num: TPM chip to use
+ *
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ * @flags: tpm transmit flags
*
- * Return: same as with tpm_transmit_cmd
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ * -EPERM on tpm error status.
+ * < 0 error from tpm_transmit_cmd.
*/
static int tpm2_load_cmd(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -584,7 +612,8 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "loading blob");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 4, flags,
+ "loading blob");
if (!rc)
*blob_handle = be32_to_cpup(
(__be32 *) &buf.data[TPM_HEADER_SIZE]);
@@ -600,11 +629,12 @@ out:
/**
* tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
- * @chip_num: TPM chip to use
- * @payload: the key data in clear and encrypted form
- * @options: authentication values and other options
*
- * Return: same as with tpm_transmit_cmd
+ * @chip: TPM chip to use
+ * @handle: handle of the transient object to be flushed
+ * @flags: tpm transmit flags
+ *
+ * Return: Same as with tpm_transmit_cmd.
*/
static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
unsigned int flags)
@@ -621,7 +651,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, flags,
"flushing context");
if (rc)
dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
@@ -632,11 +662,16 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
/**
* tpm2_unseal_cmd() - execute a TPM2_Unload command
- * @chip_num: TPM chip to use
+ *
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
+ * @blob_handle: blob handle
+ * @flags: tpm_transmit_cmd flags
*
- * Return: same as with tpm_transmit_cmd
+ * Return: 0 on success
+ * -EPERM on tpm error status
+ * < 0 error from tpm_transmit_cmd
*/
static int tpm2_unseal_cmd(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -647,6 +682,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
u16 data_len;
u8 *data;
int rc;
+ u32 rlength;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
if (rc)
@@ -661,13 +697,21 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "unsealing");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 6, flags,
+ "unsealing");
if (rc > 0)
rc = -EPERM;
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+
+ rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
+ ->header.out.length);
+ if (rlength < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
data = &buf.data[TPM_HEADER_SIZE + 6];
memcpy(payload->key, data, data_len - 1);
@@ -675,17 +719,19 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
payload->migratable = data[data_len - 1];
}
+out:
tpm_buf_destroy(&buf);
return rc;
}
/**
* tpm2_unseal_trusted() - unseal the payload of a trusted key
- * @chip_num: TPM chip to use
+ *
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
*
- * Return: < 0 on error and 0 on success.
+ * Return: Same as with tpm_transmit_cmd.
*/
int tpm2_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -715,9 +761,7 @@ out:
* @value: output variable.
* @desc: passed to tpm_transmit_cmd()
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: Same as with tpm_transmit_cmd.
*/
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
const char *desc)
@@ -730,7 +774,8 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, desc);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ TPM2_GET_TPM_PT_OUT_BODY_SIZE, 0, desc);
if (!rc)
*value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
@@ -750,13 +795,12 @@ static const struct tpm_input_header tpm2_startup_header = {
/**
* tpm2_startup() - send startup command to the TPM chip
+ *
* @chip: TPM chip to use.
- * @startup_type startup type. The value is either
+ * @startup_type: startup type. The value is either
* TPM_SU_CLEAR or TPM_SU_STATE.
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: Same as with tpm_transmit_cmd.
*/
static int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
{
@@ -765,7 +809,7 @@ static int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
cmd.header.in = tpm2_startup_header;
cmd.params.startup_in.startup_type = cpu_to_be16(startup_type);
- return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
+ return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, 0,
"attempting to start the TPM");
}
@@ -781,8 +825,9 @@ static const struct tpm_input_header tpm2_shutdown_header = {
/**
* tpm2_shutdown() - send shutdown command to the TPM chip
+ *
* @chip: TPM chip to use.
- * @shutdown_type shutdown type. The value is either
+ * @shutdown_type: shutdown type. The value is either
* TPM_SU_CLEAR or TPM_SU_STATE.
*/
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
@@ -793,7 +838,8 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
cmd.header.in = tpm2_shutdown_header;
cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "stopping the TPM");
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, 0,
+ "stopping the TPM");
/* In places where shutdown command is sent there's no much we can do
* except print the error code on a system failure.
@@ -805,12 +851,11 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
/*
* tpm2_calc_ordinal_duration() - maximum duration for a command
+ *
* @chip: TPM chip to use.
* @ordinal: command code number.
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: maximum duration for a command
*/
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
@@ -842,13 +887,12 @@ static const struct tpm_input_header tpm2_selftest_header = {
/**
* tpm2_continue_selftest() - start a self test
+ *
* @chip: TPM chip to use
* @full: test all commands instead of testing only those that were not
* previously tested.
*
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
+ * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING.
*/
static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
{
@@ -858,7 +902,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = full;
- rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0,
"continue selftest");
/* At least some prototype chips seem to give RC_TESTING error
@@ -874,14 +918,13 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
/**
* tpm2_do_selftest() - run a full self test
+ *
* @chip: TPM chip to use
*
+ * Return: Same as with tpm_transmit_cmd.
+ *
* During the self test TPM2 commands return with the error code RC_TESTING.
* Waiting is done by issuing PCR read until it executes successfully.
- *
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
*/
static int tpm2_do_selftest(struct tpm_chip *chip)
{
@@ -910,7 +953,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
cmd.params.pcrread_in.pcr_select[1] = 0x00;
cmd.params.pcrread_in.pcr_select[2] = 0x00;
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, 0, NULL);
if (rc < 0)
break;
@@ -928,6 +971,8 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
* tpm2_probe() - probe TPM 2.0
* @chip: TPM chip to use
*
+ * Return: 0 on success, < 0 on error.
+ *
* Send idempotent TPM 2.0 command and see whether TPM 2.0 chip replied based on
* the reply tag.
*/
@@ -941,7 +986,7 @@ int tpm2_probe(struct tpm_chip *chip)
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, 0, NULL);
if (rc < 0)
return rc;
@@ -952,12 +997,85 @@ int tpm2_probe(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm2_probe);
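The reply-tag comparison that implements "see whether TPM 2.0 chip replied based on the reply tag" lies outside this hunk. As a hedged sketch only (field and flag names are taken from the surrounding driver; the exact logic is not shown in this diff), the check amounts to:

	/* Sketch: a TPM 2.0 device answers this TPM2-formatted command with a
	 * TPM2 response tag, so the probe can key off the out-header tag. */
	if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS)
		chip->flags |= TPM_CHIP_FLAG_TPM2;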
+struct tpm2_pcr_selection {
+ __be16 hash_alg;
+ u8 size_of_select;
+ u8 pcr_select[3];
+} __packed;
+
+static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+{
+ struct tpm2_pcr_selection pcr_selection;
+ struct tpm_buf buf;
+ void *marker;
+ void *end;
+ void *pcr_select_offset;
+ unsigned int count;
+ u32 sizeof_pcr_selection;
+ u32 rsp_len;
+ int rc;
+ int i = 0;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, TPM2_CAP_PCRS);
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u32(&buf, 1);
+
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 9, 0,
+ "get tpm pcr allocation");
+ if (rc)
+ goto out;
+
+ count = be32_to_cpup(
+ (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
+
+ if (count > ARRAY_SIZE(chip->active_banks)) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ marker = &buf.data[TPM_HEADER_SIZE + 9];
+
+ rsp_len = be32_to_cpup((__be32 *)&buf.data[2]);
+ end = &buf.data[rsp_len];
+
+ for (i = 0; i < count; i++) {
+ pcr_select_offset = marker +
+ offsetof(struct tpm2_pcr_selection, size_of_select);
+ if (pcr_select_offset >= end) {
+ rc = -EFAULT;
+ break;
+ }
+
+ memcpy(&pcr_selection, marker, sizeof(pcr_selection));
+ chip->active_banks[i] = be16_to_cpu(pcr_selection.hash_alg);
+ sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) +
+ sizeof(pcr_selection.size_of_select) +
+ pcr_selection.size_of_select;
+ marker = marker + sizeof_pcr_selection;
+ }
+
+out:
+ if (i < ARRAY_SIZE(chip->active_banks))
+ chip->active_banks[i] = TPM2_ALG_ERROR;
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
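For orientation, a minimal consumer sketch (hypothetical helper, not part of the patch): tpm2_get_pcr_allocation() terminates the bank list with TPM2_ALG_ERROR, so a caller can walk chip->active_banks like this:

static void tpm2_show_active_banks(struct tpm_chip *chip)
{
	int i;

	/* Walk the list filled in above until the TPM2_ALG_ERROR terminator. */
	for (i = 0; i < ARRAY_SIZE(chip->active_banks) &&
		    chip->active_banks[i] != TPM2_ALG_ERROR; i++)
		dev_info(&chip->dev, "PCR bank %d: alg 0x%04x\n",
			 i, chip->active_banks[i]);
}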
+
/**
* tpm2_auto_startup - Perform the standard automatic TPM initialization
* sequence
* @chip: TPM chip to use
*
- * Returns 0 on success, < 0 in case of fatal error.
+ * Initializes timeout values for operation and command durations, conducts
+ * a self-test and reads the list of active PCR banks.
+ *
+ * Return: 0 on success. Otherwise, a system error code is returned.
*/
int tpm2_auto_startup(struct tpm_chip *chip)
{
@@ -985,6 +1103,8 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
}
+ rc = tpm2_get_pcr_allocation(chip);
+
out:
if (rc > 0)
rc = -ENODEV;
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c
new file mode 100644
index 000000000000..513897cf9c4b
--- /dev/null
+++ b/drivers/char/tpm/tpm2_eventlog.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to TPM 2.0 event log as written by Firmware.
+ * It assumes that writer of event log has followed TCG Specification
+ * for Family "2.0" and written the event data in little endian.
+ * With that, it doesn't need any endian conversion for structure
+ * content.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+/*
+ * calc_tpm2_event_size() - calculate the event size, where event
+ * is an entry in the TPM 2.0 event log. The event is of type Crypto
+ * Agile Log Entry Format as defined in TCG EFI Protocol Specification
+ * Family "2.0".
+ *
+ * @event: event whose size is to be calculated.
+ * @event_header: the first event in the event log.
+ *
+ * Returns size of the event. If it is an invalid event, returns 0.
+ */
+static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
+ struct tcg_pcr_event *event_header)
+{
+ struct tcg_efi_specid_event *efispecid;
+ struct tcg_event_field *event_field;
+ void *marker;
+ void *marker_start;
+ u32 halg_size;
+ size_t size;
+ u16 halg;
+ int i;
+ int j;
+
+ marker = event;
+ marker_start = marker;
+ marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type)
+ + sizeof(event->count);
+
+ efispecid = (struct tcg_efi_specid_event *)event_header->event;
+
+ for (i = 0; (i < event->count) && (i < TPM2_ACTIVE_PCR_BANKS);
+ i++) {
+ halg_size = sizeof(event->digests[i].alg_id);
+ memcpy(&halg, marker, halg_size);
+ marker = marker + halg_size;
+ for (j = 0; (j < efispecid->num_algs); j++) {
+ if (halg == efispecid->digest_sizes[j].alg_id) {
+ marker = marker +
+ efispecid->digest_sizes[j].digest_size;
+ break;
+ }
+ }
+ }
+
+ event_field = (struct tcg_event_field *)marker;
+ marker = marker + sizeof(event_field->event_size)
+ + event_field->event_size;
+ size = marker - marker_start;
+
+ if ((event->event_type == 0) && (event_field->event_size == 0))
+ return 0;
+
+ return size;
+}
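As a worked example of the walk above (figures are illustrative only): an entry carrying a single SHA-256 digest and an 8-byte event payload measures pcr_idx (4) + event_type (4) + count (4) + alg_id (2) + digest (32) + event_size (4) + event data (8) = 58 bytes.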
+
+static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2 *event;
+ size_t size;
+ int i;
+
+ event_header = addr;
+ size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event)
+ + event_header->event_size;
+
+ if (*pos == 0) {
+ if (addr + size < limit) {
+ if ((event_header->event_type == 0) &&
+ (event_header->event_size == 0))
+ return NULL;
+ return SEQ_START_TOKEN;
+ }
+ }
+
+ if (*pos > 0) {
+ addr += size;
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ }
+
+ for (i = 0; i < (*pos - 1); i++) {
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ addr += size;
+ }
+
+ return addr;
+}
+
+static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2 *event;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *limit = log->bios_event_log_end;
+ size_t event_size;
+ void *marker;
+
+ event_header = log->bios_event_log;
+
+ if (v == SEQ_START_TOKEN) {
+ event_size = sizeof(struct tcg_pcr_event) -
+ sizeof(event_header->event) + event_header->event_size;
+ marker = event_header;
+ } else {
+ event = v;
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (event_size == 0)
+ return NULL;
+ marker = event;
+ }
+
+ marker = marker + event_size;
+ if (marker >= limit)
+ return NULL;
+ v = marker;
+ event = v;
+
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (((v + event_size) >= limit) || (event_size == 0))
+ return NULL;
+
+ (*pos)++;
+ return v;
+}
+
+static void tpm2_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ struct tcg_pcr_event *event_header = log->bios_event_log;
+ struct tcg_pcr_event2 *event = v;
+ void *temp_ptr;
+ size_t size;
+
+ if (v == SEQ_START_TOKEN) {
+ size = sizeof(struct tcg_pcr_event) -
+ sizeof(event_header->event) + event_header->event_size;
+
+ temp_ptr = event_header;
+
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ } else {
+ size = calc_tpm2_event_size(event, event_header);
+ temp_ptr = event;
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ }
+
+ return 0;
+}
+
+const struct seq_operations tpm2_binary_b_measurements_seqops = {
+ .start = tpm2_bios_measurements_start,
+ .next = tpm2_bios_measurements_next,
+ .stop = tpm2_bios_measurements_stop,
+ .show = tpm2_binary_bios_measurements_show,
+};
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index b7718c95fd0b..169edf3ce86d 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -54,6 +54,9 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
u64 len, start;
struct tpm_bios_log *log;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return -ENODEV;
+
log = &chip->log;
/* Unfortunately ACPI does not associate the event log with a specific
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
index 4f96d80cdce9..5c82eb47665e 100644
--- a/drivers/char/tpm/tpm_atmel.h
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -96,6 +96,12 @@ enum tpm_atmel_addr {
TPM_ATMEL_BASE_ADDR_HI = 0x09
};
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
/* Verify this is a 1.1 Atmel TPM */
static int atmel_verify_tpm11(void)
{
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 717b6b47c042..86f355b6df1d 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -264,10 +264,12 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
struct resource *io_res = data;
- struct resource res;
+ struct resource_win win;
+ struct resource *res = &(win.res);
- if (acpi_dev_resource_memory(ares, &res)) {
- *io_res = res;
+ if (acpi_dev_resource_memory(ares, res) ||
+ acpi_dev_resource_address_space(ares, &win)) {
+ *io_res = *res;
io_res->name = NULL;
}
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 1660d74ea79a..b4b549559203 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -2,9 +2,12 @@
#ifndef __TPM_EVENTLOG_H__
#define __TPM_EVENTLOG_H__
+#include <crypto/hash_info.h>
+
#define TCG_EVENT_NAME_LEN_MAX 255
#define MAX_TEXT_EVENT 1000 /* Max event string length */
#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
+#define TPM2_ACTIVE_PCR_BANKS 3
#ifdef CONFIG_PPC64
#define do_endian_conversion(x) be32_to_cpu(x)
@@ -17,11 +20,6 @@ enum bios_platform_class {
BIOS_SERVER = 0x01,
};
-struct tpm_bios_log {
- void *bios_event_log;
- void *bios_event_log_end;
-};
-
struct tcpa_event {
u32 pcr_index;
u32 event_type;
@@ -73,6 +71,49 @@ enum tcpa_pc_event_ids {
HOST_TABLE_OF_DEVICES,
};
+/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
+
+struct tcg_efi_specid_event_algs {
+ u16 alg_id;
+ u16 digest_size;
+} __packed;
+
+struct tcg_efi_specid_event {
+ u8 signature[16];
+ u32 platform_class;
+ u8 spec_version_minor;
+ u8 spec_version_major;
+ u8 spec_errata;
+ u8 uintnsize;
+ u32 num_algs;
+ struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
+ u8 vendor_info_size;
+ u8 vendor_info[0];
+} __packed;
+
+struct tcg_pcr_event {
+ u32 pcr_idx;
+ u32 event_type;
+ u8 digest[20];
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tcg_event_field {
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tcg_pcr_event2 {
+ u32 pcr_idx;
+ u32 event_type;
+ u32 count;
+ struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
+ struct tcg_event_field event;
+} __packed;
+
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
#if defined(CONFIG_ACPI)
int tpm_read_log_acpi(struct tpm_chip *chip);
#else
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 946025a7413b..1b9d61ffe991 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -40,11 +40,12 @@ MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
* ibmvtpm_send_crq - Send a CRQ request
+ *
* @vdev: vio device struct
* @w1: first word
* @w2: second word
*
- * Return value:
+ * Return:
* 0 -Sucess
* Non-zero - Failure
*/
@@ -55,11 +56,12 @@ static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
/**
* tpm_ibmvtpm_recv - Receive data after send
+ *
* @chip: tpm chip struct
* @buf: buffer to read
- * count: size of buffer
+ * @count: size of buffer
*
- * Return value:
+ * Return:
* Number of bytes read
*/
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
@@ -96,12 +98,13 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/**
* tpm_ibmvtpm_send - Send tpm request
+ *
* @chip: tpm chip struct
* @buf: buffer contains data to send
- * count: size of buffer
+ * @count: size of buffer
*
- * Return value:
- * Number of bytes sent
+ * Return:
+ * Number of bytes sent or < 0 on error.
*/
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
@@ -170,11 +173,12 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
/**
* ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ *
* @ibmvtpm: vtpm device struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
@@ -197,11 +201,12 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
/**
* ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
* - Note that this is vtpm version and not tpm version
+ *
* @ibmvtpm: vtpm device struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
@@ -225,9 +230,9 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
* ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
* @ibmvtpm: vtpm device struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
@@ -245,9 +250,9 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
* ibmvtpm_crq_send_init - Send a CRQ initialize message
* @ibmvtpm: vtpm device struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
@@ -265,8 +270,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
* tpm_ibmvtpm_remove - ibm vtpm remove entry point
* @vdev: vio device struct
*
- * Return value:
- * 0
+ * Return: Always 0.
*/
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
@@ -303,18 +307,19 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
* tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
* @vdev: vio device struct
*
- * Return value:
- * Number of bytes the driver needs to DMA map
+ * Return:
+ * Number of bytes the driver needs to DMA map.
*/
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- /* ibmvtpm initializes at probe time, so the data we are
- * asking for may not be set yet. Estimate that 4K required
- * for TCE-mapped buffer in addition to CRQ.
- */
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K required
+ * for TCE-mapped buffer in addition to CRQ.
+ */
if (!ibmvtpm)
return CRQ_RES_BUF_SIZE + PAGE_SIZE;
@@ -325,8 +330,7 @@ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
* tpm_ibmvtpm_suspend - Suspend
* @dev: device struct
*
- * Return value:
- * 0
+ * Return: Always 0.
*/
static int tpm_ibmvtpm_suspend(struct device *dev)
{
@@ -350,11 +354,12 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
/**
* ibmvtpm_reset_crq - Reset CRQ
+ *
* @ibmvtpm: ibm vtpm struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
@@ -376,10 +381,10 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
/**
* tpm_ibmvtpm_resume - Resume from suspend
+ *
* @dev: device struct
*
- * Return value:
- * 0
+ * Return: Always 0.
*/
static int tpm_ibmvtpm_resume(struct device *dev)
{
@@ -434,10 +439,10 @@ static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
/**
* ibmvtpm_crq_get_next - Get next responded crq
- * @ibmvtpm vtpm device struct
*
- * Return value:
- * vtpm crq pointer
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return: vtpm crq pointer or NULL.
*/
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
@@ -455,11 +460,10 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
/**
* ibmvtpm_crq_process - Process responded crq
- * @crq crq to be processed
- * @ibmvtpm vtpm device struct
*
- * Return value:
- * Nothing
+ * @crq: crq to be processed
+ * @ibmvtpm: vtpm device struct
+ *
*/
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
struct ibmvtpm_dev *ibmvtpm)
@@ -528,6 +532,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
/**
* ibmvtpm_interrupt - Interrupt handler
+ *
* @irq: irq number to handle
* @vtpm_instance: vtpm that received interrupt
*
@@ -554,12 +559,13 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
/**
* tpm_ibmvtpm_probe - ibm vtpm initialize entry point
+ *
* @vio_dev: vio device struct
* @id: vio device id struct
*
- * Return value:
- * 0 - Success
- * Non-zero - Failure
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
const struct vio_device_id *id)
@@ -671,11 +677,12 @@ static struct vio_driver ibmvtpm_driver = {
};
/**
- * ibmvtpm_module_init - Initialize ibm vtpm module
+ * ibmvtpm_module_init - Initialize ibm vtpm module.
*
- * Return value:
- * 0 -Success
- * Non-zero - Failure
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
*/
static int __init ibmvtpm_module_init(void)
{
@@ -683,10 +690,7 @@ static int __init ibmvtpm_module_init(void)
}
/**
- * ibmvtpm_module_exit - Teardown ibm vtpm module
- *
- * Return value:
- * Nothing
+ * ibmvtpm_module_exit - Tear down ibm vtpm module.
*/
static void __exit ibmvtpm_module_exit(void)
{
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 9ff0e072c476..5d6cce74cd3f 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -278,6 +278,18 @@ static struct platform_driver nsc_drv = {
},
};
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+static inline void tpm_write_index(int base, int index, int value)
+{
+ outb(index, base);
+ outb(value & 0xFF, base+1);
+}
+
static int __init init_nsc(void)
{
int rc = 0;
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 7dee42d7b5e0..de57d4ac8901 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -27,6 +27,8 @@ int tpm_read_log_of(struct tpm_chip *chip)
const u32 *sizep;
const u64 *basep;
struct tpm_bios_log *log;
+ u32 size;
+ u64 base;
log = &chip->log;
if (chip->dev.parent && chip->dev.parent->of_node)
@@ -41,18 +43,35 @@ int tpm_read_log_of(struct tpm_chip *chip)
if (sizep == NULL || basep == NULL)
return -EIO;
- if (*sizep == 0) {
+ /*
+ * For both vtpm/tpm, firmware has log addr and log size in big
+ * endian format. But in case of vtpm, there is a method called
+ * sml-handover which is run during kernel init even before
+ * device tree is setup. This sml-handover function takes care
+ * of endianness and writes to sml-base and sml-size in little
+ * endian format. For this reason, vtpm doesn't need conversion
+ * but physical tpm needs the conversion.
+ */
+ if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
+ size = be32_to_cpup(sizep);
+ base = be64_to_cpup(basep);
+ } else {
+ size = *sizep;
+ base = *basep;
+ }
+
+ if (size == 0) {
dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
return -EIO;
}
- log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
+ log->bios_event_log = kmalloc(size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
- log->bios_event_log_end = log->bios_event_log + *sizep;
+ log->bios_event_log_end = log->bios_event_log + size;
- memcpy(log->bios_event_log, __va(*basep), *sizep);
+ memcpy(log->bios_event_log, __va(base), size);
return 0;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0127af130cb1..c7e1384f1b08 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -159,7 +159,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
irq = tpm_info->irq;
if (itpm)
- phy->priv.flags |= TPM_TIS_ITPM_POSSIBLE;
+ phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND;
return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
acpi_dev_handle);
@@ -432,7 +432,7 @@ err_pnp:
acpi_bus_unregister_driver(&tis_acpi_driver);
err_acpi:
#endif
- platform_device_unregister(force_pdev);
+ platform_driver_unregister(&tis_drv);
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 7993678954a2..c0f296b5d413 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -264,7 +264,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
size_t count = 0;
- bool itpm = priv->flags & TPM_TIS_ITPM_POSSIBLE;
+ bool itpm = priv->flags & TPM_TIS_ITPM_WORKAROUND;
if (request_locality(chip, 0) < 0)
return -EBUSY;
@@ -464,6 +464,9 @@ static int probe_itpm(struct tpm_chip *chip)
size_t len = sizeof(cmd_getticks);
u16 vendor;
+ if (priv->flags & TPM_TIS_ITPM_WORKAROUND)
+ return 0;
+
rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
if (rc < 0)
return rc;
@@ -479,12 +482,15 @@ static int probe_itpm(struct tpm_chip *chip)
tpm_tis_ready(chip);
release_locality(chip, priv->locality, 0);
+ priv->flags |= TPM_TIS_ITPM_WORKAROUND;
+
rc = tpm_tis_send_data(chip, cmd_getticks, len);
- if (rc == 0) {
+ if (rc == 0)
dev_info(&chip->dev, "Detected an iTPM.\n");
- rc = 1;
- } else
+ else {
+ priv->flags &= ~TPM_TIS_ITPM_WORKAROUND;
rc = -EFAULT;
+ }
out:
tpm_tis_ready(chip);
@@ -552,7 +558,8 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
- return tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc);
+ return tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
+ 0);
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -740,15 +747,10 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
(chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
vendor >> 16, rid);
- if (!(priv->flags & TPM_TIS_ITPM_POSSIBLE)) {
- probe = probe_itpm(chip);
- if (probe < 0) {
- rc = -ENODEV;
- goto out_err;
- }
-
- if (!!probe)
- priv->flags |= TPM_TIS_ITPM_POSSIBLE;
+ probe = probe_itpm(chip);
+ if (probe < 0) {
+ rc = -ENODEV;
+ goto out_err;
}
/* Figure out the capabilities */
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 9191aabbf9c2..e2212f021a02 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -80,7 +80,7 @@ enum tis_defaults {
#define TPM_RID(l) (0x0F04 | ((l) << 12))
enum tpm_tis_flags {
- TPM_TIS_ITPM_POSSIBLE = BIT(0),
+ TPM_TIS_ITPM_WORKAROUND = BIT(0),
};
struct tpm_tis_data {
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index dbaad9c681e3..5292e5768a7e 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -33,7 +33,6 @@
#include <linux/acpi.h>
#include <linux/freezer.h>
-#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_irq.h>
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 5463b58af26e..751059d2140a 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -65,7 +65,12 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
/**
* vtpm_proxy_fops_read - Read TPM commands on 'server side'
*
- * Return value:
+ * @filp: file pointer
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @off: offset
+ *
+ * Return:
* Number of bytes read or negative error code
*/
static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
@@ -115,7 +120,12 @@ static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
/**
* vtpm_proxy_fops_write - Write TPM responses on 'server side'
*
- * Return value:
+ * @filp: file pointer
+ * @buf: write buffer
+ * @count: number of bytes to write
+ * @off: offset
+ *
+ * Return:
* Number of bytes read or negative error value
*/
static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
@@ -155,10 +165,12 @@ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
}
/*
- * vtpm_proxy_fops_poll: Poll status on 'server side'
+ * vtpm_proxy_fops_poll - Poll status on 'server side'
+ *
+ * @filp: file pointer
+ * @wait: poll table
*
- * Return value:
- * Poll flags
+ * Return: Poll flags
*/
static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
@@ -185,6 +197,8 @@ static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
/*
* vtpm_proxy_fops_open - Open vTPM device on 'server side'
*
+ * @filp: file pointer
+ *
* Called when setting up the anonymous file descriptor
*/
static void vtpm_proxy_fops_open(struct file *filp)
@@ -196,8 +210,9 @@ static void vtpm_proxy_fops_open(struct file *filp)
/**
* vtpm_proxy_fops_undo_open - counter-part to vtpm_fops_open
+ * Call to undo vtpm_proxy_fops_open
*
- * Call to undo vtpm_proxy_fops_open
+ * @proxy_dev: tpm proxy device
*/
static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
{
@@ -212,9 +227,11 @@ static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
}
/*
- * vtpm_proxy_fops_release: Close 'server side'
+ * vtpm_proxy_fops_release - Close 'server side'
*
- * Return value:
+ * @inode: inode
+ * @filp: file pointer
+ * Return:
* Always returns 0.
*/
static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
@@ -245,7 +262,10 @@ static const struct file_operations vtpm_proxy_fops = {
/*
* Called when core TPM driver reads TPM responses from 'server side'
*
- * Return value:
+ * @chip: tpm chip to use
+ * @buf: receive buffer
+ * @count: bytes to read
+ * Return:
* Number of TPM response bytes read, negative error value otherwise
*/
static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
@@ -282,7 +302,11 @@ out:
/*
* Called when core TPM driver forwards TPM requests to 'server side'.
*
- * Return value:
+ * @chip: tpm chip to use
+ * @buf: send buffer
+ * @count: bytes to send
+ *
+ * Return:
* 0 in case of success, negative error value otherwise.
*/
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
@@ -442,7 +466,7 @@ static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
/*
* Create a /dev/tpm%d and 'server side' file descriptor pair
*
- * Return value:
+ * Return:
* Returns file pointer on success, an error value otherwise
*/
static struct file *vtpm_proxy_create_device(
@@ -571,7 +595,7 @@ static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
/*
* vtpmx_fops_ioctl: ioctl on /dev/vtpmx
*
- * Return value:
+ * Return:
* Returns 0 on success, a negative error code otherwise.
*/
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 5aaa268f3a78..656e8af95d52 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -289,7 +289,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
struct tpm_private *priv;
- struct tpm_chip *chip;
int rv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -306,7 +305,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
- chip = dev_get_drvdata(&dev->dev);
ring_free(priv);
return rv;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
- GATE_BUS_TOP, 27, 0, 0),
+ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
- GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
- GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
GATE_BUS_TOP, 5, 0, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
- GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
- GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, 0, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
- GATE_BUS_TOP, 18, 0, 0),
+ GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
GATE_BUS_TOP, 28, 0, 0),
GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
GATE_BUS_TOP, 29, 0, 0),
GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
- SRC_MASK_TOP2, 24, 0, 0),
+ SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index f010562534eb..2c44aeb0b97c 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -633,16 +633,12 @@ static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
struct dev_pm_opp *opp;
int i, uv;
- rcu_read_lock();
-
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
+ if (IS_ERR(opp))
return PTR_ERR(opp);
- }
- uv = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ uv = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
for (i = 0; i < td->i2c_lut_size; i++) {
if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
@@ -1440,8 +1436,6 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
struct dev_pm_opp *opp;
int lut;
- rcu_read_lock();
-
rate = ULONG_MAX;
opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
if (IS_ERR(opp)) {
@@ -1449,6 +1443,7 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
goto out;
}
v_max = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
v = td->soc->cvb->min_millivolts * 1000;
lut = find_vdd_map_entry_exact(td, v);
@@ -1465,6 +1460,8 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
if (v_opp <= td->soc->cvb->min_millivolts * 1000)
td->dvco_rate_min = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
for (;;) {
v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
if (v >= v_opp)
@@ -1496,8 +1493,6 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
ret = 0;
out:
- rcu_read_unlock();
-
return ret;
}
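The hunks above switch clk-dfll from RCU-protected OPP lookups to the reference-counted API. A minimal sketch of the resulting pattern (hypothetical helper, assuming the standard dev_pm_opp_* calls from <linux/pm_opp.h>):

/* Look up the OPP at or above *rate and return its voltage in uV.
 * No rcu_read_lock()/unlock() around the lookup; the OPP reference is
 * dropped with dev_pm_opp_put() once its fields have been read. */
static long example_opp_voltage(struct device *dev, unsigned long *rate)
{
	struct dev_pm_opp *opp;
	unsigned long uv;

	opp = dev_pm_opp_find_freq_ceil(dev, rate);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	uv = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	return uv;
}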
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4866f7aa32e6..3356ab821624 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -5,6 +5,10 @@ config CLKSRC_OF
bool
select CLKSRC_PROBE
+config CLKEVT_OF
+ bool
+ select CLKEVT_PROBE
+
config CLKSRC_ACPI
bool
select CLKSRC_PROBE
@@ -12,6 +16,9 @@ config CLKSRC_ACPI
config CLKSRC_PROBE
bool
+config CLKEVT_PROBE
+ bool
+
config CLKSRC_I8253
bool
@@ -60,6 +67,16 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select CLKSRC_OF
+config GEMINI_TIMER
+ bool "Cortina Gemini timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+ select MFD_SYSCON
+ help
+ Enables support for the Gemini timer
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
@@ -325,16 +342,30 @@ config ARM_ARCH_TIMER_EVTSTREAM
This must be disabled for hardware validation purposes to detect any
hardware anomalies of missing events.
+config ARM_ARCH_TIMER_OOL_WORKAROUND
+ bool
+
config FSL_ERRATUM_A008585
bool "Workaround for Freescale/NXP Erratum A-008585"
default y
depends on ARM_ARCH_TIMER && ARM64
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
help
This option enables a workaround for Freescale/NXP Erratum
A-008585 ("ARM generic timer may contain an erroneous
value"). The workaround will only be active if the
fsl,erratum-a008585 property is found in the timer node.
+config HISILICON_ERRATUM_161010101
+ bool "Workaround for Hisilicon Erratum 161010101"
+ default y
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ depends on ARM_ARCH_TIMER && ARM64
+ help
+ This option enables a workaround for Hisilicon Erratum
+ 161010101. The workaround will be active if the hisilicon,erratum-161010101
+ property is found in the timer node.
+
config ARM_GLOBAL_TIMER
bool "Support for the ARM global timer" if COMPILE_TEST
select CLKSRC_OF if OF
@@ -467,6 +498,13 @@ config SH_TIMER_MTU2
Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
This hardware comes with 16 bit-timer registers.
+config RENESAS_OSTM
+ bool "Renesas OSTM timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables the support for the Renesas OSTM.
+
config SH_TIMER_TMU
bool "Renesas TMU timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index a14111e1f087..d227d1314f14 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_CLKSRC_PROBE) += clksrc-probe.o
+obj-$(CONFIG_CLKEVT_PROBE) += clkevt-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
+obj-$(CONFIG_RENESAS_OSTM) += renesas-ostm.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
@@ -15,6 +17,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_GEMINI_TIMER) += timer-gemini.o
obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4c8c3fb2e8b2..93aa1364376a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -96,41 +96,107 @@ early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
*/
#ifdef CONFIG_FSL_ERRATUM_A008585
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
-
-static int fsl_a008585_enable = -1;
-
-static int __init early_fsl_a008585_cfg(char *buf)
+/*
+ * The number of retries is an arbitrary value well beyond the highest number
+ * of iterations the loop has been observed to take.
+ */
+#define __fsl_a008585_read_reg(reg) ({ \
+ u64 _old, _new; \
+ int _retries = 200; \
+ \
+ do { \
+ _old = read_sysreg(reg); \
+ _new = read_sysreg(reg); \
+ _retries--; \
+ } while (unlikely(_old != _new) && _retries); \
+ \
+ WARN_ON_ONCE(!_retries); \
+ _new; \
+})
+
+static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
- int ret;
- bool val;
+ return __fsl_a008585_read_reg(cntp_tval_el0);
+}
- ret = strtobool(buf, &val);
- if (ret)
- return ret;
+static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
+{
+ return __fsl_a008585_read_reg(cntv_tval_el0);
+}
- fsl_a008585_enable = val;
- return 0;
+static u64 notrace fsl_a008585_read_cntvct_el0(void)
+{
+ return __fsl_a008585_read_reg(cntvct_el0);
}
-early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg);
+#endif
-u32 __fsl_a008585_read_cntp_tval_el0(void)
+#ifdef CONFIG_HISILICON_ERRATUM_161010101
+/*
+ * Verify whether the value of the second read is larger than the first by
+ * less than 32 is the only way to confirm the value is correct, so clear the
+ * lower 5 bits to check whether the difference is greater than 32 or not.
+ * Theoretically the erratum should not occur more than twice in succession
+ * when reading the system counter, but it is possible that some interrupts
+ * may lead to more than twice read errors, triggering the warning, so setting
+ * the number of retries far beyond the number of iterations the loop has been
+ * observed to take.
+ */
+#define __hisi_161010101_read_reg(reg) ({ \
+ u64 _old, _new; \
+ int _retries = 50; \
+ \
+ do { \
+ _old = read_sysreg(reg); \
+ _new = read_sysreg(reg); \
+ _retries--; \
+ } while (unlikely((_new - _old) >> 5) && _retries); \
+ \
+ WARN_ON_ONCE(!_retries); \
+ _new; \
+})
+
+static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
- return __fsl_a008585_read_reg(cntp_tval_el0);
+ return __hisi_161010101_read_reg(cntp_tval_el0);
}
-u32 __fsl_a008585_read_cntv_tval_el0(void)
+static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
- return __fsl_a008585_read_reg(cntv_tval_el0);
+ return __hisi_161010101_read_reg(cntv_tval_el0);
}
-u64 __fsl_a008585_read_cntvct_el0(void)
+static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
- return __fsl_a008585_read_reg(cntvct_el0);
+ return __hisi_161010101_read_reg(cntvct_el0);
}
-EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0);
-#endif /* CONFIG_FSL_ERRATUM_A008585 */
+#endif
+
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
+EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
+
+DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
+EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+
+static const struct arch_timer_erratum_workaround ool_workarounds[] = {
+#ifdef CONFIG_FSL_ERRATUM_A008585
+ {
+ .id = "fsl,erratum-a008585",
+ .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
+ .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
+ .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
+ },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161010101
+ {
+ .id = "hisilicon,erratum-161010101",
+ .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
+ .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+ .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
+ },
+#endif
+};
+#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
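Worked example for the Hisilicon check above (illustrative values): if the first read returns 0x1000 and the second 0x101f, the difference is 31, so (_new - _old) >> 5 evaluates to 0 and the value is accepted; a difference of 32 or more leaves a non-zero result after the shift and forces another double read, up to the 50 retries budgeted in the macro.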
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
@@ -281,8 +347,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
-#ifdef CONFIG_FSL_ERRATUM_A008585
-static __always_inline void fsl_a008585_set_next_event(const int access,
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+static __always_inline void erratum_set_next_event_generic(const int access,
unsigned long evt, struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -300,20 +366,20 @@ static __always_inline void fsl_a008585_set_next_event(const int access,
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
-static int fsl_a008585_set_next_event_virt(unsigned long evt,
+static int erratum_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
-static int fsl_a008585_set_next_event_phys(unsigned long evt,
+static int erratum_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
-#endif /* CONFIG_FSL_ERRATUM_A008585 */
+#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
@@ -343,16 +409,16 @@ static int arch_timer_set_next_event_phys_mem(unsigned long evt,
return 0;
}
-static void fsl_a008585_set_sne(struct clock_event_device *clk)
+static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
-#ifdef CONFIG_FSL_ERRATUM_A008585
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
return;
if (arch_timer_uses_ppi == VIRT_PPI)
- clk->set_next_event = fsl_a008585_set_next_event_virt;
+ clk->set_next_event = erratum_set_next_event_virt;
else
- clk->set_next_event = fsl_a008585_set_next_event_phys;
+ clk->set_next_event = erratum_set_next_event_phys;
#endif
}
@@ -385,7 +451,7 @@ static void __arch_timer_setup(unsigned type,
BUG();
}
- fsl_a008585_set_sne(clk);
+ erratum_workaround_set_sne(clk);
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -580,7 +646,7 @@ static struct clocksource clocksource_counter = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static struct cyclecounter cyclecounter = {
+static struct cyclecounter cyclecounter __ro_after_init = {
.read = arch_counter_read_cc,
.mask = CLOCKSOURCE_MASK(56),
};
@@ -605,7 +671,7 @@ static void __init arch_counter_register(unsigned type)
clocksource_counter.archdata.vdso_direct = true;
-#ifdef CONFIG_FSL_ERRATUM_A008585
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
* Don't use the vdso fastpath if errata require using
* the out-of-line counter accessor.
@@ -893,12 +959,15 @@ static int __init arch_timer_of_init(struct device_node *np)
arch_timer_c3stop = !of_property_read_bool(np, "always-on");
-#ifdef CONFIG_FSL_ERRATUM_A008585
- if (fsl_a008585_enable < 0)
- fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585");
- if (fsl_a008585_enable) {
- static_branch_enable(&arch_timer_read_ool_enabled);
- pr_info("Enabling workaround for FSL erratum A-008585\n");
+#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+ for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
+ if (of_property_read_bool(np, ool_workarounds[i].id)) {
+ timer_unstable_counter_workaround = &ool_workarounds[i];
+ static_branch_enable(&arch_timer_read_ool_enabled);
+ pr_info("arch_timer: Enabling workaround for %s\n",
+ timer_unstable_counter_workaround->id);
+ break;
+ }
}
#endif
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
new file mode 100644
index 000000000000..8c30fec86094
--- /dev/null
+++ b/drivers/clocksource/clkevt-probe.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd. All rights reserved.
+ * Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/clockchips.h>
+
+extern struct of_device_id __clkevt_of_table[];
+
+static const struct of_device_id __clkevt_of_table_sentinel
+ __used __section(__clkevt_of_table_end);
+
+int __init clockevent_probe(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ of_init_fn_1_ret init_func;
+ int ret, clockevents = 0;
+
+ for_each_matching_node_and_match(np, __clkevt_of_table, &match) {
+ if (!of_device_is_available(np))
+ continue;
+
+ init_func = match->data;
+
+ ret = init_func(np);
+ if (ret) {
+ pr_warn("Failed to initialize '%s' (%d)\n",
+ np->name, ret);
+ continue;
+ }
+
+ clockevents++;
+ }
+
+ if (!clockevents) {
+ pr_crit("%s: no matching clockevent found\n", __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
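A hedged usage sketch: assuming the series also provides a CLOCKEVENT_OF_DECLARE() macro (mirroring CLOCKSOURCE_OF_DECLARE()) that places an of_device_id entry into __clkevt_of_table, a timer driver would opt in as below; the driver name and compatible string are made up for illustration.

static int __init acme_timer_init(struct device_node *np)
{
	/* Map registers, grab the clock and IRQ, then register the
	 * clock_event_device; return 0 on success, a negative errno on failure. */
	return 0;
}
CLOCKEVENT_OF_DECLARE(acme_timer, "acme,timer", acme_timer_init);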
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2278bd..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
new file mode 100644
index 000000000000..c76f57668fb2
--- /dev/null
+++ b/drivers/clocksource/renesas-ostm.c
@@ -0,0 +1,265 @@
+/*
+ * Renesas Timer Support - OSTM
+ *
+ * Copyright (C) 2017 Renesas Electronics America, Inc.
+ * Copyright (C) 2017 Chris Brandt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+/*
+ * The OSTM contains independent channels.
+ * The first OSTM channel probed will be set up as a free running
+ * clocksource. Additionally we will use this clocksource for the system
+ * schedule timer sched_clock().
+ *
+ * The second (or more) channel probed will be set up as an interrupt
+ * driven clock event.
+ */
+
+struct ostm_device {
+ void __iomem *base;
+ unsigned long ticks_per_jiffy;
+ struct clock_event_device ced;
+};
+
+static void __iomem *system_clock; /* For sched_clock() */
+
+/* OSTM REGISTERS */
+#define OSTM_CMP 0x000 /* RW,32 */
+#define OSTM_CNT 0x004 /* R,32 */
+#define OSTM_TE 0x010 /* R,8 */
+#define OSTM_TS 0x014 /* W,8 */
+#define OSTM_TT 0x018 /* W,8 */
+#define OSTM_CTL 0x020 /* RW,8 */
+
+#define TE 0x01
+#define TS 0x01
+#define TT 0x01
+#define CTL_PERIODIC 0x00
+#define CTL_ONESHOT 0x02
+#define CTL_FREERUN 0x02
+
+static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
+{
+ return container_of(ced, struct ostm_device, ced);
+}
+
+static void ostm_timer_stop(struct ostm_device *ostm)
+{
+ if (readb(ostm->base + OSTM_TE) & TE) {
+ writeb(TT, ostm->base + OSTM_TT);
+
+ /*
+ * Read back the register simply to confirm the write operation
+ * has completed since I/O writes can sometimes get queued by
+ * the bus architecture.
+ */
+ while (readb(ostm->base + OSTM_TE) & TE)
+ ;
+ }
+}
+
+static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+{
+ /*
+ * irq not used (clock sources don't use interrupts)
+ */
+
+ ostm_timer_stop(ostm);
+
+ writel(0, ostm->base + OSTM_CMP);
+ writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
+ writeb(TS, ostm->base + OSTM_TS);
+
+ return clocksource_mmio_init(ostm->base + OSTM_CNT,
+ "ostm", rate,
+ 300, 32, clocksource_mmio_readl_up);
+}
+
+static u64 notrace ostm_read_sched_clock(void)
+{
+ return readl(system_clock);
+}
+
+static void __init ostm_init_sched_clock(struct ostm_device *ostm,
+ unsigned long rate)
+{
+ system_clock = ostm->base + OSTM_CNT;
+ sched_clock_register(ostm_read_sched_clock, 32, rate);
+}
+
+static int ostm_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct ostm_device *ostm = ced_to_ostm(ced);
+
+ ostm_timer_stop(ostm);
+
+ writel(delta, ostm->base + OSTM_CMP);
+ writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
+ writeb(TS, ostm->base + OSTM_TS);
+
+ return 0;
+}
+
+static int ostm_shutdown(struct clock_event_device *ced)
+{
+ struct ostm_device *ostm = ced_to_ostm(ced);
+
+ ostm_timer_stop(ostm);
+
+ return 0;
+}
+static int ostm_set_periodic(struct clock_event_device *ced)
+{
+ struct ostm_device *ostm = ced_to_ostm(ced);
+
+ if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
+ ostm_timer_stop(ostm);
+
+ writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
+ writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
+ writeb(TS, ostm->base + OSTM_TS);
+
+ return 0;
+}
+
+static int ostm_set_oneshot(struct clock_event_device *ced)
+{
+ struct ostm_device *ostm = ced_to_ostm(ced);
+
+ ostm_timer_stop(ostm);
+
+ return 0;
+}
+
+static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
+{
+ struct ostm_device *ostm = dev_id;
+
+ if (clockevent_state_oneshot(&ostm->ced))
+ ostm_timer_stop(ostm);
+
+ /* notify clockevent layer */
+ if (ostm->ced.event_handler)
+ ostm->ced.event_handler(&ostm->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
+ unsigned long rate)
+{
+ struct clock_event_device *ced = &ostm->ced;
+ int ret = -ENXIO;
+
+ ret = request_irq(irq, ostm_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "ostm", ostm);
+ if (ret) {
+ pr_err("ostm: failed to request irq\n");
+ return ret;
+ }
+
+ ced->name = "ostm";
+ ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+ ced->set_state_shutdown = ostm_shutdown;
+ ced->set_state_periodic = ostm_set_periodic;
+ ced->set_state_oneshot = ostm_set_oneshot;
+ ced->set_next_event = ostm_clock_event_next;
+ ced->shift = 32;
+ ced->rating = 300;
+ ced->cpumask = cpumask_of(0);
+ clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+
+ return 0;
+}
+
+static int __init ostm_init(struct device_node *np)
+{
+ struct ostm_device *ostm;
+ int ret = -EFAULT;
+ struct clk *ostm_clk = NULL;
+ int irq;
+ unsigned long rate;
+
+ ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
+ if (!ostm)
+ return -ENOMEM;
+
+ ostm->base = of_iomap(np, 0);
+ if (!ostm->base) {
+ pr_err("ostm: failed to remap I/O memory\n");
+ goto err;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq < 0) {
+ pr_err("ostm: Failed to get irq\n");
+ goto err;
+ }
+
+ ostm_clk = of_clk_get(np, 0);
+ if (IS_ERR(ostm_clk)) {
+ pr_err("ostm: Failed to get clock\n");
+ ostm_clk = NULL;
+ goto err;
+ }
+
+ ret = clk_prepare_enable(ostm_clk);
+ if (ret) {
+ pr_err("ostm: Failed to enable clock\n");
+ goto err;
+ }
+
+ rate = clk_get_rate(ostm_clk);
+ ostm->ticks_per_jiffy = (rate + HZ / 2) / HZ;
+
+ /*
+ * First probed device will be used as system clocksource. Any
+ * additional devices will be used as clock events.
+ */
+ if (!system_clock) {
+ ret = ostm_init_clksrc(ostm, rate);
+
+ if (!ret) {
+ ostm_init_sched_clock(ostm, rate);
+ pr_info("ostm: used for clocksource\n");
+ }
+
+ } else {
+ ret = ostm_init_clkevt(ostm, irq, rate);
+
+ if (!ret)
+ pr_info("ostm: used for clock events\n");
+ }
+
+err:
+ if (ret) {
+ clk_disable_unprepare(ostm_clk);
+ iounmap(ostm->base);
+ kfree(ostm);
+ return ret;
+ }
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index d4ca9962a759..745844ee973e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>
+#include <linux/sched_clock.h>
/*
@@ -56,11 +57,16 @@ static u64 tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
-static u64 tc_get_cycles32(struct clocksource *cs)
+static u32 tc_get_cv32(void)
{
return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
+static u64 tc_get_cycles32(struct clocksource *cs)
+{
+ return tc_get_cv32();
+}
+
static struct clocksource clksrc = {
.name = "tcb_clksrc",
.rating = 200,
@@ -69,6 +75,11 @@ static struct clocksource clksrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u64 notrace tc_read_sched_clock(void)
+{
+ return tc_get_cv32();
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -339,6 +350,9 @@ static int __init tcb_clksrc_init(void)
clksrc.read = tc_get_cycles32;
/* setup ony channel 0 */
tcb_setup_single_chan(tc, best_divisor_idx);
+
+ /* register sched_clock on chips with single 32 bit counter */
+ sched_clock_register(tc_read_sched_clock, 32, divided_rate);
} else {
/* tclib will give us three clocks no matter what the
* underlying platform supports.
diff --git a/drivers/clocksource/timer-gemini.c b/drivers/clocksource/timer-gemini.c
new file mode 100644
index 000000000000..dda27b7bf1a1
--- /dev/null
+++ b/drivers/clocksource/timer-gemini.c
@@ -0,0 +1,277 @@
+/*
+ * Gemini timer driver
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on a rewrite of arch/arm/mach-gemini/timer.c:
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+/*
+ * Relevant registers in the global syscon
+ */
+#define GLOBAL_STATUS 0x04
+#define CPU_AHB_RATIO_MASK (0x3 << 18)
+#define CPU_AHB_1_1 (0x0 << 18)
+#define CPU_AHB_3_2 (0x1 << 18)
+#define CPU_AHB_24_13 (0x2 << 18)
+#define CPU_AHB_2_1 (0x3 << 18)
+#define REG_TO_AHB_SPEED(reg) ((((reg) >> 15) & 0x7) * 10 + 130)
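+/*
+ * Illustrative example only: a GLOBAL_STATUS value with bits [17:15] = 0x3
+ * gives REG_TO_AHB_SPEED() = 3 * 10 + 130 = 160, i.e. a 160 MHz AHB clock.
+ * The APB tick rate derived below is then 160 MHz / 6.
+ */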
+
+/*
+ * Register definitions for the timers
+ */
+#define TIMER1_COUNT (0x00)
+#define TIMER1_LOAD (0x04)
+#define TIMER1_MATCH1 (0x08)
+#define TIMER1_MATCH2 (0x0c)
+#define TIMER2_COUNT (0x10)
+#define TIMER2_LOAD (0x14)
+#define TIMER2_MATCH1 (0x18)
+#define TIMER2_MATCH2 (0x1c)
+#define TIMER3_COUNT (0x20)
+#define TIMER3_LOAD (0x24)
+#define TIMER3_MATCH1 (0x28)
+#define TIMER3_MATCH2 (0x2c)
+#define TIMER_CR (0x30)
+#define TIMER_INTR_STATE (0x34)
+#define TIMER_INTR_MASK (0x38)
+
+#define TIMER_1_CR_ENABLE (1 << 0)
+#define TIMER_1_CR_CLOCK (1 << 1)
+#define TIMER_1_CR_INT (1 << 2)
+#define TIMER_2_CR_ENABLE (1 << 3)
+#define TIMER_2_CR_CLOCK (1 << 4)
+#define TIMER_2_CR_INT (1 << 5)
+#define TIMER_3_CR_ENABLE (1 << 6)
+#define TIMER_3_CR_CLOCK (1 << 7)
+#define TIMER_3_CR_INT (1 << 8)
+#define TIMER_1_CR_UPDOWN (1 << 9)
+#define TIMER_2_CR_UPDOWN (1 << 10)
+#define TIMER_3_CR_UPDOWN (1 << 11)
+#define TIMER_DEFAULT_FLAGS (TIMER_1_CR_UPDOWN | \
+ TIMER_3_CR_ENABLE | \
+ TIMER_3_CR_UPDOWN)
+
+#define TIMER_1_INT_MATCH1 (1 << 0)
+#define TIMER_1_INT_MATCH2 (1 << 1)
+#define TIMER_1_INT_OVERFLOW (1 << 2)
+#define TIMER_2_INT_MATCH1 (1 << 3)
+#define TIMER_2_INT_MATCH2 (1 << 4)
+#define TIMER_2_INT_OVERFLOW (1 << 5)
+#define TIMER_3_INT_MATCH1 (1 << 6)
+#define TIMER_3_INT_MATCH2 (1 << 7)
+#define TIMER_3_INT_OVERFLOW (1 << 8)
+#define TIMER_INT_ALL_MASK 0x1ff
+
+static unsigned int tick_rate;
+static void __iomem *base;
+
+static u64 notrace gemini_read_sched_clock(void)
+{
+ return readl(base + TIMER3_COUNT);
+}
+
+static int gemini_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 cr;
+
+ /* Setup the match register */
+ cr = readl(base + TIMER1_COUNT);
+ writel(cr + cycles, base + TIMER1_MATCH1);
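+	/*
+	 * If the counter has already run past the match point (the requested
+	 * delta was too small), report -ETIME so the clockevents core can
+	 * retry with a larger delta.
+	 */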
+ if (readl(base + TIMER1_COUNT) - cr > cycles)
+ return -ETIME;
+
+ return 0;
+}
+
+static int gemini_timer_shutdown(struct clock_event_device *evt)
+{
+ u32 cr;
+
+ /*
+ * Disable also for oneshot: the set_next() call will arm the timer
+ * instead.
+ */
+ /* Stop timer and interrupt. */
+ cr = readl(base + TIMER_CR);
+ cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+ writel(cr, base + TIMER_CR);
+
+ /* Setup counter start from 0 */
+ writel(0, base + TIMER1_COUNT);
+ writel(0, base + TIMER1_LOAD);
+
+ /* enable interrupt */
+ cr = readl(base + TIMER_INTR_MASK);
+ cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
+ cr |= TIMER_1_INT_MATCH1;
+ writel(cr, base + TIMER_INTR_MASK);
+
+ /* start the timer */
+ cr = readl(base + TIMER_CR);
+ cr |= TIMER_1_CR_ENABLE;
+ writel(cr, base + TIMER_CR);
+
+ return 0;
+}
+
+static int gemini_timer_set_periodic(struct clock_event_device *evt)
+{
+ u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
+ u32 cr;
+
+ /* Stop timer and interrupt */
+ cr = readl(base + TIMER_CR);
+ cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+ writel(cr, base + TIMER_CR);
+
+	/* Set up the timer to fire at 1/HZ intervals. */
+ cr = 0xffffffff - (period - 1);
+ writel(cr, base + TIMER1_COUNT);
+ writel(cr, base + TIMER1_LOAD);
+
+ /* enable interrupt on overflow */
+ cr = readl(base + TIMER_INTR_MASK);
+ cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
+ cr |= TIMER_1_INT_OVERFLOW;
+ writel(cr, base + TIMER_INTR_MASK);
+
+ /* Start the timer */
+ cr = readl(base + TIMER_CR);
+ cr |= TIMER_1_CR_ENABLE;
+ cr |= TIMER_1_CR_INT;
+ writel(cr, base + TIMER_CR);
+
+ return 0;
+}
+
+/* Use TIMER1 as clock event */
+static struct clock_event_device gemini_clockevent = {
+ .name = "TIMER1",
+ /* Reasonably fast and accurate clock event */
+ .rating = 300,
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = gemini_timer_set_next_event,
+ .set_state_shutdown = gemini_timer_shutdown,
+ .set_state_periodic = gemini_timer_set_periodic,
+ .set_state_oneshot = gemini_timer_shutdown,
+ .tick_resume = gemini_timer_shutdown,
+};
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &gemini_clockevent;
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction gemini_timer_irq = {
+ .name = "Gemini Timer Tick",
+ .flags = IRQF_TIMER,
+ .handler = gemini_timer_interrupt,
+};
+
+static int __init gemini_timer_of_init(struct device_node *np)
+{
+ static struct regmap *map;
+ int irq;
+ int ret;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(map)) {
+ pr_err("Can't get regmap for syscon handle");
+ return -ENODEV;
+ }
+ ret = regmap_read(map, GLOBAL_STATUS, &val);
+ if (ret) {
+ pr_err("Can't read syscon status register");
+ return -ENXIO;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
+ /* IRQ for timer 1 */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
+
+ tick_rate = REG_TO_AHB_SPEED(val) * 1000000;
+ printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000);
+
+	tick_rate /= 6;		/* the APB bus runs at AHB/6 */
+
+ switch (val & CPU_AHB_RATIO_MASK) {
+ case CPU_AHB_1_1:
+ printk(KERN_CONT "(1/1)\n");
+ break;
+ case CPU_AHB_3_2:
+ printk(KERN_CONT "(3/2)\n");
+ break;
+ case CPU_AHB_24_13:
+ printk(KERN_CONT "(24/13)\n");
+ break;
+ case CPU_AHB_2_1:
+ printk(KERN_CONT "(2/1)\n");
+ break;
+ }
+
+ /*
+ * Reset the interrupt mask and status
+ */
+ writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK);
+ writel(0, base + TIMER_INTR_STATE);
+ writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR);
+
+ /*
+ * Setup free-running clocksource timer (interrupts
+ * disabled.)
+ */
+ writel(0, base + TIMER3_COUNT);
+ writel(0, base + TIMER3_LOAD);
+ writel(0, base + TIMER3_MATCH1);
+ writel(0, base + TIMER3_MATCH2);
+ clocksource_mmio_init(base + TIMER3_COUNT,
+ "gemini_clocksource", tick_rate,
+ 300, 32, clocksource_mmio_readl_up);
+ sched_clock_register(gemini_read_sched_clock, 32, tick_rate);
+
+ /*
+ * Setup clockevent timer (interrupt-driven.)
+ */
+ writel(0, base + TIMER1_COUNT);
+ writel(0, base + TIMER1_LOAD);
+ writel(0, base + TIMER1_MATCH1);
+ writel(0, base + TIMER1_MATCH2);
+ setup_irq(irq, &gemini_timer_irq);
+ gemini_clockevent.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&gemini_clockevent, tick_rate,
+ 1, 0xffffffff);
+
+ return 0;
+}
CLOCKSOURCE_OF_DECLARE(gemini, "cortina,gemini-timer",
		       gemini_timer_of_init);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d8b164a7c4e5..4ebae43118ef 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -37,14 +37,6 @@ config CPU_FREQ_STAT
If in doubt, say N.
-config CPU_FREQ_STAT_DETAILS
- bool "CPU frequency transition statistics details"
- depends on CPU_FREQ_STAT
- help
- Show detailed CPU frequency transition table in sysfs.
-
- If in doubt, say N.
-
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
@@ -271,6 +263,16 @@ config IA64_ACPI_CPUFREQ
endif
if MIPS
+config BMIPS_CPUFREQ
+ tristate "BMIPS CPUfreq Driver"
+ help
+ This option adds a CPUfreq driver for BMIPS processors with
+ support for configurable CPU frequency.
+
+ For now, BMIPS5 chips are supported (such as the Broadcom 7425).
+
+ If in doubt, say N.
+
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
help
@@ -332,7 +334,7 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
- depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+ depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 920c469f3953..74fa5c5904d3 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,6 +247,17 @@ config ARM_TEGRA124_CPUFREQ
help
This adds the CPUFreq driver support for Tegra124 SOCs.
+config ARM_TI_CPUFREQ
+ bool "Texas Instruments CPUFreq support"
+ depends on ARCH_OMAP2PLUS
+ help
+ This driver enables valid OPPs on the running platform based on
+ values contained within the SoC in use. Enable this in order to
	  use the cpufreq-dt driver on all Texas Instruments platforms that
	  provide DT-based operating-points-v2 tables with opp-supported-hw
	  data. Required for cpufreq support on AM335x, AM437x,
+ DRA7x, and AM57x platforms.
+
config ARM_PXA2xx_CPUFREQ
tristate "Intel PXA2xx CPUfreq driver"
depends on PXA27x || PXA25x
@@ -257,7 +268,7 @@ config ARM_PXA2xx_CPUFREQ
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI
+ depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
default n
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1e46c3918e7a..9f5a8045f36d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
@@ -98,6 +99,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
# Other platform drivers
obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o
+obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
new file mode 100644
index 000000000000..1653151b77df
--- /dev/null
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -0,0 +1,188 @@
+/*
+ * CPU frequency scaling for Broadcom BMIPS SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/* for mips_hpt_frequency */
+#include <asm/time.h>
+
+#define BMIPS_CPUFREQ_PREFIX "bmips"
+#define BMIPS_CPUFREQ_NAME BMIPS_CPUFREQ_PREFIX "-cpufreq"
+
+#define TRANSITION_LATENCY (25 * 1000) /* 25 us */
+
+#define BMIPS5_CLK_DIV_SET_SHIFT 0x7
+#define BMIPS5_CLK_DIV_SHIFT 0x4
+#define BMIPS5_CLK_DIV_MASK 0xf
+
+enum bmips_type {
+ BMIPS5000,
+ BMIPS5200,
+};
+
+struct cpufreq_compat {
+ const char *compatible;
+ unsigned int bmips_type;
+ unsigned int clk_mult;
+ unsigned int max_freqs;
+};
+
+#define BMIPS(c, t, m, f) { \
+ .compatible = c, \
+ .bmips_type = (t), \
+ .clk_mult = (m), \
+ .max_freqs = (f), \
+}
+
+static struct cpufreq_compat bmips_cpufreq_compat[] = {
+ BMIPS("brcm,bmips5000", BMIPS5000, 8, 4),
+ BMIPS("brcm,bmips5200", BMIPS5200, 8, 4),
+ { }
+};
+
+static struct cpufreq_compat *priv;
+
+static int htp_freq_to_cpu_freq(unsigned int clk_mult)
+{
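+	/* mips_hpt_frequency is in Hz; cpufreq frequency tables expect kHz */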
+ return mips_hpt_frequency * clk_mult / 1000;
+}
+
+static struct cpufreq_frequency_table *
+bmips_cpufreq_get_freq_table(const struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned long cpu_freq;
+ int i;
+
+ cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult);
+
+ table = kmalloc((priv->max_freqs + 1) * sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
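+	/* Each successive entry halves the CPU clock: f, f/2, f/4, ... */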
+ for (i = 0; i < priv->max_freqs; i++) {
+ table[i].frequency = cpu_freq / (1 << i);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ return table;
+}
+
+static unsigned int bmips_cpufreq_get(unsigned int cpu)
+{
+ unsigned int div;
+ uint32_t mode;
+
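+	/*
+	 * On BMIPS5xxx the mode register carries a power-of-two CPU clock
+	 * divider in bits [7:4]; the current rate is the base clock >> div.
+	 */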
+ switch (priv->bmips_type) {
+ case BMIPS5200:
+ case BMIPS5000:
+ mode = read_c0_brcm_mode();
+ div = ((mode >> BMIPS5_CLK_DIV_SHIFT) & BMIPS5_CLK_DIV_MASK);
+ break;
+ default:
+ div = 0;
+ }
+
+ return htp_freq_to_cpu_freq(priv->clk_mult) / (1 << div);
+}
+
+static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int div = policy->freq_table[index].driver_data;
+
+ switch (priv->bmips_type) {
+ case BMIPS5200:
+ case BMIPS5000:
+ change_c0_brcm_mode(BMIPS5_CLK_DIV_MASK << BMIPS5_CLK_DIV_SHIFT,
+ (1 << BMIPS5_CLK_DIV_SET_SHIFT) |
+ (div << BMIPS5_CLK_DIV_SHIFT));
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+
+ return 0;
+}
+
+static int bmips_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ int ret;
+
+ freq_table = bmips_cpufreq_get_freq_table(policy);
+ if (IS_ERR(freq_table)) {
+ ret = PTR_ERR(freq_table);
+ pr_err("%s: couldn't determine frequency table (%d).\n",
+ BMIPS_CPUFREQ_NAME, ret);
+ return ret;
+ }
+
+ ret = cpufreq_generic_init(policy, freq_table, TRANSITION_LATENCY);
+ if (ret)
+ bmips_cpufreq_exit(policy);
+ else
+ pr_info("%s: registered\n", BMIPS_CPUFREQ_NAME);
+
+ return ret;
+}
+
+static struct cpufreq_driver bmips_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bmips_cpufreq_target_index,
+ .get = bmips_cpufreq_get,
+ .init = bmips_cpufreq_init,
+ .exit = bmips_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+ .name = BMIPS_CPUFREQ_PREFIX,
+};
+
+static int __init bmips_cpufreq_probe(void)
+{
+ struct cpufreq_compat *cc;
+ struct device_node *np;
+
+ for (cc = bmips_cpufreq_compat; cc->compatible; cc++) {
+ np = of_find_compatible_node(NULL, "cpu", cc->compatible);
+ if (np) {
+ of_node_put(np);
+ priv = cc;
+ break;
+ }
+ }
+
+ /* We hit the guard element of the array. No compatible CPU found. */
+ if (!cc->compatible)
+ return -ENODEV;
+
+ return cpufreq_register_driver(&bmips_cpufreq_driver);
+}
+device_initcall(bmips_cpufreq_probe);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4fda623e55bb..7281a2c19c36 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
static int brcm_avs_suspend(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
+ int ret;
+
+ ret = brcm_avs_get_pmap(priv, &priv->pmap);
+ if (ret)
+ return ret;
- return brcm_avs_get_pmap(priv, &priv->pmap);
+ /*
+ * We can't use the P-state returned by brcm_avs_get_pmap(), since
+ * that's the initial P-state from when the P-map was downloaded to the
+ * AVS co-processor, not necessarily the P-state we are running at now.
+ * So, we get the current P-state explicitly.
+ */
+ return brcm_avs_get_pstate(priv, &priv->pmap.state);
}
static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -867,7 +878,6 @@ unmap_intr_base:
iounmap(priv->avs_intr_base);
unmap_base:
iounmap(priv->base);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -954,9 +964,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
- return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+ return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
- mdiv_p3, mdiv_p4);
+ mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
}
static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
@@ -1031,7 +1041,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
priv = platform_get_drvdata(pdev);
iounmap(priv->base);
iounmap(priv->avs_intr_base);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 7fcaf26e8f81..921b4a6c3d16 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,8 +87,6 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "socionext,uniphier-ld11", },
{ .compatible = "socionext,uniphier-ld20", },
- { .compatible = "ti,am33xx", },
- { .compatible = "ti,dra7", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 269013311e79..c943787d761e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -148,7 +148,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
- struct dev_pm_opp *suspend_opp;
unsigned int transition_latency;
bool fallback = false;
const char *name;
@@ -252,11 +251,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = priv;
policy->clk = cpu_clk;
- rcu_read_lock();
- suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
- if (suspend_opp)
- policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
- rcu_read_unlock();
+ policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cc475eff90b3..a47543281864 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
u64 cur_wall_time;
u64 busy_time;
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
@@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = cputime_to_usecs(cur_wall_time);
+ *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
- return cputime_to_usecs(idle_time);
+ return div_u64(idle_time, NSEC_PER_USEC);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
@@ -1078,15 +1078,11 @@ err_free_policy:
return NULL;
}
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
struct kobject *kobj;
struct completion *cmp;
- if (notify)
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_REMOVE_POLICY, policy);
-
down_write(&policy->rwsem);
cpufreq_stats_free_table(policy);
kobj = &policy->kobj;
@@ -1104,7 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
pr_debug("wait complete\n");
}
-static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
unsigned long flags;
int cpu;
@@ -1117,7 +1113,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- cpufreq_policy_put_kobj(policy, notify);
+ cpufreq_policy_put_kobj(policy);
free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
@@ -1170,8 +1166,6 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
- /* Clear mask of registered CPUs */
- cpumask_clear(policy->real_cpus);
}
/*
@@ -1244,17 +1238,12 @@ static int cpufreq_online(unsigned int cpu)
goto out_exit_policy;
cpufreq_stats_create_table(policy);
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_CREATE_POLICY, policy);
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_START, policy);
-
ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1282,7 +1271,7 @@ out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
out_free_policy:
- cpufreq_policy_free(policy, !new_policy);
+ cpufreq_policy_free(policy);
return ret;
}
@@ -1403,7 +1392,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
remove_cpu_dev_symlink(policy, dev);
if (cpumask_empty(policy->real_cpus))
- cpufreq_policy_free(policy, true);
+ cpufreq_policy_free(policy);
}
/**
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0196467280bd..631bd2c86c5e 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+ idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
j_cdbs->prev_cpu_nice = cur_nice;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index ac284e66839c..f570ead62454 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,6 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/cputime.h>
static DEFINE_SPINLOCK(cpufreq_stats_lock);
@@ -25,9 +24,7 @@ struct cpufreq_stats {
unsigned int last_index;
u64 *time_in_state;
unsigned int *freq_table;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
-#endif
};
static int cpufreq_stats_update(struct cpufreq_stats *stats)
@@ -46,9 +43,7 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
unsigned int count = stats->max_state;
memset(stats->time_in_state, 0, count * sizeof(u64));
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
memset(stats->trans_table, 0, count * count * sizeof(int));
-#endif
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
}
@@ -84,7 +79,6 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
return count;
}
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
@@ -129,7 +123,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return len;
}
cpufreq_freq_attr_ro(trans_table);
-#endif
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
@@ -139,9 +132,7 @@ static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
&reset.attr,
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
&trans_table.attr,
-#endif
NULL
};
static struct attribute_group stats_attr_group = {
@@ -200,9 +191,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
alloc_size = count * sizeof(int) + count * sizeof(u64);
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
-#endif
/* Allocate memory for time_in_state/freq_table/trans_table in one go */
stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
@@ -211,9 +200,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->freq_table = (unsigned int *)(stats->time_in_state + count);
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stats->trans_table = stats->freq_table + count;
-#endif
stats->max_state = count;
@@ -259,8 +246,6 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
cpufreq_stats_update(stats);
stats->last_index = new_index;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stats->trans_table[old_index * stats->max_state + new_index]++;
-#endif
stats->total_trans++;
}
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index c0f3373706f4..9180d34cc9fc 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -118,12 +118,10 @@ static int init_div_table(void)
unsigned int tmp, clk_div, ema_div, freq, volt_id;
struct dev_pm_opp *opp;
- rcu_read_lock();
cpufreq_for_each_entry(pos, freq_tbl) {
opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
pos->frequency * 1000, true);
if (IS_ERR(opp)) {
- rcu_read_unlock();
dev_err(dvfs_info->dev,
"failed to find valid OPP for %u KHZ\n",
pos->frequency);
@@ -140,6 +138,7 @@ static int init_div_table(void)
/* Calculate EMA */
volt_id = dev_pm_opp_get_voltage(opp);
+
volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
if (volt_id < PMIC_HIGH_VOLT) {
ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -157,9 +156,9 @@ static int init_div_table(void)
__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
(pos - freq_tbl));
+ dev_pm_opp_put(opp);
}
- rcu_read_unlock();
return 0;
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ef1fa8145419..7719b02e04f5 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -53,16 +53,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
freq_hz = new_freq * 1000;
old_freq = clk_get_rate(arm_clk) / 1000;
- rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
- rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
+
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
@@ -321,14 +320,15 @@ soc_opp_out:
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
- rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[--num].frequency * 1000, true);
max_volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
+
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
transition_latency += ret * 1000;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f91c25718d16..eb0f7fb71685 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -358,6 +358,8 @@ static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
+static bool driver_registered __read_mostly;
+
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
@@ -394,6 +396,7 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif
+static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);
#ifdef CONFIG_ACPI
@@ -538,7 +541,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
-
#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -873,7 +875,10 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
+ if (limits->no_turbo)
+ hw_max = HWP_GUARANTEED_PERF(cap);
+ else
+ hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;
max_perf_pct = perf_limits->max_perf_pct;
@@ -887,11 +892,6 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
adj_range = max_perf_pct * range / 100;
max = hw_min + adj_range;
- if (limits->no_turbo) {
- hw_max = HWP_GUARANTEED_PERF(cap);
- if (hw_max < max)
- max = hw_max;
- }
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
@@ -1007,37 +1007,59 @@ static int pid_param_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
+static struct dentry *debugfs_parent;
+
struct pid_param {
char *name;
void *value;
+ struct dentry *dentry;
};
static struct pid_param pid_files[] = {
- {"sample_rate_ms", &pid_params.sample_rate_ms},
- {"d_gain_pct", &pid_params.d_gain_pct},
- {"i_gain_pct", &pid_params.i_gain_pct},
- {"deadband", &pid_params.deadband},
- {"setpoint", &pid_params.setpoint},
- {"p_gain_pct", &pid_params.p_gain_pct},
- {NULL, NULL}
+ {"sample_rate_ms", &pid_params.sample_rate_ms, },
+ {"d_gain_pct", &pid_params.d_gain_pct, },
+ {"i_gain_pct", &pid_params.i_gain_pct, },
+ {"deadband", &pid_params.deadband, },
+ {"setpoint", &pid_params.setpoint, },
+ {"p_gain_pct", &pid_params.p_gain_pct, },
+ {NULL, NULL, }
};
-static void __init intel_pstate_debug_expose_params(void)
+static void intel_pstate_debug_expose_params(void)
{
- struct dentry *debugfs_parent;
- int i = 0;
+ int i;
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
- while (pid_files[i].name) {
- debugfs_create_file(pid_files[i].name, 0660,
- debugfs_parent, pid_files[i].value,
- &fops_pid_param);
- i++;
+
+ for (i = 0; pid_files[i].name; i++) {
+ struct dentry *dentry;
+
+ dentry = debugfs_create_file(pid_files[i].name, 0660,
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
+ if (!IS_ERR(dentry))
+ pid_files[i].dentry = dentry;
}
}
+static void intel_pstate_debug_hide_params(void)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(debugfs_parent))
+ return;
+
+ for (i = 0; pid_files[i].name; i++) {
+ debugfs_remove(pid_files[i].dentry);
+ pid_files[i].dentry = NULL;
+ }
+
+ debugfs_remove(debugfs_parent);
+ debugfs_parent = NULL;
+}
+
/************************** debugfs end ************************/
/************************** sysfs begin ************************/
@@ -1048,6 +1070,34 @@ static void __init intel_pstate_debug_expose_params(void)
return sprintf(buf, "%u\n", limits->object); \
}
+static ssize_t intel_pstate_show_status(char *buf);
+static int intel_pstate_update_status(const char *buf, size_t size);
+
+static ssize_t show_status(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ ret = intel_pstate_show_status(buf);
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ ret = intel_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
+}
+
static ssize_t show_turbo_pct(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -1055,12 +1105,22 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -1070,8 +1130,18 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
return sprintf(buf, "%u\n", total);
}
@@ -1080,12 +1150,21 @@ static ssize_t show_no_turbo(struct kobject *kobj,
{
ssize_t ret;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
update_turbo_state();
if (limits->turbo_disabled)
ret = sprintf(buf, "%u\n", limits->turbo_disabled);
else
ret = sprintf(buf, "%u\n", limits->no_turbo);
+ mutex_unlock(&intel_pstate_driver_lock);
+
return ret;
}
@@ -1099,12 +1178,20 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
mutex_lock(&intel_pstate_limits_lock);
update_turbo_state();
if (limits->turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
+ mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
}
@@ -1114,6 +1201,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
return count;
}
@@ -1127,6 +1216,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
mutex_lock(&intel_pstate_limits_lock);
limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1142,6 +1238,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
return count;
}
@@ -1155,6 +1253,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!driver_registered) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
mutex_lock(&intel_pstate_limits_lock);
limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1170,12 +1275,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
+define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
@@ -1183,6 +1291,7 @@ define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
+ &status.attr,
&no_turbo.attr,
&turbo_pct.attr,
&num_pstates.attr,
@@ -1235,6 +1344,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
+#define MSR_IA32_POWER_CTL_BIT_EE 19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+ u64 power_ctl;
+ int ret;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+ if (ret)
+ return;
+
+ if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+ pr_info("Disabling energy efficiency optimization\n");
+ power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+ }
+}
+
static int atom_get_min_pstate(void)
{
u64 value;
@@ -1345,48 +1473,71 @@ static int core_get_max_pstate_physical(void)
return (value >> 8) & 0xFF;
}
+static int core_get_tdp_ratio(u64 plat_info)
+{
+	/* Check how many TDP levels are present */
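+	/* Bits 34:33 of MSR_PLATFORM_INFO hold the number of config TDP levels */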
+ if (plat_info & 0x600000000) {
+ u64 tdp_ctrl;
+ u64 tdp_ratio;
+ int tdp_msr;
+ int err;
+
+ /* Get the TDP level (0, 1, 2) to get ratios */
+ err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ if (err)
+ return err;
+
+ /* TDP MSR are continuous starting at 0x648 */
+ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
+ err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+ if (err)
+ return err;
+
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl & 0x03)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
+
+ return (int)tdp_ratio;
+ }
+
+ return -ENXIO;
+}
+
static int core_get_max_pstate(void)
{
u64 tar;
u64 plat_info;
int max_pstate;
+ int tdp_ratio;
int err;
rdmsrl(MSR_PLATFORM_INFO, plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
+ tdp_ratio = core_get_tdp_ratio(plat_info);
+ if (tdp_ratio <= 0)
+ return max_pstate;
+
+ if (hwp_active) {
+ /* Turbo activation ratio is not used on HWP platforms */
+ return tdp_ratio;
+ }
+
err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
+ int tar_levels;
+
/* Do some sanity checking for safety */
- if (plat_info & 0x600000000) {
- u64 tdp_ctrl;
- u64 tdp_ratio;
- int tdp_msr;
-
- err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
- if (err)
- goto skip_tar;
-
- tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
- err = rdmsrl_safe(tdp_msr, &tdp_ratio);
- if (err)
- goto skip_tar;
-
- /* For level 1 and 2, bits[23:16] contain the ratio */
- if (tdp_ctrl)
- tdp_ratio >>= 16;
-
- tdp_ratio &= 0xff; /* ratios are only 8 bits long */
- if (tdp_ratio - 1 == tar) {
- max_pstate = tar;
- pr_debug("max_pstate=TAC %x\n", max_pstate);
- } else {
- goto skip_tar;
- }
+ tar_levels = tar & 0xff;
+ if (tdp_ratio - 1 == tar_levels) {
+ max_pstate = tar_levels;
+ pr_debug("max_pstate=TAC %x\n", max_pstate);
}
}
-skip_tar:
return max_pstate;
}
@@ -1845,6 +1996,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
{}
};
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+ ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+ {}
+};
+
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1875,6 +2031,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu->cpu = cpunum;
if (hwp_active) {
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+ if (id)
+ intel_pstate_disable_ee(cpunum);
+
intel_pstate_hwp_enable(cpu);
pid_params.sample_rate_ms = 50;
pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
@@ -2005,7 +2167,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq) {
+ if (policy->max >= policy->cpuinfo.max_freq &&
+ !limits->no_turbo) {
pr_debug("set performance\n");
intel_pstate_set_performance_limits(perf_limits);
goto out;
@@ -2041,12 +2204,37 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct perf_limits *perf_limits;
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
+ perf_limits = &performance_limits;
+ else
+ perf_limits = &powersave_limits;
+
+ update_turbo_state();
+ policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
+ perf_limits->no_turbo ?
+ cpu->pstate.max_freq :
+ cpu->pstate.turbo_freq;
+
cpufreq_verify_within_cpu_limits(policy);
if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;
+ /* When per-CPU limits are used, sysfs limits are not used */
+ if (!per_cpu_limits) {
+ unsigned int max_freq, min_freq;
+
+ max_freq = policy->cpuinfo.max_freq *
+ limits->max_sysfs_pct / 100;
+ min_freq = policy->cpuinfo.max_freq *
+ limits->min_sysfs_pct / 100;
+ cpufreq_verify_within_limits(policy, min_freq, max_freq);
+ }
+
return 0;
}
@@ -2257,6 +2445,111 @@ static struct cpufreq_driver intel_cpufreq = {
static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+static void intel_pstate_driver_cleanup(void)
+{
+ unsigned int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_clear_update_util_hook(cpu);
+
+ kfree(all_cpu_data[cpu]);
+ all_cpu_data[cpu] = NULL;
+ }
+ }
+ put_online_cpus();
+}
+
+static int intel_pstate_register_driver(void)
+{
+ int ret;
+
+ ret = cpufreq_register_driver(intel_pstate_driver);
+ if (ret) {
+ intel_pstate_driver_cleanup();
+ return ret;
+ }
+
+ mutex_lock(&intel_pstate_limits_lock);
+ driver_registered = true;
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+ pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+ intel_pstate_debug_expose_params();
+
+ return 0;
+}
+
+static int intel_pstate_unregister_driver(void)
+{
+ if (hwp_active)
+ return -EBUSY;
+
+ if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+ pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+ intel_pstate_debug_hide_params();
+
+ mutex_lock(&intel_pstate_limits_lock);
+ driver_registered = false;
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_driver_cleanup();
+
+ return 0;
+}
+
+static ssize_t intel_pstate_show_status(char *buf)
+{
+ if (!driver_registered)
+ return sprintf(buf, "off\n");
+
+ return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
+ "active" : "passive");
+}
+
+static int intel_pstate_update_status(const char *buf, size_t size)
+{
+ int ret;
+
+ if (size == 3 && !strncmp(buf, "off", size))
+ return driver_registered ?
+ intel_pstate_unregister_driver() : -EINVAL;
+
+ if (size == 6 && !strncmp(buf, "active", size)) {
+ if (driver_registered) {
+ if (intel_pstate_driver == &intel_pstate)
+ return 0;
+
+ ret = intel_pstate_unregister_driver();
+ if (ret)
+ return ret;
+ }
+
+ intel_pstate_driver = &intel_pstate;
+ return intel_pstate_register_driver();
+ }
+
+ if (size == 7 && !strncmp(buf, "passive", size)) {
+ if (driver_registered) {
+ if (intel_pstate_driver != &intel_pstate)
+ return 0;
+
+ ret = intel_pstate_unregister_driver();
+ if (ret)
+ return ret;
+ }
+
+ intel_pstate_driver = &intel_cpufreq;
+ return intel_pstate_register_driver();
+ }
+
+ return -EINVAL;
+}
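+
+/*
+ * Illustrative usage, assuming the usual sysfs layout:
+ *
+ *   # echo passive > /sys/devices/system/cpu/intel_pstate/status
+ *   # cat /sys/devices/system/cpu/intel_pstate/status
+ *   passive
+ *
+ * "off" unregisters the driver, "active" keeps the built-in P-state
+ * governors, and "passive" switches to intel_cpufreq with the generic
+ * cpufreq governors.
+ */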
+
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
@@ -2444,9 +2737,9 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
static int __init intel_pstate_init(void)
{
- int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_def;
+ int rc = 0;
if (no_load)
return -ENODEV;
@@ -2478,45 +2771,29 @@ hwp_cpu_matched:
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
+ if (!hwp_active && hwp_only)
+ return -ENOTSUPP;
+
pr_info("Intel P-state driver initializing\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- if (!hwp_active && hwp_only)
- goto out;
-
intel_pstate_request_control_from_smm();
- rc = cpufreq_register_driver(intel_pstate_driver);
- if (rc)
- goto out;
-
- if (intel_pstate_driver == &intel_pstate && !hwp_active &&
- pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
- intel_pstate_debug_expose_params();
-
intel_pstate_sysfs_expose_params();
+ mutex_lock(&intel_pstate_driver_lock);
+ rc = intel_pstate_register_driver();
+ mutex_unlock(&intel_pstate_driver_lock);
+ if (rc)
+ return rc;
+
if (hwp_active)
pr_info("HWP enabled\n");
- return rc;
-out:
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (all_cpu_data[cpu]) {
- if (intel_pstate_driver == &intel_pstate)
- intel_pstate_clear_update_util_hook(cpu);
-
- kfree(all_cpu_data[cpu]);
- }
- }
-
- put_online_cpus();
- vfree(all_cpu_data);
- return -ENODEV;
+ return 0;
}
device_initcall(intel_pstate_init);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 643f43179df1..ab25b1235a5e 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -232,16 +232,14 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
freq_hz = freq_table[index].frequency * 1000;
- rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
- rcu_read_unlock();
pr_err("cpu%d: failed to find OPP for %ld\n",
policy->cpu, freq_hz);
return PTR_ERR(opp);
}
vproc = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
/*
* If the new voltage or the intermediate voltage is higher than the
@@ -411,16 +409,14 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Search a safe voltage for intermediate frequency. */
rate = clk_get_rate(inter_clk);
- rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
- rcu_read_unlock();
pr_err("failed to get intermediate opp for cpu%d\n", cpu);
ret = PTR_ERR(opp);
goto out_free_opp_table;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
info->cpu_dev = cpu_dev;
info->proc_reg = proc_reg;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 376e63ca94e8..71e81bbf031b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -63,16 +63,14 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
freq = ret;
if (mpu_reg) {
- rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
- rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, new_freq);
return -EINVAL;
}
volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 37671b545880..3ff5160451b4 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -144,6 +144,7 @@ static struct powernv_pstate_info {
unsigned int max;
unsigned int nominal;
unsigned int nr_pstates;
+ bool wof_enabled;
} powernv_pstate_info;
/* Use following macros for conversions between pstate_id and index */
@@ -203,6 +204,7 @@ static int init_powernv_pstates(void)
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
+ u32 pstate_turbo, pstate_ultra_turbo;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -225,8 +227,29 @@ static int init_powernv_pstates(void)
pr_warn("ibm,pstate-nominal not found\n");
return -ENODEV;
}
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
+ &pstate_ultra_turbo)) {
+ powernv_pstate_info.wof_enabled = false;
+ goto next;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
+ &pstate_turbo)) {
+ powernv_pstate_info.wof_enabled = false;
+ goto next;
+ }
+
+ if (pstate_turbo == pstate_ultra_turbo)
+ powernv_pstate_info.wof_enabled = false;
+ else
+ powernv_pstate_info.wof_enabled = true;
+
+next:
pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
pstate_nominal, pstate_max);
+ pr_info("Workload Optimized Frequency is %s in the platform\n",
+ (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
@@ -268,6 +291,13 @@ static int init_powernv_pstates(void)
powernv_pstate_info.nominal = i;
else if (id == pstate_min)
powernv_pstate_info.min = i;
+
+ if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
+ int j;
+
+ for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
+ powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
+ }
}
/* End of list marker entry */
@@ -305,9 +335,12 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
+#define SCALING_BOOST_FREQS_ATTR_INDEX 2
+
static struct freq_attr *powernv_cpu_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
+ &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -1013,11 +1046,22 @@ static int __init powernv_cpufreq_init(void)
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+ if (powernv_pstate_info.wof_enabled)
+ powernv_cpufreq_driver.boost_enabled = true;
+ else
+ powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
- if (!rc)
- return 0;
+ if (rc) {
+ pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+ goto cleanup_notifiers;
+ }
- pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+ if (powernv_pstate_info.wof_enabled)
+ cpufreq_enable_boost_support();
+
+ return 0;
+cleanup_notifiers:
unregister_all_notifiers();
clean_chip_info();
out:
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index dc112481a408..eeaa92251512 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -100,9 +100,6 @@ static int pmi_notifier(struct notifier_block *nb,
/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
* policy events?)
*/
- if (event == CPUFREQ_START)
- return 0;
-
node = cbe_cpu_to_node(policy->cpu);
pr_debug("got notified, event=%lu, node=%u\n", event, node);
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 53d8c3fb16f6..a6fefac8afe4 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/errno.h>
@@ -37,53 +38,20 @@ struct cpu_data {
struct thermal_cooling_device *cdev;
};
+/*
+ * Don't use cpufreq on this SoC -- used when the SoC would have otherwise
+ * matched a more generic compatible.
+ */
+#define SOC_BLACKLIST 1
+
/**
* struct soc_data - SoC specific data
- * @freq_mask: mask the disallowed frequencies
- * @flag: unique flags
+ * @flags: SOC_xxx
*/
struct soc_data {
- u32 freq_mask[4];
- u32 flag;
-};
-
-#define FREQ_MASK 1
-/* see hardware specification for the allowed frqeuencies */
-static const struct soc_data sdata[] = {
- { /* used by p2041 and p3041 */
- .freq_mask = {0x8, 0x8, 0x2, 0x2},
- .flag = FREQ_MASK,
- },
- { /* used by p5020 */
- .freq_mask = {0x8, 0x2},
- .flag = FREQ_MASK,
- },
- { /* used by p4080, p5040 */
- .freq_mask = {0},
- .flag = 0,
- },
+ u32 flags;
};
-/*
- * the minimum allowed core frequency, in Hz
- * for chassis v1.0, >= platform frequency
- * for chassis v2.0, >= platform frequency / 2
- */
-static u32 min_cpufreq;
-static const u32 *fmask;
-
-#if defined(CONFIG_ARM)
-static int get_cpu_physical_id(int cpu)
-{
- return topology_core_id(cpu);
-}
-#else
-static int get_cpu_physical_id(int cpu)
-{
- return get_hard_smp_processor_id(cpu);
-}
-#endif
-
static u32 get_bus_freq(void)
{
struct device_node *soc;
@@ -101,9 +69,10 @@ static u32 get_bus_freq(void)
return sysfreq;
}
-static struct device_node *cpu_to_clk_node(int cpu)
+static struct clk *cpu_to_clk(int cpu)
{
- struct device_node *np, *clk_np;
+ struct device_node *np;
+ struct clk *clk;
if (!cpu_present(cpu))
return NULL;
@@ -112,37 +81,28 @@ static struct device_node *cpu_to_clk_node(int cpu)
if (!np)
return NULL;
- clk_np = of_parse_phandle(np, "clocks", 0);
- if (!clk_np)
- return NULL;
-
+ clk = of_clk_get(np, 0);
of_node_put(np);
-
- return clk_np;
+ return clk;
}
/* traverse cpu nodes to get cpu mask of sharing clock wire */
static void set_affected_cpus(struct cpufreq_policy *policy)
{
- struct device_node *np, *clk_np;
struct cpumask *dstp = policy->cpus;
+ struct clk *clk;
int i;
- np = cpu_to_clk_node(policy->cpu);
- if (!np)
- return;
-
for_each_present_cpu(i) {
- clk_np = cpu_to_clk_node(i);
- if (!clk_np)
+ clk = cpu_to_clk(i);
+ if (IS_ERR(clk)) {
+ pr_err("%s: no clock for cpu %d\n", __func__, i);
continue;
+ }
- if (clk_np == np)
+ if (clk_is_match(policy->clk, clk))
cpumask_set_cpu(i, dstp);
-
- of_node_put(clk_np);
}
- of_node_put(np);
}
/* reduce the duplicated frequencies in frequency table */
@@ -198,10 +158,11 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- struct device_node *np, *pnode;
+ struct device_node *np;
int i, count, ret;
- u32 freq, mask;
+ u32 freq;
struct clk *clk;
+ const struct clk_hw *hwclk;
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
@@ -221,17 +182,13 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_nomem2;
}
- pnode = of_parse_phandle(np, "clocks", 0);
- if (!pnode) {
- pr_err("%s: could not get clock information\n", __func__);
- goto err_nomem2;
- }
+ hwclk = __clk_get_hw(policy->clk);
+ count = clk_hw_get_num_parents(hwclk);
- count = of_property_count_strings(pnode, "clock-names");
data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
if (!data->pclk) {
pr_err("%s: no memory\n", __func__);
- goto err_node;
+ goto err_nomem2;
}
table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
@@ -240,23 +197,11 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_pclk;
}
- if (fmask)
- mask = fmask[get_cpu_physical_id(cpu)];
- else
- mask = 0x0;
-
for (i = 0; i < count; i++) {
- clk = of_clk_get(pnode, i);
+ clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
data->pclk[i] = clk;
freq = clk_get_rate(clk);
- /*
- * the clock is valid if its frequency is not masked
- * and large than minimum allowed frequency.
- */
- if (freq < min_cpufreq || (mask & (1 << i)))
- table[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- table[i].frequency = freq / 1000;
+ table[i].frequency = freq / 1000;
table[i].driver_data = i;
}
freq_table_redup(table, count);
@@ -282,7 +227,6 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = u64temp + 1;
of_node_put(np);
- of_node_put(pnode);
return 0;
@@ -290,10 +234,7 @@ err_nomem1:
kfree(table);
err_pclk:
kfree(data->pclk);
-err_node:
- of_node_put(pnode);
err_nomem2:
- policy->driver_data = NULL;
kfree(data);
err_np:
of_node_put(np);
@@ -357,12 +298,25 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
+static const struct soc_data blacklist = {
+ .flags = SOC_BLACKLIST,
+};
+
static const struct of_device_id node_matches[] __initconst = {
- { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
- { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
- { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
- { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
- { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
+ /* e6500 cannot use cpufreq due to erratum A-008083 */
+ { .compatible = "fsl,b4420-clockgen", &blacklist },
+ { .compatible = "fsl,b4860-clockgen", &blacklist },
+ { .compatible = "fsl,t2080-clockgen", &blacklist },
+ { .compatible = "fsl,t4240-clockgen", &blacklist },
+
+ { .compatible = "fsl,ls1012a-clockgen", },
+ { .compatible = "fsl,ls1021a-clockgen", },
+ { .compatible = "fsl,ls1043a-clockgen", },
+ { .compatible = "fsl,ls1046a-clockgen", },
+ { .compatible = "fsl,ls1088a-clockgen", },
+ { .compatible = "fsl,ls2080a-clockgen", },
+ { .compatible = "fsl,p4080-clockgen", },
+ { .compatible = "fsl,qoriq-clockgen-1.0", },
{ .compatible = "fsl,qoriq-clockgen-2.0", },
{}
};
@@ -380,16 +334,12 @@ static int __init qoriq_cpufreq_init(void)
match = of_match_node(node_matches, np);
data = match->data;
- if (data) {
- if (data->flag)
- fmask = data->freq_mask;
- min_cpufreq = get_bus_freq();
- } else {
- min_cpufreq = get_bus_freq() / 2;
- }
of_node_put(np);
+ if (data && data->flags & SOC_BLACKLIST)
+ return -ENODEV;
+
ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
if (!ret)
pr_info("Freescale QorIQ CPU frequency scaling driver\n");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index d6d425773fa4..5b2db3c6568f 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
- clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index b366e6d830ea..a7db9011d5fe 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -160,6 +160,7 @@ static int sti_cpufreq_set_opp_info(void)
int pcode, substrate, major, minor;
int ret;
char name[MAX_PCODE_NAME_LEN];
+ struct opp_table *opp_table;
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
@@ -211,20 +212,20 @@ use_defaults:
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
- ret = dev_pm_opp_set_prop_name(dev, name);
- if (ret) {
+ opp_table = dev_pm_opp_set_prop_name(dev, name);
+ if (IS_ERR(opp_table)) {
dev_err(dev, "Failed to set prop name\n");
- return ret;
+ return PTR_ERR(opp_table);
}
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
- ret = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
- if (ret) {
+ opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
+ if (IS_ERR(opp_table)) {
dev_err(dev, "Failed to set supported hardware\n");
- return ret;
+ return PTR_ERR(opp_table);
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
new file mode 100644
index 000000000000..a7b5658c0460
--- /dev/null
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -0,0 +1,268 @@
+/*
+ * TI CPUFreq/OPP hw-supported driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define REVISION_MASK 0xF
+#define REVISION_SHIFT 28
+
+#define AM33XX_800M_ARM_MPU_MAX_FREQ 0x1E2F
+#define AM43XX_600M_ARM_MPU_MAX_FREQ 0xFFA
+
+#define DRA7_EFUSE_HAS_OD_MPU_OPP 11
+#define DRA7_EFUSE_HAS_HIGH_MPU_OPP 15
+#define DRA7_EFUSE_HAS_ALL_MPU_OPP 23
+
+#define DRA7_EFUSE_NOM_MPU_OPP BIT(0)
+#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
+#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+
+#define VERSION_COUNT 2
+
+struct ti_cpufreq_data;
+
+struct ti_cpufreq_soc_data {
+ unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse);
+ unsigned long efuse_fallback;
+ unsigned long efuse_offset;
+ unsigned long efuse_mask;
+ unsigned long efuse_shift;
+ unsigned long rev_offset;
+};
+
+struct ti_cpufreq_data {
+ struct device *cpu_dev;
+ struct device_node *opp_node;
+ struct regmap *syscon;
+ const struct ti_cpufreq_soc_data *soc_data;
+};
+
+static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ if (!efuse)
+ efuse = opp_data->soc_data->efuse_fallback;
+ /* AM335x and AM437x use "OPP disable" bits, so invert */
+ return ~efuse;
+}
+
+static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = DRA7_EFUSE_NOM_MPU_OPP;
+
+ /*
+ * The efuse on dra7 and am57 parts contains a specific
+ * value indicating the highest available OPP.
+ */
+
+ switch (efuse) {
+ case DRA7_EFUSE_HAS_ALL_MPU_OPP:
+ case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
+ calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
+ case DRA7_EFUSE_HAS_OD_MPU_OPP:
+ calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
+static struct ti_cpufreq_soc_data am3x_soc_data = {
+ .efuse_xlate = amx3_efuse_xlate,
+ .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
+ .efuse_offset = 0x07fc,
+ .efuse_mask = 0x1fff,
+ .rev_offset = 0x600,
+};
+
+static struct ti_cpufreq_soc_data am4x_soc_data = {
+ .efuse_xlate = amx3_efuse_xlate,
+ .efuse_fallback = AM43XX_600M_ARM_MPU_MAX_FREQ,
+ .efuse_offset = 0x0610,
+ .efuse_mask = 0x3f,
+ .rev_offset = 0x600,
+};
+
+static struct ti_cpufreq_soc_data dra7_soc_data = {
+ .efuse_xlate = dra7_efuse_xlate,
+ .efuse_offset = 0x020c,
+ .efuse_mask = 0xf80000,
+ .efuse_shift = 19,
+ .rev_offset = 0x204,
+};
+
+/**
+ * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @efuse_value: Set to the value parsed from efuse
+ *
+ * Returns error code if efuse not read properly.
+ */
+static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
+ u32 *efuse_value)
+{
+ struct device *dev = opp_data->cpu_dev;
+ u32 efuse;
+ int ret;
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
+ &efuse);
+ if (ret) {
+ dev_err(dev,
+ "Failed to read the efuse value from syscon: %d\n",
+ ret);
+ return ret;
+ }
+
+ efuse = (efuse & opp_data->soc_data->efuse_mask);
+ efuse >>= opp_data->soc_data->efuse_shift;
+
+ *efuse_value = opp_data->soc_data->efuse_xlate(opp_data, efuse);
+
+ return 0;
+}
+
+/**
+ * ti_cpufreq_get_rev() - Parse and return rev value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @revision_value: Set to the value parsed from revision register
+ *
+ * Returns error code if revision not read properly.
+ */
+static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
+ u32 *revision_value)
+{
+ struct device *dev = opp_data->cpu_dev;
+ u32 revision;
+ int ret;
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
+ &revision);
+ if (ret) {
+ dev_err(dev,
+ "Failed to read the revision number from syscon: %d\n",
+ ret);
+ return ret;
+ }
+
+ *revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+
+ return 0;
+}
+
+static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
+{
+ struct device *dev = opp_data->cpu_dev;
+ struct device_node *np = opp_data->opp_node;
+
+ opp_data->syscon = syscon_regmap_lookup_by_phandle(np,
+ "syscon");
+ if (IS_ERR(opp_data->syscon)) {
+ dev_err(dev,
+ "\"syscon\" is missing, cannot use OPPv2 table.\n");
+ return PTR_ERR(opp_data->syscon);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ti_cpufreq_of_match[] = {
+ { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+ { .compatible = "ti,am4372", .data = &am4x_soc_data, },
+ { .compatible = "ti,dra7", .data = &dra7_soc_data },
+ {},
+};
+
+static int ti_cpufreq_init(void)
+{
+ u32 version[VERSION_COUNT];
+ struct device_node *np;
+ const struct of_device_id *match;
+ struct ti_cpufreq_data *opp_data;
+ int ret;
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(ti_cpufreq_of_match, np);
+ if (!match)
+ return -ENODEV;
+
+ opp_data = kzalloc(sizeof(*opp_data), GFP_KERNEL);
+ if (!opp_data)
+ return -ENOMEM;
+
+ opp_data->soc_data = match->data;
+
+ opp_data->cpu_dev = get_cpu_device(0);
+ if (!opp_data->cpu_dev) {
+ pr_err("%s: Failed to get device for CPU0\n", __func__);
+ return -ENODEV;
+ }
+
+ opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
+ if (!opp_data->opp_node) {
+ dev_info(opp_data->cpu_dev,
+ "OPP-v2 not supported, cpufreq-dt will attempt to use legacy tables.\n");
+ goto register_cpufreq_dt;
+ }
+
+ ret = ti_cpufreq_setup_syscon_register(opp_data);
+ if (ret)
+ goto fail_put_node;
+
+ /*
+ * OPPs determine whether or not they are supported based on
+ * two metrics:
+ * 0 - SoC Revision
+ * 1 - eFuse value
+ */
+ ret = ti_cpufreq_get_rev(opp_data, &version[0]);
+ if (ret)
+ goto fail_put_node;
+
+ ret = ti_cpufreq_get_efuse(opp_data, &version[1]);
+ if (ret)
+ goto fail_put_node;
+
+ of_node_put(opp_data->opp_node);
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT));
+ if (ret) {
+ dev_err(opp_data->cpu_dev,
+ "Failed to set supported hardware\n");
+ goto fail_put_node;
+ }
+
+register_cpufreq_dt:
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return 0;
+
+fail_put_node:
+ of_node_put(opp_data->opp_node);
+
+ return ret;
+}
+device_initcall(ti_cpufreq_init);
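For reference, a minimal sketch (not part of the patch) of how the two supported-hardware words described in the comments above are built: version[0] is a one-bit mask derived from the SoC revision register and version[1] comes from the efuse translation. The demo_* names and the register values are hypothetical; the shift, mask, and efuse-mask constants mirror the ones defined in this file.

#define DEMO_REVISION_SHIFT	28
#define DEMO_REVISION_MASK	0xF

static void demo_build_versions(u32 *version)
{
	u32 revision = 0x1B940002;	/* assumed SYSCON revision reading */
	u32 efuse = 0x1E2F & 0x1fff;	/* assumed efuse word, am3x_soc_data mask applied */

	/* version[0]: single bit selected by the revision field (bit 1 here) */
	version[0] = 1u << ((revision >> DEMO_REVISION_SHIFT) & DEMO_REVISION_MASK);

	/* version[1]: AM335x/AM437x efuses carry "OPP disable" bits, so invert */
	version[1] = ~efuse;
}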
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index d9b5b9398a0f..8d6d25c38c02 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,6 +19,7 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
+#include <linux/cpu.h>
/*
* Please note when changing the tuning values:
@@ -280,17 +281,23 @@ again:
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
+ struct device *device = get_cpu_device(dev->cpu);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
unsigned int interactivity_req;
unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
+ int resume_latency = dev_pm_qos_read_value(device);
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
}
+ /* a resume_latency of 0 means no restriction */
+ if (resume_latency && resume_latency < latency_req)
+ latency_req = resume_latency;
+
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0))
return 0;
@@ -357,9 +364,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
if (s->target_residency > data->predicted_us)
- continue;
+ break;
if (s->exit_latency > latency_req)
- continue;
+ break;
data->last_state_idx = i;
}
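The switch from continue to break above relies on cpuidle states being ordered by increasing target_residency and exit_latency, so the first state that fails either check implies every deeper state fails too. A small standalone model of that loop (demo_* names and data are made up for illustration) showing why stopping early selects the same state:

struct demo_state {
	unsigned int target_residency;
	unsigned int exit_latency;
};

/* Returns the index of the deepest state satisfying both constraints, or -1. */
static int demo_pick_state(const struct demo_state *s, int count,
			   unsigned int predicted_us, unsigned int latency_req)
{
	int i, last = -1;

	for (i = 0; i < count; i++) {
		if (s[i].target_residency > predicted_us)
			break;		/* deeper states only need more residency */
		if (s[i].exit_latency > latency_req)
			break;		/* and only have longer exit latencies */
		last = i;
	}
	return last;
}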
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index e2ce8190ecc9..612898b4aaad 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
static void ccp5_config(struct ccp_device *ccp)
{
/* Public side */
- iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+ iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}
static void ccp5other_config(struct ccp_device *ccp)
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 830f35e6005f..649e5610a5ce 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
struct ccp_device *ccp;
spinlock_t lock;
+ struct list_head created;
struct list_head pending;
struct list_head active;
struct list_head complete;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 6553912804f7..e5d9278f4019 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
ccp_free_desc_resources(chan->ccp, &chan->complete);
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
+ ccp_free_desc_resources(chan->ccp, &chan->created);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
spin_lock_irqsave(&chan->lock, flags);
cookie = dma_cookie_assign(tx_desc);
+ list_del(&desc->entry);
list_add_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
spin_lock_irqsave(&chan->lock, sflags);
- list_add_tail(&desc->entry, &chan->pending);
+ list_add_tail(&desc->entry, &chan->created);
spin_unlock_irqrestore(&chan->lock, sflags);
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
/*TODO: Purge the complete list? */
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
+ ccp_free_desc_resources(chan->ccp, &chan->created);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
chan->ccp = ccp;
spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->created);
INIT_LIST_HEAD(&chan->pending);
INIT_LIST_HEAD(&chan->active);
INIT_LIST_HEAD(&chan->complete);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2ed1e24b44a8..b4b78b37f8a6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
case CRYPTO_ALG_TYPE_AEAD:
ctx_req.req.aead_req = (struct aead_request *)req;
ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.reqctx->skb) {
kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param;
- struct scatterlist *src, *dst;
- struct scatterlist src_sg[2], dst_sg[2];
+ struct scatterlist *src;
unsigned int frags = 0, transhdr_len;
unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
goto err;
- src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
- dst = src;
+ src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+ reqctx->dst = src;
+
if (req->src != req->dst) {
err = chcr_copy_assoc(req, aeadctx);
if (err)
return ERR_PTR(err);
- dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+ req->assoclen);
}
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
null = 1;
assoclen = 0;
}
- reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
sg_param.qid = qid;
sg_param.align = 0;
- if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
&sg_param))
goto dstmap_fail;
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param;
- struct scatterlist *src, *dst;
- struct scatterlist src_sg[2], dst_sg[2];
+ struct scatterlist *src;
unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
unsigned int dst_size = 0, kctx_len;
unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
goto err;
sub_type = get_aead_subtype(tfm);
- src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
- dst = src;
+ src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+ reqctx->dst = src;
+
if (req->src != req->dst) {
err = chcr_copy_assoc(req, aeadctx);
if (err) {
pr_err("AAD copy to destination buffer fails\n");
return ERR_PTR(err);
}
- dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+ req->assoclen);
}
- reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
sg_param.qid = qid;
sg_param.align = 0;
- if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
&sg_param))
goto dstmap_fail;
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct phys_sge_parm sg_param;
- struct scatterlist *src, *dst;
- struct scatterlist src_sg[2], dst_sg[2];
+ struct scatterlist *src;
unsigned int frags = 0, transhdr_len;
unsigned int ivsize = AES_BLOCK_SIZE;
unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
goto err;
- src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
- dst = src;
+ src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+ reqctx->dst = src;
if (req->src != req->dst) {
err = chcr_copy_assoc(req, aeadctx);
if (err)
return ERR_PTR(err);
- dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+ req->assoclen);
}
if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
crypt_len = AES_BLOCK_SIZE;
else
crypt_len = req->cryptlen;
- reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
sg_param.qid = qid;
sg_param.align = 0;
- if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
&sg_param))
goto dstmap_fail;
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
write_sg_to_skb(skb, &frags, src, req->cryptlen);
} else {
aes_gcm_empty_pld_pad(req->dst, authsize - 1);
- write_sg_to_skb(skb, &frags, dst, crypt_len);
+ write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
}
create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ck_size;
int ret = 0, key_ctx_size = 0;
- if (get_aead_subtype(aead) ==
- CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ keylen > 3) {
keylen -= 4; /* nonce/salt is present in the last 4 bytes */
memcpy(aeadctx->salt, key + keylen, 4);
}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 918da8e6e2d8..1c65f07e1cc9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
int assign_chcr_device(struct chcr_dev **dev)
{
struct uld_ctx *u_ctx;
+ int ret = -ENXIO;
/*
* Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
* must go to the same device to maintain the ordering.
*/
mutex_lock(&dev_mutex); /* TODO ? */
- u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
- if (!u_ctx) {
- mutex_unlock(&dev_mutex);
- return -ENXIO;
+ list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+ if (u_ctx && u_ctx->dev) {
+ *dev = u_ctx->dev;
+ ret = 0;
+ break;
}
-
- *dev = u_ctx->dev;
mutex_unlock(&dev_mutex);
- return 0;
+ return ret;
}
static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
- if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
pr_err("ULD register fail: No chcr crypto support in cxgb4");
- return -1;
- }
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d5af7d64a763..7ec0a8f12475 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -158,6 +158,9 @@ struct ablk_ctx {
};
struct chcr_aead_reqctx {
struct sk_buff *skb;
+ struct scatterlist *dst;
+ struct scatterlist srcffwd[2];
+ struct scatterlist dstffwd[2];
short int dst_nents;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index bc5cbc193aae..5b2d78a5b5aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
- i = 0;
+ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index e8822536530b..33f0a6251e38 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -69,6 +69,7 @@
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 1e480f140663..8c4fd255a601 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
unsigned int csr_val;
int times = 30;
- if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
LOCAL_TO_XFER_REG_OFFSET);
handle->pci_dev = pci_info->pci_dev;
- if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 9aea2c7ecbe6..8648b32ebc89 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -306,7 +306,7 @@ struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
struct devfreq_event_dev *edev;
- static atomic_t event_no = ATOMIC_INIT(0);
+ static atomic_t event_no = ATOMIC_INIT(-1);
int ret;
if (!dev || !desc)
@@ -329,7 +329,7 @@ struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
edev->dev.class = devfreq_event_class;
edev->dev.release = devfreq_event_release_edev;
- dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
+ dev_set_name(&edev->dev, "event%d", atomic_inc_return(&event_no));
ret = device_register(&edev->dev);
if (ret < 0) {
put_device(&edev->dev);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 47206a21bb90..551a271353d2 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -111,18 +111,16 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
return;
}
- rcu_read_lock();
for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
if (IS_ERR(opp)) {
devm_kfree(devfreq->dev.parent, profile->freq_table);
profile->max_state = 0;
- rcu_read_unlock();
return;
}
+ dev_pm_opp_put(opp);
profile->freq_table[i] = freq;
}
- rcu_read_unlock();
}
/**
@@ -130,7 +128,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
* @devfreq: the devfreq instance
* @freq: the update target frequency
*/
-static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
int lev, prev_lev, ret = 0;
unsigned long cur_time;
@@ -166,6 +164,7 @@ out:
devfreq->last_stat_updated = cur_time;
return ret;
}
+EXPORT_SYMBOL(devfreq_update_status);
/**
* find_devfreq_governor() - find devfreq governor from name
@@ -474,11 +473,15 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
}
/**
- * _remove_devfreq() - Remove devfreq from the list and release its resources.
- * @devfreq: the devfreq struct
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev: the devfreq device
+ *
+ * Remove devfreq from the list and release its resources.
*/
-static void _remove_devfreq(struct devfreq *devfreq)
+static void devfreq_dev_release(struct device *dev)
{
+ struct devfreq *devfreq = to_devfreq(dev);
+
mutex_lock(&devfreq_list_lock);
if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
mutex_unlock(&devfreq_list_lock);
@@ -500,19 +503,6 @@ static void _remove_devfreq(struct devfreq *devfreq)
}
/**
- * devfreq_dev_release() - Callback for struct device to release the device.
- * @dev: the devfreq device
- *
- * This calls _remove_devfreq() if _remove_devfreq() is not called.
- */
-static void devfreq_dev_release(struct device *dev)
-{
- struct devfreq *devfreq = to_devfreq(dev);
-
- _remove_devfreq(devfreq);
-}
-
-/**
* devfreq_add_device() - Add devfreq feature to the device
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
@@ -527,6 +517,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
+ static atomic_t devfreq_no = ATOMIC_INIT(-1);
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -538,15 +529,14 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq = find_device_devfreq(dev);
mutex_unlock(&devfreq_list_lock);
if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+ __func__);
err = -EINVAL;
goto err_out;
}
devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
if (!devfreq) {
- dev_err(dev, "%s: Unable to create devfreq for the device\n",
- __func__);
err = -ENOMEM;
goto err_out;
}
@@ -569,18 +559,21 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+ dev_set_name(&devfreq->dev, "devfreq%d",
+ atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
goto err_out;
}
- devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ sizeof(unsigned int) *
devfreq->profile->max_state *
devfreq->profile->max_state,
GFP_KERNEL);
- devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+ devfreq->time_in_state = devm_kzalloc(&devfreq->dev,
+ sizeof(unsigned long) *
devfreq->profile->max_state,
GFP_KERNEL);
devfreq->last_stat_updated = jiffies;
@@ -939,6 +932,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
+ } else if (df->governor->immutable || governor->immutable) {
+ ret = -EINVAL;
+ goto out;
}
if (df->governor) {
@@ -968,13 +964,33 @@ static ssize_t available_governors_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct devfreq_governor *tmp_governor;
+ struct devfreq *df = to_devfreq(d);
ssize_t count = 0;
mutex_lock(&devfreq_list_lock);
- list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
- count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
- "%s ", tmp_governor->name);
+
+ /*
+ * A devfreq using an immutable governor (e.g., passive) shows
+ * only its own governor.
+ */
+ if (df->governor->immutable) {
+ count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
+ "%s ", df->governor_name);
+ /*
+ * The devfreq device shows every registered governor except for
+ * immutable governors such as the passive governor.
+ */
+ } else {
+ struct devfreq_governor *governor;
+
+ list_for_each_entry(governor, &devfreq_governor_list, node) {
+ if (governor->immutable)
+ continue;
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%s ", governor->name);
+ }
+ }
+
mutex_unlock(&devfreq_list_lock);
/* Truncate the trailing space */
@@ -995,7 +1011,7 @@ static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
if (devfreq->profile->get_cur_freq &&
!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
- return sprintf(buf, "%lu\n", freq);
+ return sprintf(buf, "%lu\n", freq);
return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
@@ -1112,17 +1128,16 @@ static ssize_t available_frequencies_show(struct device *d,
ssize_t count = 0;
unsigned long freq = 0;
- rcu_read_lock();
do {
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
break;
+ dev_pm_opp_put(opp);
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
"%lu ", freq);
freq++;
} while (1);
- rcu_read_unlock();
/* Truncate the trailing space */
if (count)
@@ -1224,11 +1239,8 @@ subsys_initcall(devfreq_init);
* @freq: The frequency given to target function
* @flags: Flags handed from devfreq framework.
*
- * Locking: This function must be called under rcu_read_lock(). opp is a rcu
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
- * under the locked area. The pointer returned must be used prior to unlocking
- * with rcu_read_unlock() to maintain the integrity of the pointer.
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
*/
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq,
@@ -1265,18 +1277,7 @@ EXPORT_SYMBOL(devfreq_recommended_opp);
*/
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh;
- int ret = 0;
-
- rcu_read_lock();
- nh = dev_pm_opp_get_notifier(dev);
- if (IS_ERR(nh))
- ret = PTR_ERR(nh);
- rcu_read_unlock();
- if (!ret)
- ret = srcu_notifier_chain_register(nh, &devfreq->nb);
-
- return ret;
+ return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);
@@ -1292,18 +1293,7 @@ EXPORT_SYMBOL(devfreq_register_opp_notifier);
*/
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh;
- int ret = 0;
-
- rcu_read_lock();
- nh = dev_pm_opp_get_notifier(dev);
- if (IS_ERR(nh))
- ret = PTR_ERR(nh);
- rcu_read_unlock();
- if (!ret)
- ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
-
- return ret;
+ return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
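With the RCU locking dropped above, devfreq_recommended_opp() now hands back a reference-counted OPP, and the new kerneldoc requires callers to drop it with dev_pm_opp_put(). A minimal sketch of the calling convention (illustrative only; demo_target is a made-up name, mirroring the drivers converted later in this patch):

static int demo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;
	unsigned long rate, volt;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	rate = dev_pm_opp_get_freq(opp);
	volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);	/* drop the reference once the values are copied */

	/* ... program clocks and regulators using rate and volt ... */
	return 0;
}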
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 107eb91a9415..9b7350935b73 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -17,13 +17,13 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>
#include "exynos-ppmu.h"
struct exynos_ppmu_data {
- void __iomem *base;
struct clk *clk;
};
@@ -33,6 +33,7 @@ struct exynos_ppmu {
unsigned int num_events;
struct device *dev;
+ struct regmap *regmap;
struct exynos_ppmu_data ppmu;
};
@@ -107,20 +108,28 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int ret;
u32 pmnc;
/* Disable all counters */
- __raw_writel(PPMU_CCNT_MASK |
- PPMU_PMCNT0_MASK |
- PPMU_PMCNT1_MASK |
- PPMU_PMCNT2_MASK |
- PPMU_PMCNT3_MASK,
- info->ppmu.base + PPMU_CNTENC);
+ ret = regmap_write(info->regmap, PPMU_CNTENC,
+ PPMU_CCNT_MASK |
+ PPMU_PMCNT0_MASK |
+ PPMU_PMCNT1_MASK |
+ PPMU_PMCNT2_MASK |
+ PPMU_PMCNT3_MASK);
+ if (ret < 0)
+ return ret;
/* Disable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
- __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+ ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -129,29 +138,42 @@ static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
int id = exynos_ppmu_find_ppmu_id(edev);
+ int ret;
u32 pmnc, cntens;
if (id < 0)
return id;
/* Enable specific counter */
- cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
+ ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
+ if (ret < 0)
+ return ret;
+
cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
- __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
+ ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
+ if (ret < 0)
+ return ret;
/* Set the event of Read/Write data count */
- __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
- info->ppmu.base + PPMU_BEVTxSEL(id));
+ ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
+ PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT);
+ if (ret < 0)
+ return ret;
/* Reset cycle counter/performance counter and enable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~(PPMU_PMNC_ENABLE_MASK
| PPMU_PMNC_COUNTER_RESET_MASK
| PPMU_PMNC_CC_RESET_MASK);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
- __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+ ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -161,40 +183,64 @@ static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
int id = exynos_ppmu_find_ppmu_id(edev);
- u32 pmnc, cntenc;
+ unsigned int total_count, load_count;
+ unsigned int pmcnt3_high, pmcnt3_low;
+ unsigned int pmnc, cntenc;
+ int ret;
if (id < 0)
return -EINVAL;
/* Disable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
+ ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
- __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
+ ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
/* Read cycle count */
- edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
+ ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
+ if (ret < 0)
+ return ret;
+ edata->total_count = total_count;
/* Read performance count */
switch (id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
- edata->load_count
- = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
+ ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
+ if (ret < 0)
+ return ret;
+ edata->load_count = load_count;
break;
case PPMU_PMNCNT3:
- edata->load_count =
- ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
- | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
+ ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
+ if (ret < 0)
+ return ret;
+
+ edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
break;
default:
return -EINVAL;
}
/* Disable specific counter */
- cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
+ ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
+ if (ret < 0)
+ return ret;
+
cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
- __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
+ ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
+ if (ret < 0)
+ return ret;
dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
edata->load_count, edata->total_count);
@@ -214,36 +260,93 @@ static const struct devfreq_event_ops exynos_ppmu_ops = {
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int ret;
u32 pmnc, clear;
/* Disable all counters */
clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+ ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
+ if (ret < 0)
+ return ret;
- __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
- __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
- __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
- __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
-
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
- __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
+ if (ret < 0)
+ return ret;
/* Disable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
- __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+ ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -251,30 +354,43 @@ static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ unsigned int pmnc, cntens;
int id = exynos_ppmu_find_ppmu_id(edev);
- u32 pmnc, cntens;
+ int ret;
/* Enable all counters */
- cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
+ ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
+ if (ret < 0)
+ return ret;
+
cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
- __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
+ ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
+ if (ret < 0)
+ return ret;
/* Set the event of Read/Write data count */
switch (id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
- __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
- info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+ PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT);
+ if (ret < 0)
+ return ret;
break;
case PPMU_PMNCNT3:
- __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
- info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+ PPMU_V2_EVT3_RW_DATA_CNT);
+ if (ret < 0)
+ return ret;
break;
}
/* Reset cycle counter/performance counter and enable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~(PPMU_PMNC_ENABLE_MASK
| PPMU_PMNC_COUNTER_RESET_MASK
| PPMU_PMNC_CC_RESET_MASK
@@ -284,7 +400,10 @@ static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
- __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+ ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -294,37 +413,61 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
int id = exynos_ppmu_find_ppmu_id(edev);
- u32 pmnc, cntenc;
- u32 pmcnt_high, pmcnt_low;
- u64 load_count = 0;
+ int ret;
+ unsigned int pmnc, cntenc;
+ unsigned int pmcnt_high, pmcnt_low;
+ unsigned int total_count, count;
+ unsigned long load_count = 0;
/* Disable PPMU */
- pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+ if (ret < 0)
+ return ret;
+
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
- __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+ ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+ if (ret < 0)
+ return ret;
/* Read cycle count and performance count */
- edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
+ ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
+ if (ret < 0)
+ return ret;
+ edata->total_count = total_count;
switch (id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
- load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
+ ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
+ if (ret < 0)
+ return ret;
+ load_count = count;
break;
case PPMU_PMNCNT3:
- pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
- pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
- load_count = ((u64)((pmcnt_high & 0xff)) << 32)
- + (u64)pmcnt_low;
+ ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
+ &pmcnt_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
+ if (ret < 0)
+ return ret;
+
+ load_count = ((u64)((pmcnt_high & 0xff)) << 32) + (u64)pmcnt_low;
break;
}
edata->load_count = load_count;
/* Disable all counters */
- cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
+ ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
+ if (ret < 0)
+ return ret;
+
cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
- __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
+ ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
+ if (ret < 0)
+ return ret;
dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
edata->load_count, edata->total_count);
@@ -411,10 +554,19 @@ static int of_get_devfreq_events(struct device_node *np,
return 0;
}
-static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
+static struct regmap_config exynos_ppmu_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int exynos_ppmu_parse_dt(struct platform_device *pdev,
+ struct exynos_ppmu *info)
{
struct device *dev = info->dev;
struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
int ret = 0;
if (!np) {
@@ -423,10 +575,17 @@ static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
}
/* Maps the memory mapped IO to control PPMU register */
- info->ppmu.base = of_iomap(np, 0);
- if (IS_ERR_OR_NULL(info->ppmu.base)) {
- dev_err(dev, "failed to map memory region\n");
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
+ info->regmap = devm_regmap_init_mmio(dev, base,
+ &exynos_ppmu_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ dev_err(dev, "failed to initialize regmap\n");
+ return PTR_ERR(info->regmap);
}
info->ppmu.clk = devm_clk_get(dev, "ppmu");
@@ -438,15 +597,10 @@ static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
ret = of_get_devfreq_events(np, info);
if (ret < 0) {
dev_err(dev, "failed to parse exynos ppmu dt node\n");
- goto err;
+ return ret;
}
return 0;
-
-err:
- iounmap(info->ppmu.base);
-
- return ret;
}
static int exynos_ppmu_probe(struct platform_device *pdev)
@@ -463,7 +617,7 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
/* Parse dt data to get resource */
- ret = exynos_ppmu_parse_dt(info);
+ ret = exynos_ppmu_parse_dt(pdev, info);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to parse devicetree for resource\n");
@@ -476,8 +630,7 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
if (!info->edev) {
dev_err(&pdev->dev,
"failed to allocate memory devfreq-event devices\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
edev = info->edev;
platform_set_drvdata(pdev, info);
@@ -488,17 +641,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
- goto err;
+ return PTR_ERR(edev[i]);
}
+
+ pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
+ dev_name(&pdev->dev), desc[i].name);
}
clk_prepare_enable(info->ppmu.clk);
return 0;
-err:
- iounmap(info->ppmu.base);
-
- return ret;
}
static int exynos_ppmu_remove(struct platform_device *pdev)
@@ -506,7 +658,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev)
struct exynos_ppmu *info = platform_get_drvdata(pdev);
clk_disable_unprepare(info->ppmu.clk);
- iounmap(info->ppmu.base);
return 0;
}
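The conversion above replaces __raw_readl()/__raw_writel() pairs with accessors on an MMIO regmap so every register access returns an error code. A sketch of the resulting read-modify-write pattern (demo_ppmu_stop is a made-up name; regmap_update_bits() would be an equivalent single call, though the patch keeps explicit read/write pairs):

static int demo_ppmu_stop(struct regmap *regmap)
{
	unsigned int pmnc;
	int ret;

	ret = regmap_read(regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	return regmap_write(regmap, PPMU_PMNC, pmnc);
}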
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 9af86f46fbec..49f68929e024 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -103,18 +103,17 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
int ret = 0;
/* Get new opp-bus instance according to new bus clock */
- rcu_read_lock();
new_opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(new_opp)) {
dev_err(dev, "failed to get recommended opp instance\n");
- rcu_read_unlock();
return PTR_ERR(new_opp);
}
new_freq = dev_pm_opp_get_freq(new_opp);
new_volt = dev_pm_opp_get_voltage(new_opp);
+ dev_pm_opp_put(new_opp);
+
old_freq = bus->curr_freq;
- rcu_read_unlock();
if (old_freq == new_freq)
return 0;
@@ -147,8 +146,8 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
}
bus->curr_freq = new_freq;
- dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
- old_freq/1000, new_freq/1000);
+ dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+ old_freq, new_freq, clk_get_rate(bus->clk));
out:
mutex_unlock(&bus->lock);
@@ -214,17 +213,16 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
int ret = 0;
/* Get new opp-bus instance according to new bus clock */
- rcu_read_lock();
new_opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(new_opp)) {
dev_err(dev, "failed to get recommended opp instance\n");
- rcu_read_unlock();
return PTR_ERR(new_opp);
}
new_freq = dev_pm_opp_get_freq(new_opp);
+ dev_pm_opp_put(new_opp);
+
old_freq = bus->curr_freq;
- rcu_read_unlock();
if (old_freq == new_freq)
return 0;
@@ -241,8 +239,8 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
*freq = new_freq;
bus->curr_freq = new_freq;
- dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
- old_freq/1000, new_freq/1000);
+ dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+ old_freq, new_freq, clk_get_rate(bus->clk));
out:
mutex_unlock(&bus->lock);
@@ -358,16 +356,14 @@ static int exynos_bus_parse_of(struct device_node *np,
rate = clk_get_rate(bus->clk);
- rcu_read_lock();
opp = devfreq_recommended_opp(dev, &rate, 0);
if (IS_ERR(opp)) {
dev_err(dev, "failed to find dev_pm_opp\n");
- rcu_read_unlock();
ret = PTR_ERR(opp);
goto err_opp;
}
bus->curr_freq = dev_pm_opp_get_freq(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
return 0;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index fad7d6321978..71576b8bdfef 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -38,4 +38,6 @@ extern void devfreq_interval_update(struct devfreq *devfreq,
extern int devfreq_add_governor(struct devfreq_governor *governor);
extern int devfreq_remove_governor(struct devfreq_governor *governor);
+extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+
#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 9ef46e2592c4..673ad8cc9a1d 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -59,14 +59,14 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
* list of parent device. Because in this case, *freq is temporary
* value which is decided by ondemand governor.
*/
- rcu_read_lock();
opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
- rcu_read_unlock();
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
goto out;
}
+ dev_pm_opp_put(opp);
+
/*
 * Get the OPP table's index of the frequency decided by the governor
 * of the parent device.
@@ -112,6 +112,11 @@ static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
if (ret < 0)
goto out;
+ if (devfreq->profile->freq_table
+ && (devfreq_update_status(devfreq, freq)))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
+
devfreq->previous_freq = freq;
out:
@@ -179,6 +184,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
static struct devfreq_governor devfreq_passive = {
.name = "passive",
+ .immutable = 1,
.get_target_freq = devfreq_passive_get_target_freq,
.event_handler = devfreq_passive_event_handler,
};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 35de6e83c1fe..176976068bcd 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/devfreq/governor_simpleondemand.c
+ * linux/drivers/devfreq/governor_userspace.c
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -50,7 +50,6 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
unsigned long wanted;
int err = 0;
-
mutex_lock(&devfreq->lock);
data = devfreq->data;
@@ -112,7 +111,13 @@ out:
static void userspace_exit(struct devfreq *devfreq)
{
- sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ /*
+ * Remove the sysfs entry, unless this is being called after
+ * device_del(), which should have done this already via kobject_del().
+ */
+ if (devfreq->dev.kobj.sd)
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+
kfree(devfreq->data);
devfreq->data = NULL;
}
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 27d2f349b53c..40a2499730fc 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -91,17 +91,13 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
unsigned long target_volt, target_rate;
int err;
- rcu_read_lock();
opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
+ if (IS_ERR(opp))
return PTR_ERR(opp);
- }
target_rate = dev_pm_opp_get_freq(opp);
target_volt = dev_pm_opp_get_voltage(opp);
-
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
if (dmcfreq->rate == target_rate)
return 0;
@@ -422,15 +418,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->rate = clk_get_rate(data->dmc_clk);
- rcu_read_lock();
opp = devfreq_recommended_opp(dev, &data->rate, 0);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
+ if (IS_ERR(opp))
return PTR_ERR(opp);
- }
+
data->rate = dev_pm_opp_get_freq(opp);
data->volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
rk3399_devfreq_dmc_profile.initial_freq = data->rate;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index fe9dce0245bf..214fff96fa4a 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -487,15 +487,13 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
struct dev_pm_opp *opp;
unsigned long rate = *freq * KHZ;
- rcu_read_lock();
opp = devfreq_recommended_opp(dev, &rate, flags);
if (IS_ERR(opp)) {
- rcu_read_unlock();
dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
return PTR_ERR(opp);
}
rate = dev_pm_opp_get_freq(opp);
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
clk_set_min_rate(tegra->emc_clock, rate);
clk_set_rate(tegra->emc_clock, 0);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 263495d0adbd..d01d59812cf3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -157,7 +157,7 @@ config DMA_SUN4I
config DMA_SUN6I
tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+ depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
depends on RESET_CONTROLLER
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -458,7 +458,7 @@ config STM32_DMA
help
Enable support for the on-chip DMA controller on STMicroelectronics
STM32 MCUs.
- If you have a board based on such a MCU and wish to use DMA say Y or M
+ If you have a board based on such a MCU and wish to use DMA say Y
here.
config S3C24XX_DMAC
@@ -571,12 +571,12 @@ config XILINX_ZYNQMP_DMA
Enable support for Xilinx ZynqMP DMA controller.
config ZX_DMA
- tristate "ZTE ZX296702 DMA support"
+ tristate "ZTE ZX DMA support"
depends on ARCH_ZX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support the DMA engine for ZTE ZX296702 platform devices.
+ Support the DMA engine for ZTE ZX family platform devices.
# driver files
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fa3360e609..0b723e94d9e6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -66,7 +66,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-y += qcom/
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a87a68..200828c60db9 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
/* context for suspend/resume */
unsigned int dma_tdfdq;
+
+ bool is_suspended;
};
#define FIST_COMPLETION_QUEUE 93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
BUG_ON(desc_num >= ALLOC_DECS_NUM);
c = cdd->chan_busy[desc_num];
cdd->chan_busy[desc_num] = NULL;
+
+ /* Usecount for chan_busy[], paired with push_desc_queue() */
+ pm_runtime_put(cdd->ddev.dev);
+
return c;
}
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
- int error;
- error = pm_runtime_get(cdd->ddev.dev);
- if (error < 0)
- dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
- __func__, error);
+ /*
+ * This should never trigger, see the comments in
+ * push_desc_queue()
+ */
+ WARN_ON(cdd->is_suspended);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
- pm_runtime_mark_last_busy(cdd->ddev.dev);
- pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
*/
__iowmb();
+ /*
+ * DMA transfers can take at least 200ms to complete with USB mass
+ * storage connected. To prevent autosuspend timeouts, we must use
+ * pm_runtime_get/put() when chan_busy[] is modified. This will get
+ * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+ * outcome of the transfer.
+ */
+ pm_runtime_get(cdd->ddev.dev);
+
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue() from being
+ * called out of order. Both cppi41_dma_issue_pending() and
+ * cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
{
- struct cppi41_dd *cdd = c->cdd;
- unsigned long flags;
+ struct cppi41_channel *c, *_c;
- spin_lock_irqsave(&cdd->lock, flags);
- list_add_tail(&c->node, &cdd->pending);
- spin_unlock_irqrestore(&cdd->lock, flags);
+ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+ push_desc_queue(c);
+ list_del(&c->node);
+ }
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
+ unsigned long flags;
int error;
error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
return;
}
- if (likely(pm_runtime_active(cdd->ddev.dev)))
- push_desc_queue(c);
- else
- pending_desc(c);
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_add_tail(&c->node, &cdd->pending);
+ if (!cdd->is_suspended)
+ cppi41_run_queue(cdd);
+ spin_unlock_irqrestore(&cdd->lock, flags);
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
WARN_ON(!cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = NULL;
+ /* Usecount for chan_busy[], paired with push_desc_queue() */
+ pm_runtime_put(cdd->ddev.dev);
+
return 0;
}
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&cdd->lock, flags);
+ cdd->is_suspended = true;
WARN_ON(!list_empty(&cdd->pending));
+ spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
- struct cppi41_channel *c, *_c;
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
- list_for_each_entry_safe(c, _c, &cdd->pending, node) {
- push_desc_queue(c);
- list_del(&c->node);
- }
+ cdd->is_suspended = false;
+ cppi41_run_queue(cdd);
spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
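The cppi41 change above pairs one pm_runtime reference with every descriptor handed to the hardware and gates queue pushes on an is_suspended flag held under the driver lock. Below is a minimal sketch of that pattern with invented names (my_dd, my_desc, my_issue and friends are placeholders, not the cppi41 structures or API):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

struct my_desc {
	struct list_head node;
};

struct my_dd {
	struct device *dev;
	spinlock_t lock;
	struct list_head pending;
	bool is_suspended;
};

/* Caller holds dd->lock; push every queued descriptor to the hardware. */
static void my_run_queue(struct my_dd *dd)
{
	struct my_desc *d, *tmp;

	list_for_each_entry_safe(d, tmp, &dd->pending, node) {
		pm_runtime_get(dd->dev);	/* one reference per in-flight descriptor */
		/* ... write the descriptor to the hardware queue here ... */
		list_del(&d->node);
	}
}

static void my_issue(struct my_dd *dd, struct my_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	list_add_tail(&d->node, &dd->pending);
	if (!dd->is_suspended)		/* never touch the hardware while suspended */
		my_run_queue(dd);
	spin_unlock_irqrestore(&dd->lock, flags);
}

static void my_complete(struct my_dd *dd, struct my_desc *d)
{
	/* Paired with the pm_runtime_get() taken in my_run_queue(). */
	pm_runtime_put(dd->dev);
}

Runtime suspend only flips is_suspended under the same lock, so a descriptor is either pushed before the controller sleeps or left on the pending list for the resume path to flush.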
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6b535262ac5d..24e0221fd66d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,7 +65,7 @@
#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
-static DEFINE_IDR(dma_idr);
+static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
@@ -162,7 +162,7 @@ static void chan_dev_release(struct device *dev)
chan_dev = container_of(dev, typeof(*chan_dev), device);
if (atomic_dec_and_test(chan_dev->idr_ref)) {
mutex_lock(&dma_list_mutex);
- idr_remove(&dma_idr, chan_dev->dev_id);
+ ida_remove(&dma_ida, chan_dev->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(chan_dev->idr_ref);
}
@@ -898,14 +898,15 @@ static int get_dma_id(struct dma_device *device)
{
int rc;
- mutex_lock(&dma_list_mutex);
-
- rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
- if (rc >= 0)
- device->dev_id = rc;
+ do {
+ if (!ida_pre_get(&dma_ida, GFP_KERNEL))
+ return -ENOMEM;
+ mutex_lock(&dma_list_mutex);
+ rc = ida_get_new(&dma_ida, &device->dev_id);
+ mutex_unlock(&dma_list_mutex);
+ } while (rc == -EAGAIN);
- mutex_unlock(&dma_list_mutex);
- return rc < 0 ? rc : 0;
+ return rc;
}
/**
@@ -1035,7 +1036,7 @@ err_out:
/* if we never registered a channel just release the idr */
if (atomic_read(idr_ref) == 0) {
mutex_lock(&dma_list_mutex);
- idr_remove(&dma_idr, device->dev_id);
+ ida_remove(&dma_ida, device->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(idr_ref);
return rc;
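The get_dma_id() rework above uses the two-step IDA idiom of that kernel generation: preallocate outside the mutex, allocate under it, and retry when a racing caller consumed the preallocation. A self-contained sketch of the same idiom, using a hypothetical my_ida/my_lock pair rather than dmaengine's globals:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDA(my_ida);
static DEFINE_MUTEX(my_lock);

/* Returns a small non-negative id, or a negative errno. */
static int my_get_id(void)
{
	int id, rc;

	do {
		/* Preallocate outside the lock so the locked step never sleeps for memory. */
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;
		mutex_lock(&my_lock);
		rc = ida_get_new(&my_ida, &id);
		mutex_unlock(&my_lock);
		/* -EAGAIN: the preallocation was used up by a concurrent caller; try again. */
	} while (rc == -EAGAIN);

	return rc ? rc : id;
}

static void my_put_id(int id)
{
	mutex_lock(&my_lock);
	ida_remove(&my_ida, id);
	mutex_unlock(&my_lock);
}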
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e5adf5d1c34f..e500950dad82 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -138,16 +138,32 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc->descs_allocated--;
}
-static void dwc_initialize(struct dw_dma_chan *dwc)
+static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
+{
+ u32 cfghi = 0;
+ u32 cfglo = 0;
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
bool hs_polarity = dwc->dws.hs_polarity;
- if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
- return;
-
cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
@@ -156,6 +172,19 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
+ return;
+
+ if (dw->pdata->is_idma32)
+ dwc_initialize_chan_idma32(dwc);
+ else
+ dwc_initialize_chan_dw(dwc);
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -184,6 +213,37 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
cpu_relax();
}
+static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
+ unsigned int width, size_t *len)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ u32 block;
+
+ /* Always in bytes for iDMA 32-bit */
+ if (dw->pdata->is_idma32)
+ width = 0;
+
+ if ((bytes >> width) > dwc->block_size) {
+ block = dwc->block_size;
+ *len = block << width;
+ } else {
+ block = bytes >> width;
+ *len = bytes;
+ }
+
+ return block;
+}
+
+static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (dw->pdata->is_idma32)
+ return IDMA32C_CTLH_BLOCK_TS(block);
+
+ return DWC_CTLH_BLOCK_TS(block) << width;
+}
+
/*----------------------------------------------------------------------*/
/* Perform single block transfer */
@@ -332,7 +392,7 @@ static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
u32 ctlhi = channel_readl(dwc, CTL_HI);
u32 ctllo = channel_readl(dwc, CTL_LO);
- return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+ return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -692,10 +752,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
| DWC_CTLL_FC_M2M;
prev = first = NULL;
- for (offset = 0; offset < len; offset += xfer_count << src_width) {
- xfer_count = min_t(size_t, (len - offset) >> src_width,
- dwc->block_size);
-
+ for (offset = 0; offset < len; offset += xfer_count) {
desc = dwc_desc_get(dwc);
if (!desc)
goto err_desc_get;
@@ -703,8 +760,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
lli_write(desc, sar, src + offset);
lli_write(desc, dar, dest + offset);
lli_write(desc, ctllo, ctllo);
- lli_write(desc, ctlhi, xfer_count);
- desc->len = xfer_count << src_width;
+ lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+ desc->len = xfer_count;
if (!first) {
first = desc;
@@ -775,7 +832,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len, dlen, mem;
+ u32 len, mem;
+ size_t dlen;
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
@@ -789,17 +847,8 @@ slave_sg_todev_fill_desc:
lli_write(desc, sar, mem);
lli_write(desc, dar, reg);
+ lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
- if ((len >> mem_width) > dwc->block_size) {
- dlen = dwc->block_size << mem_width;
- mem += dlen;
- len -= dlen;
- } else {
- dlen = len;
- len = 0;
- }
-
- lli_write(desc, ctlhi, dlen >> mem_width);
desc->len = dlen;
if (!first) {
@@ -809,6 +858,9 @@ slave_sg_todev_fill_desc:
list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
+
+ mem += dlen;
+ len -= dlen;
total_len += dlen;
if (len)
@@ -828,13 +880,12 @@ slave_sg_todev_fill_desc:
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len, dlen, mem;
+ u32 len, mem;
+ size_t dlen;
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = __ffs(data_width | mem | len);
-
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc)
@@ -842,16 +893,9 @@ slave_sg_fromdev_fill_desc:
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
+ lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+ mem_width = __ffs(data_width | mem | dlen);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
- if ((len >> reg_width) > dwc->block_size) {
- dlen = dwc->block_size << reg_width;
- mem += dlen;
- len -= dlen;
- } else {
- dlen = len;
- len = 0;
- }
- lli_write(desc, ctlhi, dlen >> reg_width);
desc->len = dlen;
if (!first) {
@@ -861,6 +905,9 @@ slave_sg_fromdev_fill_desc:
list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
+
+ mem += dlen;
+ len -= dlen;
total_len += dlen;
if (len)
@@ -903,25 +950,20 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
-/*
- * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by controller.
- *
- * This can be done by finding least significant bit set: n & (n - 1)
- */
-static inline void convert_burst(u32 *maxburst)
-{
- if (*maxburst > 1)
- *maxburst = fls(*maxburst) - 2;
- else
- *maxburst = 0;
-}
-
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dma_slave_config *sc = &dwc->dma_sconfig;
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ /*
+ * Fix sconfig's burst size according to dw_dmac. We need to convert
+ * them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by the DesignWare controller.
+ * iDMA 32-bit supports it.
+ */
+ u32 s = dw->pdata->is_idma32 ? 1 : 2;
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(sconfig->direction))
@@ -930,28 +972,39 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
- convert_burst(&dwc->dma_sconfig.src_maxburst);
- convert_burst(&dwc->dma_sconfig.dst_maxburst);
+ sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
+ sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
return 0;
}
-static int dwc_pause(struct dma_chan *chan)
+static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- unsigned long flags;
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
unsigned int count = 20; /* timeout iterations */
u32 cfglo;
- spin_lock_irqsave(&dwc->lock, flags);
-
cfglo = channel_readl(dwc, CFG_LO);
+ if (dw->pdata->is_idma32) {
+ if (drain)
+ cfglo |= IDMA32C_CFGL_CH_DRAIN;
+ else
+ cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
+ }
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
udelay(2);
set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
+}
+static int dwc_pause(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc_chan_pause(dwc, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -993,6 +1046,8 @@ static int dwc_terminate_all(struct dma_chan *chan)
clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ dwc_chan_pause(dwc, true);
+
dwc_chan_disable(dw, dwc);
dwc_chan_resume(dwc);
@@ -1085,6 +1140,32 @@ static void dwc_issue_pending(struct dma_chan *chan)
/*----------------------------------------------------------------------*/
+/*
+ * Program FIFO size of channels.
+ *
+ * By default, the full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * split the FIFO into equal parts between the channels.
+ */
+static void idma32_fifo_partition(struct dw_dma *dw)
+{
+ u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+ IDMA32C_FP_UPDATE;
+ u64 fifo_partition = 0;
+
+ if (!dw->pdata->is_idma32)
+ return;
+
+ /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
+ fifo_partition |= value << 0;
+
+ /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ fifo_partition |= value << 32;
+
+ /* Program FIFO Partition registers - 128 bytes for each channel */
+ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+}
+
static void dw_dma_off(struct dw_dma *dw)
{
unsigned int i;
@@ -1504,8 +1585,16 @@ int dw_dma_probe(struct dw_dma_chip *chip)
/* Force dma off, just in case */
dw_dma_off(dw);
+ idma32_fifo_partition(dw);
+
+ /* Device and instance ID for IRQ and DMA pool */
+ if (pdata->is_idma32)
+ snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
+ else
+ snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+
/* Create a pool of consistent memory blocks for hardware descriptors */
- dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
@@ -1516,7 +1605,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
- "dw_dmac", dw);
+ dw->name, dw);
if (err)
goto err_pdata;
@@ -1665,6 +1754,8 @@ int dw_dma_enable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
+ idma32_fifo_partition(dw);
+
dw_dma_on(dw);
return 0;
}
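The bytes2block()/block2bytes() helpers introduced above absorb the difference between the DesignWare core, whose BLOCK_TS field counts items of the transfer width, and iDMA 32-bit, whose field counts plain bytes. A standalone sketch of the splitting arithmetic (made-up limits, not the driver's structures) shows how one memcpy request turns into descriptors:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the conversion, not the driver's bytes2block(). */
static uint32_t bytes_to_block(size_t bytes, unsigned int width,
			       uint32_t max_block, size_t *len)
{
	if ((bytes >> width) > max_block) {
		*len = (size_t)max_block << width;
		return max_block;
	}
	*len = bytes;
	return bytes >> width;
}

int main(void)
{
	size_t total = 20000, off, chunk;
	unsigned int width = 2;		/* 32-bit items */
	uint32_t max_block = 4095;	/* assumed hardware limit in items */

	for (off = 0; off < total; off += chunk) {
		uint32_t block = bytes_to_block(total - off, width, max_block, &chunk);
		printf("desc: block=%u items, %zu bytes\n", block, chunk);
	}
	return 0;	/* prints 4095 items/16380 bytes, then 905 items/3620 bytes */
}

With a 4095-item limit and 32-bit items, a 20000-byte copy becomes one 16380-byte descriptor followed by a 3620-byte one, the same split the old open-coded min_t()/shift logic produced.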
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 0ae6c3b1d34e..7778ed705a1a 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -15,6 +15,18 @@
#include "internal.h"
+static struct dw_dma_platform_data mrfld_pdata = {
+ .nr_channels = 8,
+ .is_private = true,
+ .is_memcpy = true,
+ .is_idma32 = true,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+};
+
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
@@ -47,6 +59,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
return -ENOMEM;
chip->dev = &pdev->dev;
+ chip->id = pdev->devfn;
chip->regs = pcim_iomap_table(pdev)[0];
chip->irq = pdev->irq;
chip->pdata = pdata;
@@ -95,14 +108,16 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
};
static const struct pci_device_id dw_pci_id_table[] = {
- /* Medfield */
+ /* Medfield (GPDMA) */
{ PCI_VDEVICE(INTEL, 0x0827) },
- { PCI_VDEVICE(INTEL, 0x0830) },
/* BayTrail */
{ PCI_VDEVICE(INTEL, 0x0f06) },
{ PCI_VDEVICE(INTEL, 0x0f40) },
+ /* Merrifield iDMA 32-bit (GPDMA) */
+ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata },
+
/* Braswell */
{ PCI_VDEVICE(INTEL, 0x2286) },
{ PCI_VDEVICE(INTEL, 0x22c0) },
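The Merrifield platform data above sets block_size to 131071; assuming the iDMA 32-bit register definitions added to regs.h later in this patch, that is exactly the largest byte count the 17-bit BLOCK_TS field can carry. A tiny standalone check of that relationship (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* IDMA32C_CTLH_BLOCK_TS_MASK is GENMASK(16, 0), i.e. a 17-bit byte count. */
	uint32_t block_ts_mask = (1u << 17) - 1;

	/* mrfld_pdata.block_size is chosen to match that hardware limit exactly. */
	assert(block_ts_mask == 131071);
	return 0;
}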
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index b1655e40cfa2..c639c60b825a 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -202,6 +202,7 @@ static int dw_probe(struct platform_device *pdev)
pdata = dw_dma_parse_dt(pdev);
chip->dev = dev;
+ chip->id = pdev->id;
chip->pdata = pdata;
chip->clk = devm_clk_get(chip->dev, "hclk");
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 4e0128c62704..32a328721c88 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -3,15 +3,19 @@
*
* Copyright (C) 2005-2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2016 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
#include "internal.h"
#define DW_DMA_MAX_NR_REQUESTS 16
@@ -85,9 +89,9 @@ struct dw_dma_regs {
DW_REG(ID);
DW_REG(TEST);
- /* reserved */
- DW_REG(__reserved0);
- DW_REG(__reserved1);
+ /* iDMA 32-bit support */
+ DW_REG(CLASS_PRIORITY0);
+ DW_REG(CLASS_PRIORITY1);
/* optional encoded params, 0x3c8..0x3f7 */
u32 __reserved;
@@ -99,6 +103,17 @@ struct dw_dma_regs {
/* top-level parameters */
u32 DW_PARAMS;
+
+ /* component ID */
+ u32 COMP_TYPE;
+ u32 COMP_VERSION;
+
+ /* iDMA 32-bit support */
+ DW_REG(FIFO_PARTITION0);
+ DW_REG(FIFO_PARTITION1);
+
+ DW_REG(SAI_ERR);
+ DW_REG(GLOBAL_CFG);
};
/*
@@ -170,8 +185,9 @@ enum dw_dma_msize {
#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
/* Bitfields in CTL_HI */
-#define DWC_CTLH_DONE 0x00001000
-#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
+#define DWC_CTLH_BLOCK_TS_MASK GENMASK(11, 0)
+#define DWC_CTLH_BLOCK_TS(x) ((x) & DWC_CTLH_BLOCK_TS_MASK)
+#define DWC_CTLH_DONE (1 << 12)
/* Bitfields in CFG_LO */
#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
@@ -214,6 +230,33 @@ enum dw_dma_msize {
/* Bitfields in CFG */
#define DW_CFG_DMA_EN (1 << 0)
+/* iDMA 32-bit support */
+
+/* Bitfields in CTL_HI */
+#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0)
+#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
+#define IDMA32C_CTLH_DONE (1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA32C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */
+#define IDMA32C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */
+#define IDMA32C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */
+#define IDMA32C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */
+#define IDMA32C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA32C_CFGH_SRC_PER(x) ((x) << 0)
+#define IDMA32C_CFGH_DST_PER(x) ((x) << 4)
+#define IDMA32C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
+#define IDMA32C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
+#define IDMA32C_CFGH_SRC_PER_EXT(x) ((x) << 28) /* src peripheral extension */
+#define IDMA32C_CFGH_DST_PER_EXT(x) ((x) << 30) /* dst peripheral extension */
+
+/* Bitfields in FIFO_PARTITION */
+#define IDMA32C_FP_PSIZE_CH0(x) ((x) << 0)
+#define IDMA32C_FP_PSIZE_CH1(x) ((x) << 13)
+#define IDMA32C_FP_UPDATE (1 << 26)
+
enum dw_dmac_flags {
DW_DMA_IS_CYCLIC = 0,
DW_DMA_IS_SOFT_LLP = 1,
@@ -270,6 +313,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
struct dw_dma {
struct dma_device dma;
+ char name[20];
void __iomem *regs;
struct dma_pool *desc_pool;
struct tasklet_struct tasklet;
@@ -293,6 +337,11 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
#define dma_writel(dw, name, val) \
dma_writel_native((val), &(__dw_regs(dw)->name))
+#define idma32_readq(dw, name) \
+ hi_lo_readq(&(__dw_regs(dw)->name))
+#define idma32_writeq(dw, name, val) \
+ hi_lo_writeq((val), &(__dw_regs(dw)->name))
+
#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
#define channel_clear_bit(dw, reg, mask) \
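The CFG_HI macros added above split a request line wider than four bits into a low nibble (DST_PER/SRC_PER) and a two-bit extension (DST_PER_EXT/SRC_PER_EXT), which is how dwc_initialize_chan_idma32() programs it. A standalone sketch of that packing, with the field positions copied from the macros and an invented request id:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int dst_id = 0x2b;	/* hypothetical 6-bit request line */
	uint32_t cfghi = 0;

	cfghi |= (dst_id & 0xf) << 4;		/* IDMA32C_CFGH_DST_PER: low 4 bits */
	cfghi |= (dst_id >> 4 & 0x3) << 30;	/* IDMA32C_CFGH_DST_PER_EXT: bits 5:4 */

	printf("CFG_HI = 0x%08x\n", cfghi);	/* 0x800000b0 for request line 0x2b */
	return 0;
}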
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index dd184b50e5b4..284627806b88 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -272,7 +272,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
u32 status;
int i, line;
- for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
+ for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
struct ipu_irq_bank *bank = irq_bank + i;
raw_spin_lock(&bank_lock);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 740bbb942594..f37f4978dabb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
- unsigned long flags;
int chans, i;
if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
- spin_lock_irqsave(&pl330->lock, flags);
-
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
thrd = NULL;
}
- spin_unlock_irqrestore(&pl330->lock, flags);
-
return thrd;
}
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330;
- unsigned long flags;
if (!thrd || thrd->free)
return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
- spin_lock_irqsave(&pl330->lock, flags);
_free_event(thrd, thrd->ev);
thrd->free = true;
- spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
@@ -1867,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
- pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+ pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
- &pl330->mcode_bus, GFP_KERNEL);
+ &pl330->mcode_bus, GFP_KERNEL,
+ DMA_ATTR_PRIVILEGED);
if (!pl330->mcode_cpu) {
dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
@@ -2122,20 +2115,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
pch->thread = pl330_request_channel(pl330);
if (!pch->thread) {
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return 1;
}
@@ -2238,12 +2231,13 @@ static int pl330_pause(struct dma_chan *chan)
static void pl330_free_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev);
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread);
pch->thread = NULL;
@@ -2251,7 +2245,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
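Two separate things change in pl330.c above: channel request/release now assume the caller holds the dmac-wide pl330->lock, and the microcode buffer is allocated with DMA_ATTR_PRIVILEGED so that only the controller's privileged transactions can reach it (the PL330 fetches its microcode with privileged accesses). A minimal sketch of the privileged allocation call, with invented wrapper names:

#include <linux/dma-mapping.h>

/* Sketch: allocate a coherent buffer reachable only by privileged bus masters. */
static void *alloc_privileged_buf(struct device *dev, size_t size, dma_addr_t *bus)
{
	return dma_alloc_attrs(dev, size, bus, GFP_KERNEL, DMA_ATTR_PRIVILEGED);
}

static void free_privileged_buf(struct device *dev, size_t size,
				void *cpu, dma_addr_t bus)
{
	dma_free_attrs(dev, size, cpu, bus, DMA_ATTR_PRIVILEGED);
}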
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 4c357d475465..48b22d5c8602 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1724,6 +1724,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
+ dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8684d11b29bb..a6620b671d1d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2809,12 +2809,14 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
- if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+ if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
dev->device_prep_slave_sg = d40_prep_slave_sg;
+ dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ }
if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
dev->device_prep_dma_memcpy = d40_prep_memcpy;
-
+ dev->directions = BIT(DMA_MEM_TO_MEM);
/*
* This controller can only access address at even
* 32bit boundaries, i.e. 2^2
@@ -2836,6 +2838,7 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
dev->device_pause = d40_pause;
dev->device_resume = d40_resume;
dev->device_terminate_all = d40_terminate_all;
+ dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dev->dev = base->dev;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3056ce7f8c69..49f86cabcfec 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -114,6 +114,7 @@
#define STM32_DMA_MAX_CHANNELS 0x08
#define STM32_DMA_MAX_REQUEST_ID 0x08
#define STM32_DMA_MAX_DATA_PARAM 0x03
+#define STM32_DMA_MAX_BURST 16
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -403,6 +404,13 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
return 0;
}
+static void stm32_dma_synchronize(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
@@ -421,7 +429,7 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}
-static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct virt_dma_desc *vdesc;
@@ -432,12 +440,12 @@ static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
ret = stm32_dma_disable_chan(chan);
if (ret < 0)
- return ret;
+ return;
if (!chan->desc) {
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
- return -EPERM;
+ return;
chan->desc = to_stm32_dma_desc(vdesc);
chan->next_sg = 0;
@@ -471,7 +479,7 @@ static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
chan->busy = true;
- return 0;
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -500,8 +508,6 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
}
-
- chan->next_sg++;
}
}
@@ -510,6 +516,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
if (chan->desc) {
if (chan->desc->cyclic) {
vchan_cyclic_callback(&chan->desc->vdesc);
+ chan->next_sg++;
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -552,15 +559,13 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
{
struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
unsigned long flags;
- int ret;
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (!chan->busy) {
- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
- ret = stm32_dma_start_transfer(chan);
- if ((!ret) && (chan->desc->cyclic))
- stm32_dma_configure_next_sg(chan);
- }
+ if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+ stm32_dma_start_transfer(chan);
+ if (chan->desc->cyclic)
+ stm32_dma_configure_next_sg(chan);
}
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -848,26 +853,40 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
+static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
+{
+ u32 dma_scr, width, ndtr;
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+ ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+ return ndtr << width;
+}
+
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
struct stm32_dma_desc *desc,
u32 next_sg)
{
- struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 dma_scr, width, residue, count;
+ u32 residue = 0;
int i;
- residue = 0;
+ /*
+ * In cyclic mode, for the last period, residue = remaining bytes from
+ * NDTR
+ */
+ if (chan->desc->cyclic && next_sg == 0)
+ return stm32_dma_get_remaining_bytes(chan);
+ /*
+ * For all other periods in cyclic mode, and in sg mode,
+ * residue = remaining bytes from NDTR + remaining periods/sg to be
+ * transferred
+ */
for (i = next_sg; i < desc->num_sgs; i++)
residue += desc->sg_req[i].len;
-
- if (next_sg != 0) {
- dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
- count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-
- residue += count << width;
- }
+ residue += stm32_dma_get_remaining_bytes(chan);
return residue;
}
@@ -964,27 +983,36 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+ struct device *dev = dmadev->ddev.dev;
struct stm32_dma_cfg cfg;
struct stm32_dma_chan *chan;
struct dma_chan *c;
- if (dma_spec->args_count < 4)
+ if (dma_spec->args_count < 4) {
+ dev_err(dev, "Bad number of cells\n");
return NULL;
+ }
cfg.channel_id = dma_spec->args[0];
cfg.request_line = dma_spec->args[1];
cfg.stream_config = dma_spec->args[2];
cfg.threshold = dma_spec->args[3];
- if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
- STM32_DMA_MAX_REQUEST_ID))
+ if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
+ (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
+ dev_err(dev, "Bad channel and/or request id\n");
return NULL;
+ }
chan = &dmadev->chan[cfg.channel_id];
c = dma_get_slave_channel(&chan->vchan.chan);
- if (c)
- stm32_dma_set_config(chan, &cfg);
+ if (!c) {
+ dev_err(dev, "No more channel avalaible\n");
+ return NULL;
+ }
+
+ stm32_dma_set_config(chan, &cfg);
return c;
}
@@ -1048,6 +1076,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
dd->device_config = stm32_dma_slave_config;
dd->device_terminate_all = stm32_dma_terminate_all;
+ dd->device_synchronize = stm32_dma_synchronize;
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -1056,6 +1085,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->max_burst = STM32_DMA_MAX_BURST;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);
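The stm32-dma residue rework above computes what is left as the hardware's remaining item count (NDTR) scaled by the programmed peripheral width, plus the lengths of the periods or scatterlist entries not yet started. A standalone sketch of that arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical transfer: 3 periods of 1024 bytes, currently in period 1. */
	uint32_t sg_len[3] = { 1024, 1024, 1024 };
	unsigned int next_sg = 2;	/* periods 2.. have not started yet */
	uint32_t ndtr = 100;		/* items still to go in the current period */
	unsigned int width = 2;		/* PSIZE: 4-byte items */
	uint32_t residue = 0;
	unsigned int i;

	for (i = next_sg; i < 3; i++)
		residue += sg_len[i];	/* whole periods still pending */
	residue += ndtr << width;	/* remaining bytes of the current period */

	printf("residue = %u bytes\n", residue);	/* 1024 + 400 = 1424 */
	return 0;
}

In the cyclic case with next_sg == 0 the loop contributes nothing and only the NDTR term remains, which is the short-circuit taken by stm32_dma_desc_residue() above.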
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx_dma.c
index 380276d078b2..2bb695315300 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx_dma.c
@@ -26,7 +26,7 @@
#define DRIVER_NAME "zx-dma"
#define DMA_ALIGN 4
-#define DMA_MAX_SIZE (0x10000 - PAGE_SIZE)
+#define DMA_MAX_SIZE (0x10000 - 512)
#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
#define REG_ZX_SRC_ADDR 0x00
@@ -365,7 +365,8 @@ static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
bytes = 0;
clli = zx_dma_get_curr_lli(p);
- index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
+ index = (clli - ds->desc_hw_lli) /
+ sizeof(struct zx_desc_hw) + 1;
for (; index < ds->desc_num; index++) {
bytes += ds->desc_hw[index].src_x;
/* end of lli */
@@ -812,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op)
INIT_LIST_HEAD(&d->slave.channels);
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
d->slave.dev = &op->dev;
d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 260251177830..82dab1692264 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3065,6 +3065,8 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
/* Check whether at least one UMC is enabled: */
if (umc_en_mask)
ecc_en = umc_en_mask == ecc_en_mask;
+ else
+ edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
/* Assume UMC MCA banks are enabled. */
nb_mce_en = true;
@@ -3075,14 +3077,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
nb_mce_en = nb_mce_bank_enabled_on_node(nid);
if (!nb_mce_en)
- amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+ edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
}
- amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
+ amd64_info("Node %d: DRAM ECC %s.\n",
+ nid, (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en) {
- amd64_notice("%s", ecc_msg);
+ amd64_info("%s", ecc_msg);
return false;
}
return true;
@@ -3300,15 +3303,6 @@ static int init_one_instance(unsigned int nid)
goto err_add_mc;
}
- /* register stuff with EDAC MCE */
- if (report_gart_errors)
- amd_report_gart_errors(true);
-
- if (pvt->umc)
- amd_register_ecc_decoder(decode_umc_error);
- else
- amd_register_ecc_decoder(decode_bus_error);
-
return 0;
err_add_mc:
@@ -3342,7 +3336,7 @@ static int probe_one_instance(unsigned int nid)
ecc_stngs[nid] = s;
if (!ecc_enabled(F3, nid)) {
- ret = -ENODEV;
+ ret = 0;
if (!ecc_enable_override)
goto err_enable;
@@ -3363,6 +3357,8 @@ static int probe_one_instance(unsigned int nid)
if (boot_cpu_data.x86 < 0x17)
restore_ecc_error_reporting(s, nid, F3);
+
+ goto err_enable;
}
return ret;
@@ -3396,14 +3392,6 @@ static void remove_one_instance(unsigned int nid)
free_mc_sibling_devs(pvt);
- /* unregister from EDAC MCE */
- amd_report_gart_errors(false);
-
- if (pvt->umc)
- amd_unregister_ecc_decoder(decode_umc_error);
- else
- amd_unregister_ecc_decoder(decode_bus_error);
-
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -3452,8 +3440,11 @@ static int __init amd64_edac_init(void)
int err = -ENODEV;
int i;
+ if (!x86_match_cpu(amd64_cpuids))
+ return -ENODEV;
+
if (amd_cache_northbridges() < 0)
- goto err_ret;
+ return -ENODEV;
opstate_init();
@@ -3466,14 +3457,30 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- for (i = 0; i < amd_nb_num(); i++)
- if (probe_one_instance(i)) {
+ for (i = 0; i < amd_nb_num(); i++) {
+ err = probe_one_instance(i);
+ if (err) {
/* unwind properly */
while (--i >= 0)
remove_one_instance(i);
goto err_pci;
}
+ }
+
+ if (!edac_has_mcs()) {
+ err = -ENODEV;
+ goto err_pci;
+ }
+
+ /* register stuff with EDAC MCE */
+ if (report_gart_errors)
+ amd_report_gart_errors(true);
+
+ if (boot_cpu_data.x86 >= 0x17)
+ amd_register_ecc_decoder(decode_umc_error);
+ else
+ amd_register_ecc_decoder(decode_bus_error);
setup_pci_device();
@@ -3493,7 +3500,6 @@ err_free:
kfree(ecc_stngs);
ecc_stngs = NULL;
-err_ret:
return err;
}
@@ -3504,6 +3510,14 @@ static void __exit amd64_edac_exit(void)
if (pci_ctl)
edac_pci_release_generic_ctl(pci_ctl);
+ /* unregister from EDAC MCE */
+ amd_report_gart_errors(false);
+
+ if (boot_cpu_data.x86 >= 0x17)
+ amd_unregister_ecc_decoder(decode_umc_error);
+ else
+ amd_unregister_ecc_decoder(decode_bus_error);
+
for (i = 0; i < amd_nb_num(); i++)
remove_one_instance(i);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 496603d8f3d2..1d4b74e9a037 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -16,19 +16,14 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
+#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_module.h"
#include "mce_amd.h"
-#define amd64_debug(fmt, arg...) \
- edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
-
#define amd64_info(fmt, arg...) \
edac_printk(KERN_INFO, "amd64", fmt, ##arg)
-#define amd64_notice(fmt, arg...) \
- edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
-
#define amd64_warn(fmt, arg...) \
edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)
@@ -90,7 +85,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "3.4.0"
+#define EDAC_AMD64_VERSION "3.5.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 750891ea07de..e5573c56b15e 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -453,6 +453,20 @@ void edac_mc_free(struct mem_ctl_info *mci)
}
EXPORT_SYMBOL_GPL(edac_mc_free);
+bool edac_has_mcs(void)
+{
+ bool ret;
+
+ mutex_lock(&mem_ctls_mutex);
+
+ ret = list_empty(&mc_devices);
+
+ mutex_unlock(&mem_ctls_mutex);
+
+ return !ret;
+}
+EXPORT_SYMBOL_GPL(edac_has_mcs);
+
/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 50fc1dc9c0d8..5357800e418d 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -149,6 +149,15 @@ extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
extern void edac_mc_free(struct mem_ctl_info *mci);
/**
+ * edac_has_mcs() - Check if any MCs have been allocated.
+ *
+ * Returns:
+ * True if MC instances have been registered successfully.
+ * False otherwise.
+ */
+extern bool edac_has_mcs(void);
+
+/**
* edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx.
*
* @idx: index to be seek
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39dbab7d62f1..445862dac273 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -569,6 +569,40 @@ static ssize_t dimmdev_edac_mode_show(struct device *dev,
return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}
+static ssize_t dimmdev_ce_count_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+ u32 count;
+ int off;
+
+ off = EDAC_DIMM_OFF(dimm->mci->layers,
+ dimm->mci->n_layers,
+ dimm->location[0],
+ dimm->location[1],
+ dimm->location[2]);
+ count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][off];
+ return sprintf(data, "%u\n", count);
+}
+
+static ssize_t dimmdev_ue_count_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+ u32 count;
+ int off;
+
+ off = EDAC_DIMM_OFF(dimm->mci->layers,
+ dimm->mci->n_layers,
+ dimm->location[0],
+ dimm->location[1],
+ dimm->location[2]);
+ count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][off];
+ return sprintf(data, "%u\n", count);
+}
+
/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
dimmdev_label_show, dimmdev_label_store);
@@ -577,6 +611,8 @@ static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
+static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
+static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);
/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
@@ -586,6 +622,8 @@ static struct attribute *dimm_attrs[] = {
&dev_attr_dimm_mem_type.attr,
&dev_attr_dimm_dev_type.attr,
&dev_attr_dimm_edac_mode.attr,
+ &dev_attr_dimm_ce_count.attr,
+ &dev_attr_dimm_ue_count.attr,
NULL,
};
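The new dimm_ce_count/dimm_ue_count attributes above flatten the DIMM's layered location into an index into the lowest layer's counter array via EDAC_DIMM_OFF(). The flattening itself is plain row-major indexing; a standalone sketch with an assumed 2 x 2 x 4 layout (not the macro itself):

#include <stdio.h>

int main(void)
{
	/* Hypothetical layer sizes: csrows, channels, slots. */
	unsigned int size[3] = { 2, 2, 4 };
	unsigned int loc[3]  = { 1, 0, 3 };	/* the DIMM's location */
	unsigned int off = 0, i;

	/* Row-major flattening: outer layers vary slowest. */
	for (i = 0; i < 3; i++)
		off = off * size[i] + loc[i];

	printf("flat index = %u\n", off);	/* (1*2 + 0)*4 + 3 = 11 */
	return 0;
}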
@@ -831,7 +869,7 @@ static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
+static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
mci_sdram_scrub_rate_store); /* umode set later in is_visible */
static struct attribute *mci_attrs[] = {
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index 4e9608a958e7..efc8276d1d9c 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -145,12 +145,12 @@ static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
return 0;
}
-DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
- fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
-DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
- fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
-DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
- fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
+static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
+static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
+static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
static struct attribute *fsl_ddr_dev_attrs[] = {
&dev_attr_inject_data_hi.attr,
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 0a912bf6de00..e391f5a716be 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -304,7 +304,6 @@ static const char *ferr_global_lo_name[] = {
#define REDMEMA 0xdc
#define REDMEMB 0x7c
- #define IS_SECOND_CH(v) ((v) * (1 << 17))
#define RECMEMA 0xe0
#define RECMEMA_BANK(v) (((v) >> 12) & 7)
@@ -483,8 +482,9 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
REDMEMB, &value);
channel = (branch << 1);
- if (IS_SECOND_CH(value))
- channel++;
+
+ /* Second channel ? */
+ channel += !!(value & BIT(17));
/* Clear the error bit */
pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 69b5adead0ad..75ad847593b7 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1835,6 +1835,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
static struct notifier_block i7_mce_dec = {
.notifier_call = i7core_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
};
struct memdev_dmi_entry {
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 7baa8ace267b..9dcdab28f665 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -494,6 +494,10 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
mch_window = ioremap_nocache(mchbar, 0x1000);
+ if (!mch_window) {
+ edac_dbg(3, "error ioremapping MCHBAR!\n");
+ goto fail0;
+ }
#ifdef i82975x_DEBUG_IOMEM
i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 34208f38c5b1..ba35b7ea3686 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -937,12 +937,13 @@ static const char *decode_error_status(struct mce *m)
}
if (m->status & MCI_STATUS_DEFERRED)
- return "Deferred error.";
+ return "Deferred error, no action required.";
return "Corrected error, no action required.";
}
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
+static int
+amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
@@ -991,20 +992,22 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_cont("]: 0x%016llx\n", m->status);
if (m->status & MCI_STATUS_ADDRV)
- pr_emerg(HW_ERR "Error Addr: 0x%016llx", m->addr);
+ pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
+
if (m->status & MCI_STATUS_SYNDV)
pr_cont(", Syndrome: 0x%016llx", m->synd);
- pr_cont(", IPID: 0x%016llx", m->ipid);
-
pr_cont("\n");
decode_smca_errors(m);
goto err_code;
- } else
- pr_cont("\n");
+ }
+
+ if (m->tsc)
+ pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
if (!fam_ops)
goto err_code;
@@ -1047,10 +1050,10 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
return NOTIFY_STOP;
}
-EXPORT_SYMBOL_GPL(amd_decode_mce);
static struct notifier_block amd_mce_dec_nb = {
.notifier_call = amd_decode_mce,
+ .priority = MCE_PRIO_EDAC,
};
static int __init mce_amd_init(void)
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c2359a1ea6b3..0b6a68673e0e 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -79,6 +79,5 @@ struct amd_decoder_ops {
void amd_report_gart_errors(bool);
void amd_register_ecc_decoder(void (*f)(int, struct mce *));
void amd_unregister_ecc_decoder(void (*f)(int, struct mce *));
-int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data);
#endif /* _EDAC_MCE_AMD_H */
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8f66cbed70b7..67f7bc3fe5b3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -629,6 +629,7 @@ static const struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,p1020-l2-cache-controller", },
{ .compatible = "fsl,p1021-l2-cache-controller", },
{ .compatible = "fsl,p2020-l2-cache-controller", },
+ { .compatible = "fsl,t2080-l2-cache-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 54ae6dc45ab2..a65ea44e3b0b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -304,7 +304,6 @@ struct sbridge_info {
u64 (*rir_limit)(u32 reg);
u64 (*sad_limit)(u32 reg);
u32 (*interleave_mode)(u32 reg);
- char* (*show_interleave_mode)(u32 reg);
u32 (*dram_attr)(u32 reg);
const u32 *dram_rule;
const u32 *interleave_list;
@@ -811,11 +810,6 @@ static u32 interleave_mode(u32 reg)
return GET_BITFIELD(reg, 1, 1);
}
-char *show_interleave_mode(u32 reg)
-{
- return interleave_mode(reg) ? "8:6" : "[8:6]XOR[18:16]";
-}
-
static u32 dram_attr(u32 reg)
{
return GET_BITFIELD(reg, 2, 3);
@@ -831,29 +825,16 @@ static u32 knl_interleave_mode(u32 reg)
return GET_BITFIELD(reg, 1, 2);
}
-static char *knl_show_interleave_mode(u32 reg)
-{
- char *s;
-
- switch (knl_interleave_mode(reg)) {
- case 0:
- s = "use address bits [8:6]";
- break;
- case 1:
- s = "use address bits [10:8]";
- break;
- case 2:
- s = "use address bits [14:12]";
- break;
- case 3:
- s = "use address bits [32:30]";
- break;
- default:
- WARN_ON(1);
- break;
- }
+static const char * const knl_intlv_mode[] = {
+ "[8:6]", "[10:8]", "[14:12]", "[32:30]"
+};
- return s;
+static const char *get_intlv_mode_str(u32 reg, enum type t)
+{
+ if (t == KNIGHTS_LANDING)
+ return knl_intlv_mode[knl_interleave_mode(reg)];
+ else
+ return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}
static u32 dram_attr_knl(u32 reg)
@@ -1810,7 +1791,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
show_dram_attr(pvt->info.dram_attr(reg)),
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- pvt->info.show_interleave_mode(reg),
+ get_intlv_mode_str(reg, pvt->info.type),
reg);
prv = limit;
@@ -3136,7 +3117,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
}
static struct notifier_block sbridge_mce_dec = {
- .notifier_call = sbridge_mce_check_error,
+ .notifier_call = sbridge_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
};
/****************************************************************************
@@ -3227,7 +3209,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
- pvt->info.show_interleave_mode = show_interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
@@ -3251,7 +3232,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
- pvt->info.show_interleave_mode = show_interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
pvt->info.interleave_list = sbridge_interleave_list;
@@ -3275,7 +3255,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
- pvt->info.show_interleave_mode = show_interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
@@ -3299,7 +3278,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
- pvt->info.show_interleave_mode = show_interleave_mode;
pvt->info.dram_attr = dram_attr;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
@@ -3323,7 +3301,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.rir_limit = NULL;
pvt->info.sad_limit = knl_sad_limit;
pvt->info.interleave_mode = knl_interleave_mode;
- pvt->info.show_interleave_mode = knl_show_interleave_mode;
pvt->info.dram_attr = dram_attr_knl;
pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
pvt->info.interleave_list = knl_interleave_list;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 79ef675e4d6f..1159dba4671f 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -1007,7 +1007,8 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
}
static struct notifier_block skx_mce_dec = {
- .notifier_call = skx_mce_check_error,
+ .notifier_call = skx_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
};
static void skx_remove(void)
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index f853ad2c4ca0..1027d7b44358 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -250,7 +250,6 @@ void __init efi_init(void)
}
reserve_regions();
- efi_memattr_init();
efi_esrt_init();
efi_memmap_unmap();
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 92914801e388..e7d404059b73 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -529,6 +529,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
}
}
+ efi_memattr_init();
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 14914074f716..08b026864d4e 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -269,7 +269,7 @@ void __init efi_esrt_init(void)
max -= efi.esrt;
if (max < size) {
- pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n",
+ pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
size, max);
return;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d564d25df8ab..f7425960f6a5 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o
+lib-y := efi-stub-helper.o gop.o secureboot.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
@@ -60,7 +60,7 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
-STUBCOPY_FLAGS-y := -R .debug* -R *ksymtab* -R *kcrctab*
+STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -68,17 +68,25 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,stubcopy)
+#
+# Strip debug sections and some other sections that may legally contain
+# absolute relocations, so that we can inspect the remaining sections for
+# such relocations. If none are found, regenerate the output object, but
+# this time, use objcopy and leave all sections in place.
+#
quiet_cmd_stubcopy = STUBCPY $@
- cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then \
- $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \
- && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); else /bin/false; fi
+ cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
+ then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
+ then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
+ rm -f $@; /bin/false); \
+ else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
+ else /bin/false; fi
#
# ARM discards the .data section because it disallows r/w data in the
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
- -R ___ksymtab+sort -R ___kcrctab+sort
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index b4f7d78f9e8b..d4056c6be1ec 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -20,52 +20,6 @@
bool __nokaslr;
-static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
-{
- static efi_char16_t const sb_var_name[] = {
- 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
- static efi_char16_t const sm_var_name[] = {
- 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
-
- efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
- efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
- u8 val;
- unsigned long size = sizeof(val);
- efi_status_t status;
-
- status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
- NULL, &size, &val);
-
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- if (val == 0)
- return 0;
-
- status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
- NULL, &size, &val);
-
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- if (val == 1)
- return 0;
-
- return 1;
-
-out_efi_err:
- switch (status) {
- case EFI_NOT_FOUND:
- return 0;
- case EFI_DEVICE_ERROR:
- return -EIO;
- case EFI_SECURITY_VIOLATION:
- return -EACCES;
- default:
- return -EINVAL;
- }
-}
-
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
void *__image, void **__fh)
{
@@ -91,75 +45,6 @@ efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
return status;
}
-efi_status_t efi_file_close(void *handle)
-{
- efi_file_handle_t *fh = handle;
-
- return fh->close(handle);
-}
-
-efi_status_t
-efi_file_read(void *handle, unsigned long *size, void *addr)
-{
- efi_file_handle_t *fh = handle;
-
- return fh->read(handle, size, addr);
-}
-
-
-efi_status_t
-efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
- efi_char16_t *filename_16, void **handle, u64 *file_sz)
-{
- efi_file_handle_t *h, *fh = __fh;
- efi_file_info_t *info;
- efi_status_t status;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
- unsigned long info_sz;
-
- status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
- return status;
- }
-
- *handle = h;
-
- info_sz = 0;
- status = h->get_info(h, &info_guid, &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
- return status;
- }
-
-grow:
- status = sys_table_arg->boottime->allocate_pool(EFI_LOADER_DATA,
- info_sz, (void **)&info);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
- return status;
- }
-
- status = h->get_info(h, &info_guid, &info_sz,
- info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- sys_table_arg->boottime->free_pool(info);
- goto grow;
- }
-
- *file_sz = info->file_size;
- sys_table_arg->boottime->free_pool(info);
-
- if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to get initrd info\n");
-
- return status;
-}
-
-
-
void efi_char16_printk(efi_system_table_t *sys_table_arg,
efi_char16_t *str)
{
@@ -226,7 +111,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
- int secure_boot = 0;
+ enum efi_secureboot_mode secure_boot;
struct screen_info *si;
/* Check if we were booted by the EFI firmware */
@@ -296,19 +181,14 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
secure_boot = efi_get_secureboot(sys_table);
- if (secure_boot > 0)
- pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
-
- if (secure_boot < 0) {
- pr_efi_err(sys_table,
- "could not determine UEFI Secure Boot status.\n");
- }
/*
- * Unauthenticated device tree data is a security hazard, so
- * ignore 'dtb=' unless UEFI Secure Boot is disabled.
+ * Unauthenticated device tree data is a security hazard, so ignore
+ * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
+ * boot is enabled if we can't determine its state.
*/
- if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+ if (secure_boot != efi_secureboot_mode_disabled &&
+ strstr(cmdline_ptr, "dtb=")) {
pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
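The hunk above replaces the old int tri-state (enabled / error / disabled) with the shared efi_get_secureboot() helper and an enum efi_secureboot_mode return value. The enum itself is defined in include/linux/efi.h, which is not part of this excerpt; a minimal sketch of the values the code above relies on (the real definition may carry additional states):

/* Minimal sketch, for reference only; see include/linux/efi.h for the
 * authoritative definition. */
enum efi_secureboot_mode {
	efi_secureboot_mode_unknown,
	efi_secureboot_mode_disabled,
	efi_secureboot_mode_enabled,
};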
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 757badc1debb..919822b7773d 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -338,6 +338,69 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
efi_call_early(free_pages, addr, nr_pages);
}
+static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
+ efi_char16_t *filename_16, void **handle,
+ u64 *file_sz)
+{
+ efi_file_handle_t *h, *fh = __fh;
+ efi_file_info_t *info;
+ efi_status_t status;
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ unsigned long info_sz;
+
+ status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
+ EFI_FILE_MODE_READ, (u64)0);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to open file: ");
+ efi_char16_printk(sys_table_arg, filename_16);
+ efi_printk(sys_table_arg, "\n");
+ return status;
+ }
+
+ *handle = h;
+
+ info_sz = 0;
+ status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+ &info_sz, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ efi_printk(sys_table_arg, "Failed to get file info size\n");
+ return status;
+ }
+
+grow:
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ info_sz, (void **)&info);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ return status;
+ }
+
+ status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
+ &info_sz, info);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_call_early(free_pool, info);
+ goto grow;
+ }
+
+ *file_sz = info->file_size;
+ efi_call_early(free_pool, info);
+
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table_arg, "Failed to get initrd info\n");
+
+ return status;
+}
+
+static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+{
+ return efi_call_proto(efi_file_handle, read, handle, size, addr);
+}
+
+static efi_status_t efi_file_close(void *handle)
+{
+ return efi_call_proto(efi_file_handle, close, handle);
+}
+
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -351,6 +414,14 @@ efi_status_t efi_parse_options(char *cmdline)
char *str;
/*
+ * Currently, the only efi= option we look for is 'nochunk', which
+ * is intended to work around known issues on certain x86 UEFI
+ * versions. So ignore for now on other architectures.
+ */
+ if (!IS_ENABLED(CONFIG_X86))
+ return EFI_SUCCESS;
+
+ /*
* If no EFI parameters were specified on the cmdline we've got
* nothing to do.
*/
@@ -523,7 +594,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
size = files[j].size;
while (size) {
unsigned long chunksize;
- if (size > __chunk_size)
+
+ if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
chunksize = __chunk_size;
else
chunksize = size;
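With the file helpers moved here and routed through efi_call_proto()/efi_call_early(), the same source can be shared by the ARM stub and, via the arch-provided call wrappers, the x86 stub as well. A simplified sketch of how handle_cmdline_files() uses them together with the chunking guard shown above; only efi_file_size()/efi_file_read()/efi_file_close() and __chunk_size (the file-static limit that efi=nochunk disables) come from this file, everything else is illustrative and error paths are trimmed:

static efi_status_t load_one_file(efi_system_table_t *sys_table, void *volume,
				  efi_char16_t *name_16, void *dest, u64 *size)
{
	void *fh;
	u8 *p = dest;
	u64 left;
	unsigned long chunk;
	efi_status_t status;

	status = efi_file_size(sys_table, volume, name_16, &fh, size);
	if (status != EFI_SUCCESS)
		return status;

	for (left = *size; left; left -= chunk, p += chunk) {
		chunk = left;
		if (IS_ENABLED(CONFIG_X86) && chunk > __chunk_size)
			chunk = __chunk_size;

		status = efi_file_read(fh, &chunk, p);
		if (status != EFI_SUCCESS)
			break;
	}

	efi_file_close(fh);
	return status;
}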
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 0e2a96b12cb3..71c4d0e3c4ed 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -29,14 +29,6 @@ void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
void **__fh);
-efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
- efi_char16_t *filename_16, void **handle,
- u64 *file_sz);
-
-efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr);
-
-efi_status_t efi_file_close(void *handle);
-
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 921dfa047202..260c4b4b492e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
+ void *new_fdt_addr;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
p->runtime_map, p->runtime_entry_count);
- return EFI_SUCCESS;
+ return update_fdt_memmap(p->new_fdt_addr, map);
}
/*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
- if (status != EFI_SUCCESS) {
- /*
- * The kernel won't get far without the memory map, but
- * may still be able to print something meaningful so
- * return success here.
- */
- return EFI_SUCCESS;
- }
-
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
new file mode 100644
index 000000000000..6def402bf569
--- /dev/null
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -0,0 +1,84 @@
+/*
+ * Secure boot handling.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ * Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Mark Salter <msalter@redhat.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+/* BIOS variables */
+static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+static const efi_char16_t efi_SecureBoot_name[] = {
+ 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
+};
+static const efi_char16_t efi_SetupMode_name[] = {
+ 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
+};
+
+/* SHIM variables */
+static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+static efi_char16_t const shim_MokSBState_name[] = {
+ 'M', 'o', 'k', 'S', 'B', 'S', 't', 'a', 't', 'e', 0
+};
+
+#define get_efi_var(name, vendor, ...) \
+ efi_call_runtime(get_variable, \
+ (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+ __VA_ARGS__)
+
+/*
+ * Determine whether we're in secure boot mode.
+ */
+enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+{
+ u32 attr;
+ u8 secboot, setupmode, moksbstate;
+ unsigned long size;
+ efi_status_t status;
+
+ size = sizeof(secboot);
+ status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
+ NULL, &size, &secboot);
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ size = sizeof(setupmode);
+ status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
+ NULL, &size, &setupmode);
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+
+ /*
+ * See if a user has put the shim into insecure mode. If so, and if the
+ * variable doesn't have the runtime attribute set, we might as well
+ * honor that.
+ */
+ size = sizeof(moksbstate);
+ status = get_efi_var(shim_MokSBState_name, &shim_guid,
+ &attr, &size, &moksbstate);
+
+ /* If it fails, we don't care why. Default to secure */
+ if (status != EFI_SUCCESS)
+ goto secure_boot_enabled;
+ if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ return efi_secureboot_mode_disabled;
+
+secure_boot_enabled:
+ pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+ return efi_secureboot_mode_enabled;
+
+out_efi_err:
+ pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ return efi_secureboot_mode_unknown;
+}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 236004b9a50d..8986757eafaf 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -43,6 +43,7 @@ int __init efi_memattr_init(void)
tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
memblock_reserve(efi.mem_attr_table, tbl_size);
+ set_bit(EFI_MEM_ATTR, &efi.flags);
unmap:
early_memunmap(tbl, sizeof(*tbl));
@@ -174,8 +175,11 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
md.phys_addr + size - 1,
efi_md_typeattr_format(buf, sizeof(buf), &md));
- if (valid)
+ if (valid) {
ret = fn(mm, &md);
+ if (ret)
+ pr_err("Error updating mappings, skipping subsequent md's\n");
+ }
}
memunmap(tbl);
return ret;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925a423c..fb16cc771c0d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -18,55 +18,72 @@
#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>
+struct aspeed_bank_props {
+ unsigned int bank;
+ u32 input;
+ u32 output;
+};
+
+struct aspeed_gpio_config {
+ unsigned int nr_gpios;
+ const struct aspeed_bank_props *props;
+};
+
struct aspeed_gpio {
struct gpio_chip chip;
spinlock_t lock;
void __iomem *base;
int irq;
+ const struct aspeed_gpio_config *config;
};
struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
- const char names[4];
+ const char names[4][3];
};
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
.irq_regs = 0x0008,
- .names = { 'A', 'B', 'C', 'D' },
+ .names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
- .names = { 'E', 'F', 'G', 'H' },
+ .names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
- .names = { 'I', 'J', 'K', 'L' },
+ .names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
- .names = { 'M', 'N', 'O', 'P' },
+ .names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
- .names = { 'Q', 'R', 'S', 'T' },
+ .names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
- .names = { 'U', 'V', 'W', 'X' },
+ .names = { "U", "V", "W", "X" },
+ },
+ {
+ .val_regs = 0x01E0,
+ .irq_regs = 0x0178,
+ .names = { "Y", "Z", "AA", "AB" },
+ },
+ {
+ .val_regs = 0x01E8,
+ .irq_regs = 0x01A8,
+ .names = { "AC", "", "", "" },
},
- /*
- * A bank exists for { 'Y', 'Z', "AA", "AB" }, but is not implemented.
- * Only half of GPIOs Y support interrupt configuration, and none of Z,
- * AA or AB do as they are output only.
- */
};
#define GPIO_BANK(x) ((x) >> 5)
@@ -90,6 +107,51 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
return &aspeed_gpio_banks[bank];
}
+static inline bool is_bank_props_sentinel(const struct aspeed_bank_props *props)
+{
+ return !(props->input || props->output);
+}
+
+static inline const struct aspeed_bank_props *find_bank_props(
+ struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = gpio->config->props;
+
+ while (!is_bank_props_sentinel(props)) {
+ if (props->bank == GPIO_BANK(offset))
+ return props;
+ props++;
+ }
+
+ return NULL;
+}
+
+static inline bool have_gpio(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned int group = GPIO_OFFSET(offset) / 8;
+
+ return bank->names[group][0] != '\0' &&
+ (!props || ((props->input | props->output) & GPIO_BIT(offset)));
+}
+
+static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+
+ return !props || (props->input & GPIO_BIT(offset));
+}
+
+#define have_irq(g, o) have_input((g), (o))
+
+static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+
+ return !props || (props->output & GPIO_BIT(offset));
+}
+
static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
const struct aspeed_gpio_bank *bank,
unsigned int reg)
@@ -152,6 +214,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
unsigned long flags;
u32 reg;
+ if (!have_input(gpio, offset))
+ return -ENOTSUPP;
+
spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
@@ -170,6 +235,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned long flags;
u32 reg;
+ if (!have_output(gpio, offset))
+ return -ENOTSUPP;
+
spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
@@ -189,6 +257,12 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
unsigned long flags;
u32 val;
+ if (!have_input(gpio, offset))
+ return 0;
+
+ if (!have_output(gpio, offset))
+ return 1;
+
spin_lock_irqsave(&gpio->lock, flags);
val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
@@ -205,10 +279,17 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
u32 *bit)
{
int offset;
+ struct aspeed_gpio *internal;
offset = irqd_to_hwirq(d);
- *gpio = irq_data_get_irq_chip_data(d);
+ internal = irq_data_get_irq_chip_data(d);
+
+ /* This might be a bit of a questionable place to check */
+ if (!have_irq(internal, offset))
+ return -ENOTSUPP;
+
+ *gpio = internal;
*bank = to_bank(offset);
*bit = GPIO_BIT(offset);
@@ -364,6 +445,28 @@ static struct irq_chip aspeed_gpio_irqchip = {
.irq_set_type = aspeed_gpio_set_type,
};
+static void set_irq_valid_mask(struct aspeed_gpio *gpio)
+{
+ const struct aspeed_bank_props *props = gpio->config->props;
+
+ while (!is_bank_props_sentinel(props)) {
+ unsigned int offset;
+ const unsigned long int input = props->input;
+
+ /* Pretty crummy approach, but similar to GPIO core */
+ for_each_clear_bit(offset, &input, 32) {
+ unsigned int i = props->bank * 32 + offset;
+
+ if (i >= gpio->config->nr_gpios)
+ break;
+
+ clear_bit(i, gpio->chip.irq_valid_mask);
+ }
+
+ props++;
+ }
+}
+
static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
struct platform_device *pdev)
{
@@ -375,6 +478,8 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
gpio->irq = rc;
+ set_irq_valid_mask(gpio);
+
rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
0, handle_bad_irq, IRQ_TYPE_NONE);
if (rc) {
@@ -390,6 +495,9 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
+ if (!have_gpio(gpiochip_get_data(chip), offset))
+ return -ENODEV;
+
return pinctrl_request_gpio(chip->base + offset);
}
@@ -398,8 +506,46 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_free_gpio(chip->base + offset);
}
+/*
+ * Any banks not specified in a struct aspeed_bank_props array are assumed to
+ * have the properties:
+ *
+ * { .input = 0xffffffff, .output = 0xffffffff }
+ */
+
+static const struct aspeed_bank_props ast2400_bank_props[] = {
+ /* input output */
+ { 5, 0xffffffff, 0x0000ffff }, /* U/V/W/X */
+ { 6, 0x0000000f, 0x0fffff0f }, /* Y/Z/AA/AB, two 4-GPIO holes */
+ { },
+};
+
+static const struct aspeed_gpio_config ast2400_config =
+ /* 220 for simplicity, really 216 with two 4-GPIO holes, four at end */
+ { .nr_gpios = 220, .props = ast2400_bank_props, };
+
+static const struct aspeed_bank_props ast2500_bank_props[] = {
+ /* input output */
+ { 5, 0xffffffff, 0x0000ffff }, /* U/V/W/X */
+ { 6, 0x0fffffff, 0x0fffffff }, /* Y/Z/AA/AB, 4-GPIO hole */
+ { 7, 0x000000ff, 0x000000ff }, /* AC */
+ { },
+};
+
+static const struct aspeed_gpio_config ast2500_config =
+ /* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
+ { .nr_gpios = 232, .props = ast2500_bank_props, };
+
+static const struct of_device_id aspeed_gpio_of_table[] = {
+ { .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
+ { .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
+
static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
struct resource *res;
int rc;
@@ -415,8 +561,13 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
spin_lock_init(&gpio->lock);
- gpio->chip.ngpio = ARRAY_SIZE(aspeed_gpio_banks) * 32;
+ gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node);
+ if (!gpio_id)
+ return -EINVAL;
+
+ gpio->config = gpio_id->data;
+ gpio->chip.ngpio = gpio->config->nr_gpios;
gpio->chip.parent = &pdev->dev;
gpio->chip.direction_input = aspeed_gpio_dir_in;
gpio->chip.direction_output = aspeed_gpio_dir_out;
@@ -427,6 +578,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.set = aspeed_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
+ gpio->chip.irq_need_valid_mask = true;
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (rc < 0)
@@ -435,13 +587,6 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
return aspeed_gpio_setup_irqs(gpio, pdev);
}
-static const struct of_device_id aspeed_gpio_of_table[] = {
- { .compatible = "aspeed,ast2400-gpio" },
- { .compatible = "aspeed,ast2500-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
-
static struct platform_driver aspeed_gpio_driver = {
.driver = {
.name = KBUILD_MODNAME,
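The bank_props tables let banks with input-only or absent lines be described sparsely while untouched banks keep full capability. A worked example of how the lookup composes with the bank/bit macros; GPIO_OFFSET()/GPIO_BIT() are not visible in this hunk, so their usual definitions are assumed here:

/*
 * Assumed helpers (illustrative, consistent with GPIO_BANK() above):
 *   #define GPIO_OFFSET(x)  ((x) & 0x1f)
 *   #define GPIO_BIT(x)     BIT(GPIO_OFFSET(x))
 *
 * For offset 190 on the AST2400 (line X6):
 *   GPIO_BANK(190) == 5            -> matches { 5, 0xffffffff, 0x0000ffff }
 *   GPIO_BIT(190)  == BIT(30)
 *   input  & BIT(30) != 0          -> have_input()  is true
 *   output & BIT(30) == 0          -> have_output() is false
 *
 * so aspeed_gpio_dir_out() returns -ENOTSUPP for that line while input and
 * IRQ use keep working; banks with no props entry stay fully bidirectional.
 */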
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 3d1cf018e8e7..41d0ac142580 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -308,6 +308,18 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
return 0;
}
+static int bcm_kona_gpio_set_config(struct gpio_chip *chip, unsigned gpio,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return bcm_kona_gpio_set_debounce(chip, gpio, debounce);
+}
+
static const struct gpio_chip template_chip = {
.label = "bcm-kona-gpio",
.owner = THIS_MODULE,
@@ -318,7 +330,7 @@ static const struct gpio_chip template_chip = {
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
.set = bcm_kona_gpio_set,
- .set_debounce = bcm_kona_gpio_set_debounce,
+ .set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
};
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 5d38b08d1ee2..aecb847166f5 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -272,12 +272,16 @@ static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
-static int dln2_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
+static int dln2_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- __le32 duration = cpu_to_le32(debounce);
+ __le32 duration;
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ duration = cpu_to_le32(pinconf_to_config_argument(config));
return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_SET_DEBOUNCE,
&duration, sizeof(duration));
}
@@ -474,7 +478,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.get_direction = dln2_gpio_get_direction;
dln2->gpio.direction_input = dln2_gpio_direction_input;
dln2->gpio.direction_output = dln2_gpio_direction_output;
- dln2->gpio.set_debounce = dln2_gpio_set_debounce;
+ dln2->gpio.set_config = dln2_gpio_set_config;
platform_set_drvdata(pdev, dln2);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 6193f62c0df4..9c15ee4ef4e9 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -279,6 +279,18 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
return 0;
}
+static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return dwapb_gpio_set_debounce(gc, offset, debounce);
+}
+
static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
{
u32 worked;
@@ -426,7 +438,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
/* Only port A support debounce */
if (pp->idx == 0)
- port->gc.set_debounce = dwapb_gpio_set_debounce;
+ port->gc.set_config = dwapb_gpio_set_config;
if (pp->irq)
dwapb_configure_irqs(gpio, port, pp);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index d054219e18b9..45d384039e9b 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -291,15 +291,20 @@ static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
};
-static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
- unsigned offset, unsigned debounce)
+static int ep93xx_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
int gpio = chip->base + offset;
int irq = gpio_to_irq(gpio);
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
if (irq < 0)
return -EINVAL;
+ debounce = pinconf_to_config_argument(config);
ep93xx_gpio_int_debounce(irq, debounce ? true : false);
return 0;
@@ -335,7 +340,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->base = bank->base;
if (bank->has_debounce) {
- gc->set_debounce = ep93xx_gpio_set_debounce;
+ gc->set_config = ep93xx_gpio_set_config;
gc->to_irq = ep93xx_gpio_to_irq;
}
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index e8accde62aa7..56bd76c33767 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -131,9 +131,8 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
-static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode);
+static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config);
#define F7188X_GPIO_BANK(_base, _ngpio, _regbase) \
{ \
@@ -145,7 +144,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
.set = f7188x_gpio_set, \
- .set_single_ended = f7188x_gpio_set_single_ended, \
+ .set_config = f7188x_gpio_set_config, \
.base = _base, \
.ngpio = _ngpio, \
.can_sleep = true, \
@@ -326,17 +325,17 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_exit(sio->addr);
}
-static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned offset,
- enum single_ended_mode mode)
+static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
int err;
+ enum pin_config_param param = pinconf_to_config_param(config);
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
struct f7188x_sio *sio = bank->data->sio;
u8 data;
- if (mode != LINE_MODE_OPEN_DRAIN &&
- mode != LINE_MODE_PUSH_PULL)
+ if (param != PIN_CONFIG_DRIVE_OPEN_DRAIN &&
+ param != PIN_CONFIG_DRIVE_PUSH_PULL)
return -ENOTSUPP;
err = superio_enter(sio->addr);
@@ -345,7 +344,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
superio_select(sio->addr, SIO_LD_GPIO);
data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
data &= ~BIT(offset);
else
data |= BIT(offset);
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 218c706359aa..df0ad2cef0d2 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -100,21 +100,21 @@ static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
return 0;
}
-static int lp873x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned int offset,
- enum single_ended_mode mode)
+static int lp873x_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
{
struct lp873x_gpio *gpio = gpiochip_get_data(gc);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(gpio->lp873->regmap,
LP873X_REG_GPO_CTRL,
BIT(offset * BITS_PER_GPO +
LP873X_GPO_CTRL_OD),
BIT(offset * BITS_PER_GPO +
LP873X_GPO_CTRL_OD));
- case LINE_MODE_PUSH_PULL:
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(gpio->lp873->regmap,
LP873X_REG_GPO_CTRL,
BIT(offset * BITS_PER_GPO +
@@ -133,7 +133,7 @@ static const struct gpio_chip template_chip = {
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
.set = lp873x_gpio_set,
- .set_single_ended = lp873x_gpio_set_single_ended,
+ .set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index ec8de4190db9..743459d9477d 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -152,11 +152,10 @@ static int max77620_gpio_dir_output(struct gpio_chip *gc, unsigned int offset,
return ret;
}
-static int max77620_gpio_set_debounce(struct gpio_chip *gc,
+static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
unsigned int offset,
unsigned int debounce)
{
- struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
int ret;
@@ -202,21 +201,23 @@ static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
}
-static int max77620_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned int offset,
- enum single_ended_mode mode)
+static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_OPENDRAIN);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_PUSHPULL);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return max77620_gpio_set_debounce(mgpio, offset,
+ pinconf_to_config_argument(config));
default:
break;
}
@@ -257,9 +258,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_debounce = max77620_gpio_set_debounce;
mgpio->gpio_chip.set = max77620_gpio_set;
- mgpio->gpio_chip.set_single_ended = max77620_gpio_set_single_ended;
+ mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.to_irq = max77620_gpio_to_irq;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a1210e330571..e1037582e34d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -89,22 +89,18 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
static int men_z127_set_single_ended(struct gpio_chip *gc,
unsigned offset,
- enum single_ended_mode mode)
+ enum pin_config_param param)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- if (mode != LINE_MODE_OPEN_DRAIN &&
- mode != LINE_MODE_PUSH_PULL)
- return -ENOTSUPP;
-
spin_lock(&gc->bgpio_lock);
od_en = readl(priv->reg_base + MEN_Z127_ODER);
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
od_en |= BIT(offset);
else
- /* Implicitly LINE_MODE_PUSH_PULL */
+ /* Implicitly PIN_CONFIG_DRIVE_PUSH_PULL */
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
@@ -113,6 +109,27 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
return 0;
}
+static int men_z127_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return men_z127_set_single_ended(gc, offset, param);
+
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return men_z127_debounce(gc, offset,
+ pinconf_to_config_argument(config));
+
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
@@ -149,8 +166,7 @@ static int men_z127_probe(struct mcb_device *mdev,
if (ret)
goto err_unmap;
- men_z127_gpio->gc.set_debounce = men_z127_debounce;
- men_z127_gpio->gc.set_single_ended = men_z127_set_single_ended;
+ men_z127_gpio->gc.set_config = men_z127_set_config;
ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
if (ret) {
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 69e0f4ace465..f40088d268c1 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -190,6 +190,18 @@ static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
return 0;
}
+static int mrfld_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return mrfld_gpio_set_debounce(chip, offset, debounce);
+}
+
static void mrfld_irq_ack(struct irq_data *d)
{
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
@@ -414,7 +426,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.get = mrfld_gpio_get;
priv->chip.set = mrfld_gpio_set;
priv->chip.get_direction = mrfld_gpio_get_direction;
- priv->chip.set_debounce = mrfld_gpio_set_debounce;
+ priv->chip.set_config = mrfld_gpio_set_config;
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b98ede78c9d8..efc85a279d54 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -974,6 +974,18 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
return 0;
}
+static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return omap_gpio_debounce(chip, offset, debounce);
+}
+
static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
@@ -1045,7 +1057,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.direction_input = omap_gpio_input;
bank->chip.get = omap_gpio_get;
bank->chip.direction_output = omap_gpio_output;
- bank->chip.set_debounce = omap_gpio_debounce;
+ bank->chip.set_config = omap_gpio_set_config;
bank->chip.set = omap_gpio_set;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index be97101c2c9a..433b45ef332e 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -100,9 +100,8 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
return !(ret & BIT(pos));
}
-static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int tc3589x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -116,22 +115,22 @@ static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
unsigned int pos = offset % 8;
int ret;
- switch(mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
/* Set open drain mode */
ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), 0);
if (ret)
return ret;
/* Enable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
- case LINE_MODE_OPEN_SOURCE:
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
/* Set open source mode */
ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), BIT(pos));
if (ret)
return ret;
/* Enable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
/* Disable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), 0);
default:
@@ -148,7 +147,7 @@ static const struct gpio_chip template_chip = {
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
- .set_single_ended = tc3589x_gpio_set_single_ended,
+ .set_config = tc3589x_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 661b0e34e067..88529d3c06c9 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -238,6 +238,18 @@ static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
return 0;
}
+static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return tegra_gpio_set_debounce(chip, offset, debounce);
+}
+
static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -615,7 +627,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tgi);
if (config->debounce_supported)
- tgi->gc.set_debounce = tegra_gpio_set_debounce;
+ tgi->gc.set_config = tegra_gpio_set_config;
tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
sizeof(*tgi->bank_info), GFP_KERNEL);
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 46e6dcc089cb..a379bba57d31 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -139,28 +139,28 @@ static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode)
+static int tps65218_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
{
struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ enum pin_config_param param = pinconf_to_config_param(config);
switch (offset) {
case 0:
case 2:
/* GPO1 is hardwired to be open drain */
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
return 0;
return -ENOTSUPP;
case 1:
/* GPO2 is push-pull by default, can be set as open drain. */
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
return tps65218_clear_bits(tps65218,
TPS65218_REG_CONFIG1,
TPS65218_CONFIG1_GPO2_BUF,
TPS65218_PROTECT_L1);
- if (mode == LINE_MODE_PUSH_PULL)
+ if (param == PIN_CONFIG_DRIVE_PUSH_PULL)
return tps65218_set_bits(tps65218,
TPS65218_REG_CONFIG1,
TPS65218_CONFIG1_GPO2_BUF,
@@ -181,7 +181,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
.set = tps65218_gpio_set,
- .set_single_ended = tps65218_gpio_set_single_ended,
+ .set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
.base = -1,
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4e450121129b..98a6f1fcc561 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -186,23 +186,24 @@ static int vx855gpio_direction_output(struct gpio_chip *gpio,
return 0;
}
-static int vx855gpio_set_single_ended(struct gpio_chip *gpio,
- unsigned int nr,
- enum single_ended_mode mode)
+static int vx855gpio_set_config(struct gpio_chip *gpio, unsigned int nr,
+ unsigned long config)
{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
/* The GPI cannot be single-ended */
if (nr < NR_VX855_GPI)
return -EINVAL;
/* The GPO's are push-pull */
if (nr < NR_VX855_GPInO) {
- if (mode != LINE_MODE_PUSH_PULL)
+ if (param != PIN_CONFIG_DRIVE_PUSH_PULL)
return -ENOTSUPP;
return 0;
}
/* The GPIO's are open drain */
- if (mode != LINE_MODE_OPEN_DRAIN)
+ if (param != PIN_CONFIG_DRIVE_OPEN_DRAIN)
return -ENOTSUPP;
return 0;
@@ -231,7 +232,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
- c->set_single_ended = vx855gpio_set_single_ended;
+ c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 34baee5b1dd6..97613de5304e 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -202,17 +202,16 @@ static void wcove_gpio_set(struct gpio_chip *chip,
regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
}
-static int wcove_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int gpio,
- enum single_ended_mode mode)
+static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
+ unsigned long config)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
CTLO_DRV_MASK, CTLO_DRV_OD);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
CTLO_DRV_MASK, CTLO_DRV_CMOS);
default:
@@ -411,7 +410,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
wg->chip.set = wcove_gpio_set;
- wg->chip.set_single_ended = wcove_gpio_set_single_ended,
+ wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
wg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 533707f943f4..00e3839b3f96 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -101,11 +101,9 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
WM831X_IRQ_GPIO_1 + offset);
}
-static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+static int wm831x_gpio_set_debounce(struct wm831x *wm831x, unsigned offset,
unsigned debounce)
{
- struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
- struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
int ret, fn;
@@ -132,21 +130,23 @@ static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}
-static int wm831x_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int wm831x_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return wm831x_set_bits(wm831x, reg,
WM831X_GPN_OD_MASK, WM831X_GPN_OD);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return wm831x_set_bits(wm831x, reg,
WM831X_GPN_OD_MASK, 0);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return wm831x_gpio_set_debounce(wm831x, offset,
+ pinconf_to_config_argument(config));
default:
break;
}
@@ -255,8 +255,7 @@ static const struct gpio_chip template_chip = {
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
- .set_debounce = wm831x_gpio_set_debounce,
- .set_single_ended = wm831x_set_single_ended,
+ .set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 68410fda6138..1e35756ac55b 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -103,19 +103,18 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
-static int wm8994_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_OP_CFG_MASK,
WM8994_GPN_OP_CFG);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_OP_CFG_MASK, 0);
default:
@@ -257,7 +256,7 @@ static const struct gpio_chip template_chip = {
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
.set = wm8994_gpio_set,
- .set_single_ended = wm8994_gpio_set_single_ended,
+ .set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
.can_sleep = true,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86bf3b84ada5..d0478f1853db 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* need to be open coded.
*/
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key)
{
struct device_node *of_node;
bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
return 0;
}
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
@@ -1876,6 +1876,19 @@ void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
+/**
+ * gpiochip_generic_config() - apply configuration for a pin
+ * @chip: the gpiochip owning the GPIO
+ * @offset: the offset of the GPIO to apply the configuration
+ * @config: the configuration to be applied
+ */
+int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config);
+}
+EXPORT_SYMBOL_GPL(gpiochip_generic_config);
+
#ifdef CONFIG_PINCTRL
/**
@@ -2264,6 +2277,14 @@ int gpiod_direction_input(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
+static int gpio_set_drive_single_ended(struct gpio_chip *gc, unsigned offset,
+ enum pin_config_param mode)
+{
+ unsigned long config = PIN_CONF_PACKED(mode, 0);
+
+ return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+}
+
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
@@ -2280,32 +2301,25 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
- if (gc->set_single_ended) {
- ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
- LINE_MODE_OPEN_DRAIN);
- if (!ret)
- goto set_output_value;
- }
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ if (!ret)
+ goto set_output_value;
/* Emulate open drain by not actively driving the line high */
if (val)
return gpiod_direction_input(desc);
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- if (gc->set_single_ended) {
- ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
- LINE_MODE_OPEN_SOURCE);
- if (!ret)
- goto set_output_value;
- }
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ if (!ret)
+ goto set_output_value;
/* Emulate open source by not actively driving the line low */
if (!val)
return gpiod_direction_input(desc);
} else {
- /* Make sure to disable open drain/source hardware, if any */
- if (gc->set_single_ended)
- gc->set_single_ended(gc,
- gpio_chip_hwgpio(desc),
- LINE_MODE_PUSH_PULL);
+ gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_PUSH_PULL);
}
set_output_value:
@@ -2376,17 +2390,19 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
struct gpio_chip *chip;
+ unsigned long config;
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->set || !chip->set_debounce) {
+ if (!chip->set || !chip->set_config) {
gpiod_dbg(desc,
- "%s: missing set() or set_debounce() operations\n",
+ "%s: missing set() or set_config() operations\n",
__func__);
return -ENOTSUPP;
}
- return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce);
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
+ return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
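For chips whose pins are owned by a pinctrl back-end, the new gpiochip_generic_config() gives drivers a ready-made .set_config that simply forwards to pinctrl, mirroring the existing gpiochip_generic_request()/gpiochip_generic_free() pair. A minimal wiring sketch; foo_wire_chip() is illustrative, the three helpers are the ones exported above:

static void foo_wire_chip(struct gpio_chip *chip)
{
	chip->request    = gpiochip_generic_request;
	chip->free       = gpiochip_generic_free;
	chip->set_config = gpiochip_generic_config; /* -> pinctrl_gpio_set_config() */
}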
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
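With the mmCUR_SIZE write moved into dce_v10_0_cursor_move_locked(), the cursor size is reprogrammed together with the position and hot-spot rather than only when it changes; the same rework is repeated for DCE 11, 6 and 8 below. A worked example of the register packing used by the new write:

/* For the common 64x64 cursor:
 *   ((64 - 1) << 16) | (64 - 1) == 0x003f003f
 * i.e. width-1 in bits 31:16 and height-1 in bits 15:0 of mmCUR_SIZE.
 */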
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 2006abbbfb62..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ int w = amdgpu_crtc->cursor_width;
+
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
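
The two DCE hunks above move the CUR_SIZE write out of cursor_set2() and into cursor_move_locked(), so the register is refreshed on every cursor move instead of only when the dimensions change; width and height are packed into one 32-bit value with each field reduced by one. A minimal userspace sketch of that packing (register layout as implied by the hunks; the 64x64 cursor is just an example):

#include <stdint.h>
#include <stdio.h>

/* Pack cursor dimensions the way the dce_v6_0/dce_v8_0 hunks do:
 * bits 31:16 hold (width - 1), bits 15:0 hold (height - 1). */
static uint32_t cur_size_value(uint32_t width, uint32_t height)
{
        return ((width - 1) << 16) | (height - 1);
}

int main(void)
{
        uint32_t w = 64, h = 64;        /* example cursor, not from the patch */

        printf("CUR_SIZE = 0x%08x for %ux%u cursor\n",
               cur_size_value(w, h), w, h);
        return 0;
}
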
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
+ kfree(encoder);
}
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..0635829b18cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
const char *chip_name;
char fw_name[30];
int err;
+ bool is_58_fw = false;
DRM_DEBUG("\n");
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ /* this memory configuration requires special firmware */
+ if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ is_58_fw = true;
+
+ if (is_58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -245,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_set_vga_render_state(adev, false);
+
gmc_v6_0_mc_stop(adev, &save);
if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -274,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v6_0_mc_resume(adev, &save);
- amdgpu_display_set_vga_render_state(adev, false);
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -463,19 +474,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+ ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ gmc_v6_0_set_fault_enable_default(adev, false);
+ else
+ gmc_v6_0_set_fault_enable_default(adev, true);
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +757,10 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v6_0_sw_init(void *handle)
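
gmc_v6_0_init_microcode() now inspects bits 31:24 of MC_SEQ_MISC0 and, when they read 0x58, requests the dedicated radeon/si58_mc.bin image instead of the per-chip MC firmware. A standalone sketch of that selection, assuming a sample register value and a chip name the driver would already have picked:

#include <stdint.h>
#include <stdio.h>

/* Mirror the firmware-name selection from gmc_v6_0_init_microcode():
 * memory configurations reporting 0x58 in MC_SEQ_MISC0[31:24] need the
 * dedicated si58 MC firmware. */
static void pick_mc_firmware(uint32_t mc_seq_misc0, const char *chip_name,
                             char *fw_name, size_t len)
{
        int is_58_fw = ((mc_seq_misc0 & 0xff000000u) >> 24) == 0x58;

        if (is_58_fw)
                snprintf(fw_name, len, "radeon/si58_mc.bin");
        else
                snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
}

int main(void)
{
        char fw[30];

        pick_mc_firmware(0x58000000u, "oland", fw, sizeof(fw)); /* sample value */
        printf("%s\n", fw);
        return 0;
}
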
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 10bedfac27b8..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (adev->asic_type == CHIP_OLAND) {
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))) ||
- ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)))
+ (adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
+ else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665))
+ chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
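
In si_dpm_init_microcode() the Hainan board with PCI revision 0xc3 and device 0x6665 is split out of the hainan_k group and given its own banks_k_2 SMC firmware. A sketch of the resulting name selection, covering only the combinations visible in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the Hainan SMC firmware pick in si_dpm_init_microcode();
 * only the revision/device combinations shown in the hunk are handled. */
static const char *hainan_smc_name(uint8_t revision, uint16_t device)
{
        if ((revision == 0x81 || revision == 0x83) &&
            (device == 0x6660 || device == 0x6663 ||
             device == 0x6665 || device == 0x6667))
                return "hainan_k";
        else if (revision == 0xc3 && device == 0x6665)
                return "banks_k_2";
        return "hainan";
}

int main(void)
{
        printf("%s\n", hainan_smc_name(0xc3, 0x6665));  /* prints banks_k_2 */
        return 0;
}
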
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
#include "smu/smu_7_0_1_sh_mask.h"
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+ bool sw_mode);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
return r;
}
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v4_2_hw_init - start and test UVD block
*
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;
- uvd_v4_2_init_cg(adev);
- uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;
-
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
+ WREG32(mmUVD_CGC_GATE, 0);
+ uvd_v4_2_set_dcm(adev, true);
+
uvd_v4_2_mc_resume(adev);
/* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ uvd_v4_2_set_dcm(adev, false);
}
/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
- bool hw_mode = true;
-
- if (hw_mode) {
- uvd_v4_2_set_dcm(adev, false);
- } else {
- u32 tmp = RREG32(mmUVD_CGC_CTRL);
- tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, tmp);
- }
-}
-
static bool uvd_v4_2_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
- bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
- if (state == AMD_CG_STATE_GATE)
- gate = true;
-
- uvd_v4_2_enable_mgcg(adev, gate);
-
return 0;
}
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
-
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 5fb0b7f5c065..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
+
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+ | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- data &= ~0xffc00000;
+ data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
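
Throughout vce_v3_0 the WREG32_FIELD() calls on GRBM_GFX_INDEX are replaced by direct writes of GET_VCE_INSTANCE(idx), which shifts the instance number into place and always sets the all-pipes bits, with mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) written back afterwards. A sketch of the macro's arithmetic using the constants from the hunk:

#include <stdint.h>
#include <stdio.h>

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE        0x07
#define mmGRBM_GFX_INDEX_DEFAULT            0xE0000000u

/* Value written to GRBM_GFX_INDEX to select one VCE instance while
 * broadcasting to all of its pipes, as the hunk's GET_VCE_INSTANCE() does. */
static uint32_t get_vce_instance(unsigned int idx)
{
        return (idx << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT) |
               GRBM_GFX_INDEX__VCE_ALL_PIPE;
}

int main(void)
{
        printf("instance 0 -> 0x%08x\n", get_vce_instance(0));
        printf("instance 1 -> 0x%08x\n", get_vce_instance(1));
        printf("restore    -> 0x%08x\n", mmGRBM_GFX_INDEX_DEFAULT);
        return 0;
}
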
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*EPR# 419220 -HW limitation to to */
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
-
+ /*Program HardMin based on the vce_arbiter.ecclk */
+ if (hwmgr->vce_arbiter.ecclk == 0) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
+ /* disable ECLK DPM 0. Otherwise VCE could hang if
+ * switching SCLK from DPM 0 to 6/7 */
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkSoftMin, 1);
+ } else {
+ cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
+ }
}
return 0;
}
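
cz_dpm_update_vce_dpm() now branches on vce_arbiter.ecclk: when the arbiter clock is zero it programs an ECLK hard minimum of 0 and raises the soft minimum to level 1, so VCE never sits on ECLK DPM 0 while SCLK jumps to a high level; otherwise it keeps the original hard-minimum path. A minimal sketch of that decision, with placeholder SMU message IDs (the real PPSMC_MSG_* values are not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Placeholder message IDs; the real PPSMC_MSG_* numbers come from the SMU
 * headers and are not reproduced here. */
enum { MSG_SET_ECLK_HARD_MIN = 1, MSG_SET_ECLK_SOFT_MIN = 2 };

static void send_msg(int msg, uint32_t param)
{
        printf("smu msg %d param %u\n", msg, param);
}

/* Model of the branch added to cz_dpm_update_vce_dpm(); eclk_level stands in
 * for cz_get_eclk_level() applied to the arbiter clock. */
static void update_vce_dpm(uint32_t arbiter_ecclk, uint32_t eclk_level)
{
        if (arbiter_ecclk == 0) {
                send_msg(MSG_SET_ECLK_HARD_MIN, 0);
                /* keep ECLK off DPM 0 so VCE cannot hang on a big SCLK jump */
                send_msg(MSG_SET_ECLK_SOFT_MIN, 1);
        } else {
                send_msg(MSG_SET_ECLK_HARD_MIN, eclk_level);
        }
}

int main(void)
{
        update_vce_dpm(0, 0);           /* arbiter idle */
        update_vce_dpm(40000, 3);       /* made-up clock and level */
        return 0;
}
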
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d2c8f5..7abda94fc2cf 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ bool DisableP2A;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c6421db62..533e762d036d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;
+ /* Check P2A Access */
+ ast->DisableP2A = true;
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF)
+ ast->DisableP2A = false;
+
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
+ if (ast->DisableP2A == false) {
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
}
break;
}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (ast->DisableP2A)
+ {
ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ }
else
- ast->dram_bus_width = 32;
+ {
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
+ div = 0x4;
break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
+ case 2:
+ case 1:
+ div = 0x2;
break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
+ default:
+ div = 0x1;
break;
}
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
-
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
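
ast_detect_chip() now derives DisableP2A from register 0xf004: an all-ones read means the P2A bridge is unavailable and ast_get_dram_info() falls back to fixed defaults (16-bit bus, AST_DRAM_1Gx16, 396 MHz MCLK); otherwise the original SCU probing runs, ending in the MCLK formula at the bottom of the hunk. A sketch of that formula with invented register fields, purely to exercise the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Recompute MCLK the way ast_get_dram_info() does from the 0x10120/0x10170
 * register fields: ref_pll * (num + 2) / (denum + 2) * (div * 1000). */
static uint32_t ast_mclk(uint32_t reg_10120, uint32_t reg_10170)
{
        uint32_t ref_pll = (reg_10170 & 0x2000) ? 14318 : 12000;
        uint32_t denum = reg_10120 & 0x1f;
        uint32_t num = (reg_10120 & 0x3fe0) >> 5;
        uint32_t div;

        switch ((reg_10120 & 0xc000) >> 14) {
        case 3:
                div = 0x4;
                break;
        case 2:
        case 1:
                div = 0x2;
                break;
        default:
                div = 0x1;
                break;
        }
        return ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}

int main(void)
{
        /* invented field values, only meant to walk through the formula */
        printf("mclk = %u\n", ast_mclk(0x0020, 0x0000));
        return 0;
}
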
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..5331ee1df086 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_open_key(ast);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->DisableP2A == false)
+ {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ }
+ else
+ {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
+
return 0;
err_disable_pm_runtime:
+
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
the modesetting userspace X.org driver.
+
+ Cirrus is obsolete, the hardware was designed in the 90ies
+ and can't keep up with todays needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 60697482b94c..fdfb1ec17e66 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
- struct drm_crtc *crtc, s64 __user *fence_ptr)
+ struct drm_crtc *crtc, s32 __user *fence_ptr)
{
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- s64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->prop_out_fence_ptr) {
- s64 __user *fence_ptr = u64_to_user_ptr(val);
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
if (!fence_ptr)
return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
*/
struct drm_out_fence_state {
- s64 __user *out_fence_ptr;
+ s32 __user *out_fence_ptr;
struct sync_file *sync_file;
int fd;
};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- u64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_pending_vblank_event *event = crtc_state->event;
/*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually
- * exclusive, if they weren't, this code should be
- * called on success for TEST_ONLY too.
+ * Free the allocated event. drm_atomic_helper_setup_commit
+ * can allocate an event too, so only free it if it's ours
+ * to prevent a double free in drm_atomic_state_clear.
*/
- if (crtc_state->event)
- drm_event_cancel_free(dev, &crtc_state->event->base);
+ if (event && (event->base.fence || event->base.file_priv)) {
+ drm_event_cancel_free(dev, &event->base);
+ crtc_state->event = NULL;
+ }
}
if (!fence_state)
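
The drm_atomic changes narrow the out-fence user pointer from s64 to s32 because the value written through it is a file descriptor, and an fd is an int; a 64-bit store through that pointer would spill into whatever userspace keeps next to the fd slot. A small userspace illustration of the width mismatch (no DRM calls involved):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* slots[0] plays the caller's out-fence fd field; slots[1] is
         * whatever the caller stored next to it and must not be touched. */
        int32_t slots[2] = { -1, 42 };
        int64_t fd64 = 5;
        int32_t fd32 = 5;

        /* an s64-sized store also wipes slots[1], even though only 32 bits
         * of the reply belong to the kernel */
        memcpy(slots, &fd64, sizeof(fd64));
        printf("after 64-bit store: neighbour = %d\n", slots[1]);

        slots[0] = -1;
        slots[1] = 42;
        memcpy(slots, &fd32, sizeof(fd32));     /* correct s32-sized store */
        printf("after 32-bit store: neighbour = %d\n", slots[1]);
        return 0;
}
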
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 34f757bcabae..4594477dee00 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
- if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
- continue;
-
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, plane_state);
if (ret)
@@ -1685,9 +1682,6 @@ fail:
if (j >= i)
continue;
- if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
- continue;
-
funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
for_each_plane_in_state(old_state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
- if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
- continue;
-
funcs = plane->helper_private;
if (funcs->cleanup_fb)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5a4526289392..7a7019ac9388 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
+ mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->mutex);
+
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
*/
int drm_connector_register(struct drm_connector *connector)
{
- int ret;
+ int ret = 0;
- if (connector->registered)
+ if (!connector->dev->registered)
return 0;
+ mutex_lock(&connector->mutex);
+ if (connector->registered)
+ goto unlock;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
- return ret;
+ goto unlock;
ret = drm_debugfs_connector_add(connector);
if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registered = true;
- return 0;
+ goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
err_sysfs:
drm_sysfs_connector_remove(connector);
+unlock:
+ mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
- if (!connector->registered)
+ mutex_lock(&connector->mutex);
+ if (!connector->registered) {
+ mutex_unlock(&connector->mutex);
return;
+ }
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_debugfs_connector_remove(connector);
connector->registered = false;
+ mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
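
drm_connector_register()/unregister() are now guarded by a per-connector mutex and by dev->registered, so registration attempted before drm_dev_register() finishes is quietly deferred and repeated register/unregister calls become no-ops. A sketch of that idempotent pattern with a pthread mutex standing in for the connector mutex (not the DRM code itself):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Idempotent register/unregister guarded by a per-object mutex, mirroring
 * the shape of the drm_connector_register()/unregister() hunks. */
struct connector {
        pthread_mutex_t lock;
        bool registered;
};

static void connector_register(struct connector *c, bool dev_registered)
{
        if (!dev_registered)            /* device not registered yet: defer */
                return;

        pthread_mutex_lock(&c->lock);
        if (!c->registered) {
                /* sysfs/debugfs hookup would happen here */
                c->registered = true;
        }
        pthread_mutex_unlock(&c->lock);
}

static void connector_unregister(struct connector *c)
{
        pthread_mutex_lock(&c->lock);
        if (c->registered) {
                /* teardown would happen here */
                c->registered = false;
        }
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct connector c = { .lock = PTHREAD_MUTEX_INITIALIZER,
                               .registered = false };

        connector_register(&c, true);
        connector_register(&c, true);   /* second call is a no-op */
        connector_unregister(&c);
        printf("registered = %d\n", c.registered);
        return 0;
}
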
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index aa644487749c..f59771da52ee 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1817,7 +1817,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
+ drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a525751b4559..6594b4088f11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ dev->registered = true;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
+ dev->registered = false;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d6c335584ec..33cd51632721 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -376,7 +376,7 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
- obj->name, obj->refcount.refcount.counter,
+ obj->name, kref_read(&obj->refcount),
off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ffb2ab389d1d..6b68e9088436 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -118,7 +118,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
obj->handle_count,
- atomic_read(&obj->refcount.refcount));
+ kref_read(&obj->refcount));
return 0;
}
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 9f17085b1fdd..c6885a4911c0 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
void drm_mode_object_unreference(struct drm_mode_object *obj)
{
if (obj->free_cb) {
- DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
kref_put(&obj->refcount, obj->free_cb);
}
}
@@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_mode_object_unreference);
void drm_mode_object_reference(struct drm_mode_object *obj)
{
if (obj->free_cb) {
- DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, kref_read(&obj->refcount));
kref_get(&obj->refcount);
}
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF;
+ /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+ if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
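
Because GTF/CVT only generate widths that are multiples of 8, a video=1366x768 command line produces a 1368-wide mode; the hunk snaps hdisplay back to 1366 and shifts hsync_start/hsync_end by one pixel so the timing stays self-consistent. A sketch of that adjustment on a bare timing struct (field names follow the hunk, the sample timings are invented):

#include <stdio.h>

struct mode {
        int hdisplay, hsync_start, hsync_end;
};

/* Mirror the 1368 -> 1366 fixup from drm_mode_create_from_cmdline_mode(). */
static void fixup_1366(struct mode *m, int requested_xres)
{
        if (requested_xres == 1366 && m->hdisplay == 1368) {
                m->hdisplay = 1366;
                m->hsync_start--;
                m->hsync_end--;
        }
}

int main(void)
{
        struct mode m = { 1368, 1436, 1579 };   /* made-up CVT-style timings */

        fixup_1366(&m, 1366);
        printf("%d %d %d\n", m.hdisplay, m.hsync_start, m.hsync_end);
        return 0;
}
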
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..cf8f0128c161 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
if (dev->mode_config.delayed_event) {
+ /*
+ * FIXME:
+ *
+ * Use short (1s) delay to handle the initial delayed event.
+ * This delay should not be needed, but Optimus/nouveau will
+ * fail in a mysterious way if the delayed event is handled as
+ * soon as possible like it is done in
+ * drm_helper_probe_single_connector_modes() in case the poll
+ * was enabled before.
+ */
poll = true;
- delay = 0;
+ delay = HZ;
}
if (poll)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 114dddbd297b..aa6e35ddc87f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -486,7 +486,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
- obj->name, obj->refcount.refcount.counter,
+ obj->name, kref_read(&obj->refcount),
off, etnaviv_obj->vaddr, obj->size);
rcu_read_lock();
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
+ /*
+ * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+ * drm_mm into giving out a low IOVA after address space
+ * rollover. This needs a proper fix.
+ */
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
- DRM_MM_SEARCH_DEFAULT);
+ mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
if (ret != -ENOSPC)
break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f3117fe8..75eeb831ed6a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
BIT_WIN_UPDATED,
- BIT_SUSPENDED
+ BIT_SUSPENDED,
+ BIT_REQUEST_UPDATE
};
struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_vsync_end = m->crtc_vsync_start + 1;
}
- decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
- /* enable clock gate */
- val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
- writel(val, ctx->addr + DECON_CMU);
-
if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
decon_setup_trigger(ctx);
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
return;
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
- /* standalone update */
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+ if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
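
The decon driver now records plane enables/disables in a BIT_REQUEST_UPDATE flag and lets decon_atomic_flush() issue STANDALONE_UPDATE_F only when test_and_clear_bit() finds that flag set, so each request is consumed exactly once. A sketch of the request/consume pattern with a C11 atomic standing in for the kernel bitop (the register write is just a printf):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool request_update;      /* plays the role of BIT_REQUEST_UPDATE */

static void update_plane(int win)
{
        printf("enable window %d\n", win);
        atomic_store(&request_update, true);
}

static void atomic_flush(void)
{
        /* consume the request exactly once, like test_and_clear_bit() */
        if (atomic_exchange(&request_update, false))
                printf("write STANDALONE_UPDATE_F\n");
        else
                printf("nothing to flush\n");
}

int main(void)
{
        update_plane(0);
        atomic_flush();         /* triggers the standalone update */
        atomic_flush();         /* no pending request: skipped */
        return 0;
}
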
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..f7bce8603958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
#include "i915_drv.h"
#include "gvt.h"
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+ intel_vgpu_write_fence(vgpu, i, 0);
+}
+
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+ _clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
- intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
- intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
+ _clear_vgpu_fence(vgpu);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
}
/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_runtime_pm_get(dev_priv);
+ _clear_vgpu_fence(vgpu);
+ intel_runtime_pm_put(dev_priv);
+}
+
+/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 711c31c8d8b4..4a6a2ed65732 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
return 0;
}
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show guest that there isn't any stolen memory.*/
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the bar upper 32bit and let guest to assign the new value
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+ u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+ bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+ if (cmd & PCI_COMMAND_MEMORY) {
+ trap_gttmmio(vgpu, false);
+ map_aperture(vgpu, false);
+ }
+
+ /**
+ * Currently we only do such reset when vGPU is not
+ * owned by any VM, so we simply restore entire cfg
+ * space to default value.
+ */
+ intel_vgpu_init_cfg_space(vgpu, primary);
+}
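
intel_vgpu_reset_cfg_space() inspects the virtual PCI_COMMAND register, tears down the GTT MMIO trap and aperture mapping if memory decoding was enabled, and then simply re-runs intel_vgpu_init_cfg_space() with whatever primary/non-primary class it finds in the space. A sketch of the command-bit check on a mock config space (PCI_COMMAND and PCI_COMMAND_MEMORY as defined by the PCI spec, everything else mocked):

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND        0x04
#define PCI_COMMAND_MEMORY 0x2          /* memory space decoding enable */

/* Decide whether the reset path must unmap resources first, in the spirit of
 * intel_vgpu_reset_cfg_space(); cfg is a mock 256-byte config space. */
static void reset_cfg_space(const uint8_t *cfg)
{
        if (cfg[PCI_COMMAND] & PCI_COMMAND_MEMORY)
                printf("memory decode was on: untrap MMIO, unmap aperture\n");
        /* then restore the whole space to defaults */
        printf("restore config space defaults\n");
}

int main(void)
{
        uint8_t cfg[256] = {0};

        cfg[PCI_COMMAND] = PCI_COMMAND_MEMORY;
        reset_cfg_space(cfg);
        return 0;
}
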
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..e4563984cb1e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (bypass_batch_buffer_scan)
- return 0;
-
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..34083731669d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
- unsigned long add, int gmadr_bytes)
-{
- if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
- return -1;
-
- *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
- BATCH_BUFFER_ADDR_MASK;
- if (gmadr_bytes == 8) {
- *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
- add & BATCH_BUFFER_ADDR_HIGH_MASK;
- }
-
- return 0;
-}
-
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_shadow_bb_entry *entry_obj;
/* pin the gem object to ggtt */
- if (!list_empty(&workload->shadow_bb)) {
- struct intel_shadow_bb_entry *entry_obj =
- list_first_entry(&workload->shadow_bb,
- struct intel_shadow_bb_entry,
- list);
- struct intel_shadow_bb_entry *temp;
+ list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+ struct i915_vma *vma;
- list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
- list) {
- struct i915_vma *vma;
-
- vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
- 4, 0);
- if (IS_ERR(vma)) {
- gvt_err("Cannot pin\n");
- return;
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- /* update the relocate gma with shadow batch buffer*/
- set_gma_to_bb_cmd(entry_obj,
- i915_ggtt_offset(vma),
- gmadr_bytes);
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
}
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ /* update the relocate gma with shadow batch buffer*/
+ entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+ if (gmadr_bytes == 8)
+ entry_obj->bb_start_cmd_va[2] = 0;
}
}
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
- vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
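
prepare_shadow_batch_buffer() no longer rebuilds the batch-buffer-start address with shift/mask helpers; it patches the copied command in place through bb_start_cmd_va, writing the pinned shadow buffer's GGTT offset into dword 1 and zeroing dword 2 when the hardware uses 8-byte graphics addresses. A sketch of that in-place patching (the opcode dword is a placeholder, not the real MI_BATCH_BUFFER_START encoding):

#include <stdint.h>
#include <stdio.h>

/* Patch a copied batch-buffer-start command so it points at the shadow copy,
 * in the shape of the rewritten prepare_shadow_batch_buffer(). */
static void relocate_bb_start(uint32_t *cmd, uint32_t ggtt_offset,
                              int gmadr_bytes)
{
        cmd[1] = ggtt_offset;           /* low 32 bits of the new address */
        if (gmadr_bytes == 8)
                cmd[2] = 0;             /* shadow buffers sit below 4 GiB */
}

int main(void)
{
        uint32_t cmd[3] = { 0x18800001u, 0xdeadbeefu, 0x1u };  /* placeholder */

        relocate_bb_start(cmd, 0x00400000u, 8);
        printf("cmd: %08x %08x %08x\n", cmd[0], cmd[1], cmd[2]);
        return 0;
}
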
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6c5fdf5b2ce2..47dec4acf7ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
- u64 pte;
-#ifdef readq
- pte = readq(addr);
-#else
- pte = ioread32(addr);
- pte |= (u64)ioread32(addr + 4) << 32;
-#endif
- return pte;
+ return readq(addr);
}
static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-#ifdef writeq
writeq(pte, addr);
-#else
- iowrite32((u32)pte, addr);
- iowrite32(pte >> 32, addr + 4);
-#endif
+
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
info->gtt_entry_size;
mem = kzalloc(mm->has_shadow_page_table ?
mm->page_table_entry_size * 2
- : mm->page_table_entry_size,
- GFP_ATOMIC);
+ : mm->page_table_entry_size, GFP_KERNEL);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *mm;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
- struct page *scratch_pt;
+ void *scratch_pt;
unsigned long mfn;
int i;
- void *p;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
- scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}
- p = kmap_atomic(scratch_pt);
- mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
- kunmap_atomic(p);
- __free_page(scratch_pt);
+ gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+ free_page((unsigned long)scratch_pt);
return -EFAULT;
}
gtt->scratch_pt[type].page_mfn = mfn;
- gtt->scratch_pt[type].page = scratch_pt;
+ gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, mfn);
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* scratch_pt[type] indicate the scratch pt/scratch page used by the
* 'type' pt.
* e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
- * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+ * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
*/
if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
se.val64 |= PPAT_CACHED_INDEX;
for (i = 0; i < page_entry_num; i++)
- ops->set_entry(p, &se, i, false, 0, vgpu);
+ ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
}
- kunmap_atomic(p);
-
return 0;
}
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
- void *page_addr;
+ void *page;
gvt_dbg_core("init gtt\n");
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
- gvt->gtt.scratch_ggtt_page =
- alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
- if (!gvt->gtt.scratch_ggtt_page) {
+ page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!page) {
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
+ gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
- gvt->gtt.scratch_ggtt_mfn =
- intel_gvt_hypervisor_virt_to_mfn(page_addr);
+ gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate scratch ggtt page\n");
__free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}
+
+/**
+ * intel_vgpu_reset_gtt - reset the all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+ int i;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ if (!dmlr)
+ return;
+
+ intel_vgpu_reset_ggtt(vgpu);
+
+ /* clear scratch page for security */
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL)
+ memset(page_address(vgpu->gtt.scratch_pt[i].page),
+ 0, PAGE_SIZE);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b315ab3593ec..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_clean_vgpu_types(gvt);
+ idr_destroy(&gvt->vgpu_idr);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("init gvt device\n");
+ idr_init(&gvt->vgpu_idr);
+
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
- return ret;
+ goto out_clean_idr;
ret = intel_gvt_load_firmware(gvt);
if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+ idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0af17016f33f..e227caf5859e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..ab2ea157da4c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
- void *read, void *write)
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
default:
/*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset);
- return 1;
+ return -EINVAL;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes, unsigned long bitmap)
-{
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
-
- vgpu->resetting = true;
-
- intel_vgpu_stop_schedule(vgpu);
- /*
- * The current_vgpu will set to NULL after stopping the
- * scheduler when the reset is triggered by current vgpu.
- */
- if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- }
-
- intel_vgpu_reset_execlist(vgpu, bitmap);
-
- /* full GPU reset */
- if (bitmap == 0xff) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_vgpu_clean_gtt(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- setup_vgpu_mmio(vgpu);
- populate_pvinfo_page(vgpu);
- intel_vgpu_init_gtt(vgpu);
- }
-
- vgpu->resetting = false;
-
- return 0;
-}
-
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
+ void *p_data, unsigned int bytes)
{
+ unsigned int engine_mask = 0;
u32 data;
- u64 bitmap = 0;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
- bitmap = 0xff;
- }
- if (data & GEN6_GRDOM_RENDER) {
- gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- bitmap |= (1 << RCS);
- }
- if (data & GEN6_GRDOM_MEDIA) {
- gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- bitmap |= (1 << VCS);
- }
- if (data & GEN6_GRDOM_BLT) {
- gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- bitmap |= (1 << BCS);
- }
- if (data & GEN6_GRDOM_VECS) {
- gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- bitmap |= (1 << VECS);
- }
- if (data & GEN8_GRDOM_MEDIA2) {
- gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- bitmap |= (1 << VCS2);
+ engine_mask = ALL_ENGINES;
+ } else {
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ engine_mask |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ engine_mask |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ engine_mask |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ engine_mask |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ engine_mask |= (1 << VCS2);
+ }
}
- return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+ return 0;
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- int rc = 0;
unsigned int id = 0;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
id = VECS;
break;
default:
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);
- return rc;
+ return 0;
}
static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faaae07ae487..3f656e3a6e5a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
- char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
type->fence);
}
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *type_attrs[] = {
- &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
+ int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
if (!type) {
gvt_err("failed to find type %s to create\n",
kobject_name(kobj));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
- gvt_err("create intel vgpu failed\n");
- return -EINVAL;
+ ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+ gvt_err("failed to create intel vgpu: %d\n", ret);
+ goto out;
}
INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
- return 0;
+ ret = 0;
+
+out:
+ return ret;
}
static int intel_vgpu_remove(struct mdev_device *mdev)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..4df078bc5d04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
- mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
- if (!mmio && !vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
- vgpu->id, offset, bytes, *(u32 *)p_data);
-
- if (offset == 0x206c) {
- gvt_err("------------------------------------------\n");
- gvt_err("vgpu%d: likely triggers a gfx reset\n",
- vgpu->id);
- gvt_err("------------------------------------------\n");
- vgpu->mmio.disable_warn_untrack = true;
- }
- }
-
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
- } else
+ } else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+ }
+
if (ret)
goto err;
@@ -302,3 +303,56 @@ err:
mutex_unlock(&gvt->lock);
return ret;
}
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ intel_vgpu_reset_mmio(vgpu);
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
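The three helpers above keep the old setup_vgpu_mmio() layout: vreg and sreg live in one vzalloc() block of twice the device MMIO size, with sreg aliased at the midpoint. That is why intel_vgpu_clean_mmio() only vfree()s vreg, and why a reset can simply re-copy the firmware snapshot. A minimal sketch of that layout, using the same field names as the diff:

    /* One allocation backs both register files:
     *
     *   vreg                       sreg
     *   |<------ mmio_size ------->|<------ mmio_size ------->|
     */
    vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
    vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;  /* alias, not a 2nd alloc */

    /* Reset == re-seed both copies from the firmware-captured MMIO image. */
    memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
    memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);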
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e366a3..3bc620f56f35 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
*offset; \
})
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 81cd921770c6..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
vgpu->id))
return -EINVAL;
- vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
- GFP_DMA32 | __GFP_ZERO,
- INTEL_GVT_OPREGION_PORDER);
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+ __GFP_ZERO,
+ get_order(INTEL_GVT_OPREGION_SIZE));
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- INTEL_GVT_OPREGION_PORDER);
+ get_order(INTEL_GVT_OPREGION_SIZE));
vgpu_opregion(vgpu)->va = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204
#define INTEL_GVT_OPREGION_PAGES 2
-#define INTEL_GVT_OPREGION_PORDER 1
-#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
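With the hard-coded INTEL_GVT_OPREGION_PORDER gone, both the allocation and the free derive the page order from the region size, so the two can never drift apart if INTEL_GVT_OPREGION_PAGES changes. The arithmetic is small enough to show inline (4 KiB pages assumed):

    /* get_order() maps a byte count to the smallest page order covering it:
     *   get_order(1 * 4096) == 0   (1 page)
     *   get_order(2 * 4096) == 1   (2 pages == INTEL_GVT_OPREGION_SIZE today)
     *   get_order(3 * 4096) == 2   (rounds up to 4 pages)
     * so __get_free_pages() and free_pages() always agree on the order.
     */
    unsigned long va = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
    				    get_order(INTEL_GVT_OPREGION_SIZE));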
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
+ struct intel_vgpu *vgpu;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
+ vgpu = workload->vgpu;
- if (!workload->status && !workload->vgpu->resetting) {
+ if (!workload->status && !vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(workload->vgpu,
- event);
+ intel_vgpu_trigger_virtual_event(vgpu, event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
- atomic_dec(&workload->vgpu->running_workload_num);
-
list_del_init(&workload->list);
workload->complete(workload);
+ atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
@@ -459,11 +459,11 @@ complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
- complete_current_workload(gvt, ring_id);
-
if (workload->req)
i915_gem_request_put(fetch_and_zero(&workload->req));
+ complete_current_workload(gvt, ring_id);
+
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28bff51..2833dfa8c9ae 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
- void *bb_start_cmd_va;
+ u32 *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..7295bc8e12fb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
-
- if (vgpu->mmio.vreg)
- memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
- else {
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
- if (!vgpu->mmio.vreg)
- return -ENOMEM;
- }
-
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
- memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
- /* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
- return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
- u16 *gmch_ctl;
- int i;
-
- memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
- info->cfg_space_size);
-
- if (!param->primary) {
- vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- }
-
- /* Show guest that there isn't any stolen memory.*/
- gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
- *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
- intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
- gvt_aperture_pa_base(gvt), true);
-
- vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_MASTER);
- /*
- * Clear the bar upper 32bit and let guest to assign the new value
- */
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
- memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
- vgpu->cfg_space.bar[i].size = pci_resource_len(
- gvt->dev_priv->drm.pdev, i * 2);
- vgpu->cfg_space.bar[i].tracked = false;
- }
-}
-
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
if (low_avail / min_low == 0)
break;
gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
gvt->types[i].max_instance = low_avail / min_low;
gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
- setup_vgpu_cfg_space(vgpu, param);
+ intel_vgpu_init_cfg_space(vgpu, param->primary);
- ret = setup_vgpu_mmio(vgpu);
+ ret = intel_vgpu_init_mmio(vgpu);
if (ret)
- goto out_free_vgpu;
+ goto out_clean_idr;
ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}
/**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when the user wants to reset a virtual GPU through
+ * a device model reset or a GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to its default state, as when it was created. This function
+ * is required both for functionality and security reasons. The ultimate goal
+ * of a vGPU FLR is to allow a vGPU instance to be reused by virtual machines.
+ * When we assign a vGPU to a virtual machine we must issue such a reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
+ * engines (Render, Blitter, Video, Video Enhancement), as defined by the GPU
+ * specification. Unlike the FLR, a GT reset only resets particular resources
+ * of a vGPU per the reset request. The guest driver can issue a GT reset by
+ * programming the virtual GDRST register to reset specific virtual GPU
+ * engines or all engines.
+ *
+ * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
+ * The parameter engine_mask specifies the engines that need to be reset.
+ * If ALL_ENGINES is given for engine_mask, the caller requests a full GT
+ * reset and we will reset all virtual GPU engines. For FLR, engine_mask is
+ * ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ gvt_dbg_core("------------------------------------------\n");
+ gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
+ vgpu->id, dmlr, engine_mask);
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * The current_vgpu will be set to NULL after stopping the
+ * scheduler when the reset is triggered by the current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+ /* full GPU reset or device model level reset */
+ if (engine_mask == ALL_ENGINES || dmlr) {
+ intel_vgpu_reset_gtt(vgpu, dmlr);
+ intel_vgpu_reset_resource(vgpu);
+ intel_vgpu_reset_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+
+ if (dmlr)
+ intel_vgpu_reset_cfg_space(vgpu);
+ }
+
+ vgpu->resetting = false;
+ gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+ gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
* @vgpu: virtual GPU
*
* This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->lock);
+ intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+ mutex_unlock(&vgpu->gvt->lock);
}
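Taken together, the vgpu.c changes split the reset path into one locked worker plus thin entry points: code that already holds gvt->lock (for example the GDRST handler earlier in this patch) calls intel_gvt_reset_vgpu_locked() directly with the engine mask it decoded, while external callers use intel_gvt_reset_vgpu(), which takes the lock and requests a device model level reset. A short illustrative sketch of the two call patterns (not additional driver code):

    /* GT reset of just the render engine; caller already holds gvt->lock. */
    intel_gvt_reset_vgpu_locked(vgpu, false, 1 << RCS);

    /* Device Model Level Reset (FLR-like): lock taken internally,
     * engine_mask is ignored because the whole vGPU is reset. */
    intel_gvt_reset_vgpu(vgpu);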
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c2841..728ca3ea74d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
- WARN_ON(!IS_KABYLAKE(dev_priv));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2378,7 +2379,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(dev_priv);
- if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
DRM_DEBUG_KMS("Device suspended\n");
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev_priv);
+ i915_gem_restore_fences(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224aeabf8..8493e19b563a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,6 +1012,8 @@ struct intel_fbc {
struct work_struct underrun_work;
struct intel_fbc_state_cache {
+ struct i915_vma *vma;
+
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
} plane;
struct {
- u64 ilk_ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
- int fence_reg;
- unsigned int tiling_mode;
} fb;
} state_cache;
struct intel_fbc_reg_params {
+ struct i915_vma *vma;
+
struct {
enum pipe pipe;
enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
} crtc;
struct {
- u64 ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
- int fence_reg;
} fb;
int cfb_size;
@@ -1977,6 +1976,11 @@ struct drm_i915_private {
struct i915_frontbuffer_tracking fb_tracking;
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
u16 orig_clock;
bool mchbar_need_disable;
@@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
}
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dd7fc662859..24b5b046754b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
- if (ret)
- return ret;
-
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
- unsigned long unwritten;
-
- /* The physical object once assigned is fixed for the lifetime
- * of the obj, so we can safely drop the lock and continue
- * to access vaddr.
- */
- mutex_unlock(&dev->struct_mutex);
- unwritten = copy_from_user(vaddr, user_data, args->size);
- mutex_lock(&dev->struct_mutex);
- if (unwritten) {
- ret = -EFAULT;
- goto out;
- }
- }
+ if (copy_from_user(vaddr, user_data, args->size))
+ return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(dev));
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
-out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
- return ret;
+ return 0;
}
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -2036,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- if (WARN_ON(reg->pin_count))
- continue;
+ /* Ideally we want to assert that the fence register is not
+ * live at this point (i.e. that no piece of code will be
+ * trying to write through fence + GTT, as that both violates
+ * our tracking of activity and associated locking/barriers,
+ * but also is illegal given that the hw is powered down).
+ *
+ * Previously we used reg->pin_count as a "liveness" indicator.
+ * That is not sufficient, and we need a more fine-grained
+ * tool if we want to have a sanity check here.
+ */
if (!reg->vma)
continue;
@@ -3504,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
- if (obj->cache_dirty) {
+ if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015c..d534a316a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
}
/* Unbinding will emit any required flushes */
+ ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 097d9d8c2315..b8b877c91b0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
-
- /* From drm_mm perspective address space is continuous,
- * so from this point we're always using non-canonical
- * form internally.
- */
- exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
+ /* From drm_mm perspective address space is continuous,
+ * so from this point we're always using non-canonical
+ * form internally.
+ */
+ exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
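The validate_exec_list() hunk moves the canonical-to-non-canonical conversion out of the pinned-offset check so it is applied to every entry before drm_mm sees the address. On gen8+ the canonical form is just bit 47 sign-extended through the upper bits; a self-contained sketch of both directions (helper names here are illustrative, not the driver's):

    #include <stdint.h>

    /* Canonical form: replicate bit 47 into bits 48..63 (sign extension). */
    static uint64_t to_canonical48(uint64_t addr)
    {
    	return (uint64_t)(((int64_t)addr << 16) >> 16);
    }

    /* Non-canonical form: drop the sign extension, keep the low 48 bits. */
    static uint64_t to_noncanonical48(uint64_t addr)
    {
    	return addr & ((1ull << 48) - 1);
    }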
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 4b3ff3e5b911..d09c74973cb3 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
- max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+ if (swiotlb_nr_tbl()) {
+ unsigned int max_segment;
+
+ max_segment = swiotlb_max_segment();
+ if (max_segment) {
+ max_segment = max_t(unsigned int, max_segment,
+ PAGE_SIZE) >> PAGE_SHIFT;
+ max_order = min(max_order, ilog2(max_segment));
+ }
+ }
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
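The swiotlb hunk above replaces the fixed IO_TLB_SEGPAGES cap with one derived from swiotlb_max_segment(): the segment size in bytes is clamped to at least one page, converted to a page count, and ilog2() of that count bounds max_order. With 4 KiB pages and, say, a 256 KiB segment, the arithmetic works out as in this annotated restatement:

    unsigned int max_segment = swiotlb_max_segment();        /* e.g. 262144 bytes */
    if (max_segment) {
    	max_segment = max_t(unsigned int, max_segment,
    			    PAGE_SIZE) >> PAGE_SHIFT;         /* 262144 / 4096 = 64 pages */
    	max_order = min(max_order, ilog2(max_segment));       /* ilog2(64) = 6 */
    }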
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 6a368de9d81e..ecfefb9d42e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -256,7 +256,7 @@ extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
- return atomic_read(&obj->base.refcount.refcount) == 0;
+ return kref_read(&obj->base.refcount) == 0;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b5..e924a9516079 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return ret;
}
+ trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dbe9fb41ae53..8d3e515f27ba 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
+ intel_state->vma = NULL;
+
return state;
}
@@ -100,6 +102,24 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+ /*
+ * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+ * We currently don't clear all planes during driver unload, so we have
+ * to be able to unpin vma here for now.
+ *
+ * Normally this can only happen during unload when kmscon is disabled
+ * and userspace doesn't attempt to set a framebuffer at all.
+ */
+ if (vma) {
+ mutex_lock(&plane->dev->struct_mutex);
+ intel_unpin_fb_vma(vma);
+ mutex_unlock(&plane->dev->struct_mutex);
+ }
+
drm_atomic_helper_plane_destroy_state(plane, state);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5601d4..588470eb8d39 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
+ bool ret = false;
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
*/
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
- return true;
+ ret = true;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
-
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
- return false;
+ return ret;
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3dc8724df400..891c86aef99d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_pin_fence(vma);
}
+ i915_vma_get(vma);
err:
intel_runtime_pm_put(dev_priv);
return vma;
}
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_ggtt_view view;
- struct i915_vma *vma;
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
- WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
- intel_fill_fb_ggtt_view(&view, fb, rotation);
- vma = i915_gem_object_to_ggtt(obj, &view);
+ if (WARN_ON_ONCE(!vma))
+ return;
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_put(vma);
}
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* We only keep the x/y offsets, so push all of the
* gtt offset into the x/y offsets.
*/
- _intel_adjust_tile_offset(&x, &y, tile_size,
- tile_width, tile_height, pitch_tiles,
+ _intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
gtt_offset_rotated * tile_size, 0);
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
- struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
@@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
- i = to_intel_crtc(c);
+ struct intel_plane_state *state;
if (c == &intel_crtc->base)
continue;
- if (!i->active)
+ if (!to_intel_crtc(c)->active)
continue;
- fb = c->primary->fb;
- if (!fb)
+ state = to_intel_plane_state(c->primary->state);
+ if (!state->vma)
continue;
- obj = intel_fb_obj(fb);
- if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+ if (intel_plane_ggtt_offset(state) == plane_config->base) {
+ fb = c->primary->fb;
drm_framebuffer_reference(fb);
goto valid_fb;
}
@@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ mutex_lock(&dev->struct_mutex);
+ intel_state->vma =
+ intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(intel_state->vma)) {
+ DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+ intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+ intel_state->vma = NULL;
+ drm_framebuffer_unreference(fb);
+ return;
+ }
+
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
@@ -2967,6 +2978,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (!plane_state->base.visible)
+ return 0;
+
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
@@ -3097,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE(DSPADDR(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
}
POSTING_READ(reg);
@@ -3200,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3223,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
}
}
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
- unsigned int rotation)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_ggtt_view view;
- struct i915_vma *vma;
-
- intel_fill_fb_ggtt_view(&view, fb, rotation);
-
- vma = i915_gem_object_to_ggtt(obj, &view);
- if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
- view.type))
- return -1;
-
- return i915_ggtt_offset(vma);
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -3434,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
}
I915_WRITE(PLANE_SURF(pipe, 0),
- intel_fb_gtt_offset(fb, rotation) + surf_addr);
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -4265,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->unpin_work);
-
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
+
+ queue_work(dev_priv->wq, &work->unpin_work);
}
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -6846,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
}
state = drm_atomic_state_alloc(crtc->dev);
+ if (!state) {
+ DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.id, crtc->name);
+ return;
+ }
+
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
/* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11246,7 @@ found:
}
old->restore_state = restore_state;
+ drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11522,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
flush_work(&work->mmio_work);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+ intel_unpin_fb_vma(work->old_vma);
i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
@@ -12232,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
}
- work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
- work->gtt_offset += intel_crtc->dspaddr_offset;
+ work->old_vma = to_intel_plane_state(primary->state)->vma;
+ to_intel_plane_state(primary->state)->vma = vma;
+
+ work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
/*
@@ -12287,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_request:
i915_add_request_no_flush(request);
cleanup_unpin:
- intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+ to_intel_plane_state(primary->state)->vma = work->old_vma;
+ intel_unpin_fb_vma(vma);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
unlock:
@@ -14512,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
break;
case FENCE_FREE:
- drm_atomic_state_put(&state->base);
- break;
+ {
+ struct intel_atomic_helper *helper =
+ &to_i915(state->base.dev)->atomic_helper;
+
+ if (llist_add(&state->freed, &helper->free_list))
+ schedule_work(&helper->free_work);
+ break;
+ }
}
return NOTIFY_DONE;
@@ -14774,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
DRM_DEBUG_KMS("failed to pin object\n");
return PTR_ERR(vma);
}
+
+ to_intel_plane_state(new_state)->vma = vma;
}
return 0;
@@ -14792,19 +14807,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct intel_plane_state *old_intel_state;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
- old_intel_state = to_intel_plane_state(old_state);
-
- if (!obj && !old_obj)
- return;
+ struct i915_vma *vma;
- if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev_priv)->cursor_needs_physical))
- intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+ if (vma)
+ intel_unpin_fb_vma(vma);
}
int
@@ -15146,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
- addr = i915_gem_object_ggtt_offset(obj, NULL);
+ addr = intel_plane_ggtt_offset(state);
else
addr = obj->phys_handle->busaddr;
@@ -16392,6 +16400,18 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
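The atomic_helper pieces added above are the usual lock-free deferred-free pattern: any context may push a state onto the llist, and llist_add() returning true (the list was empty) tells exactly one producer to schedule the draining work, which then drops the references from a sleepable context. A condensed sketch of the pattern with the driver's field names (the real worker recovers dev_priv via container_of()):

    /* Producer (may run from the i915_sw_fence notifier, atomic context): */
    if (llist_add(&state->freed, &helper->free_list))
    	schedule_work(&helper->free_work);      /* only the first entry kicks it */

    /* Worker: detach the whole list at once and put each state. */
    freed = llist_del_all(&dev_priv->atomic_helper.free_list);
    llist_for_each_entry_safe(state, next, freed, freed)
    	drm_atomic_state_put(&state->base);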
@@ -16411,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
+ INIT_WORK(&dev_priv->atomic_helper.free_work,
+ intel_atomic_helper_free_state);
+
intel_init_quirks(dev);
intel_init_pm(dev_priv);
@@ -17024,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev)
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_put(state);
+ if (state)
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *c;
- struct drm_i915_gem_object *obj;
intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
intel_setup_overlay(dev_priv);
-
- /*
- * Make sure any fbs we allocated at startup are properly
- * pinned & fenced. When we do the allocation it's too early
- * for this.
- */
- for_each_crtc(dev, c) {
- struct i915_vma *vma;
-
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
- continue;
-
- mutex_lock(&dev->struct_mutex);
- vma = intel_pin_and_fence_fb_obj(c->primary->fb,
- c->primary->state->rotation);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin boot fb on pipe %d\n",
- to_intel_crtc(c)->pipe);
- drm_framebuffer_unreference(c->primary->fb);
- c->primary->fb = NULL;
- c->primary->crtc = c->primary->state->crtc = NULL;
- update_state_fb(c->primary);
- c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
- }
- }
}
int intel_connector_register(struct drm_connector *connector)
@@ -17094,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_work(&dev_priv->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
intel_disable_gt_powersave(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 58a756f2f224..a2f0e070d38d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
return NULL;
if ((encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP) &&
+ encoder->type == INTEL_OUTPUT_EDP ||
+ encoder->type == INTEL_OUTPUT_DP_MST) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c216a67..03a2112004f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,11 +370,14 @@ struct intel_atomic_state {
struct skl_wm_values wm_results;
struct i915_sw_fence commit_ready;
+
+ struct llist_node freed;
};
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect clip;
+ struct i915_vma *vma;
struct {
u32 offset;
@@ -1044,6 +1047,7 @@ struct intel_flip_work {
struct work_struct mmio_work;
struct drm_crtc *crtc;
+ struct i915_vma *old_vma;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
@@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+ return i915_ggtt_offset(state->vma);
+}
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..f3a1d6a5cabe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= params->fb.fence_reg;
+ fbc_ctl |= params->vma->fence->id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+ if (params->vma->fence) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
- dpfc_ctl |= params->fb.fence_reg;
+ dpfc_ctl |= params->vma->fence->id;
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ SNB_CPU_FENCE_ENABLE |
+ params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
- I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE,
+ i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ if (params->vma->fence) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ SNB_CPU_FENCE_ENABLE |
+ params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
- struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
- return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj;
+
+ cache->vma = NULL;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
- obj = intel_fb_obj(fb);
-
- /* FIXME: We lack the proper locking here, so only run this on the
- * platforms that need. */
- if (IS_GEN(dev_priv, 5, 6))
- cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
- cache->fb.fence_reg = get_fence_id(fb);
- cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+ cache->vma = plane_state->vma;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!cache->plane.visible) {
+ if (!cache->vma) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* so have no fence associated with it) due to aperture constaints
* at the time of pinning.
*/
- if (cache->fb.tiling_mode != I915_TILING_X ||
- cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+ if (!cache->vma->fence) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
+ params->vma = cache->vma;
+
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
params->fb.pixel_format = cache->fb.pixel_format;
params->fb.stride = cache->fb.stride;
- params->fb.fence_reg = cache->fb.fence_reg;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
- params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb08982dc0b..f4a8c4fc57c4 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ intel_unpin_fb_vma(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
- intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ if (!ifbdev)
+ return;
+
ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..beabc17e7c8a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *batch,
uint32_t index)
{
- struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
- /*
- * WaDisableLSQCROPERFforOCL:kbl
- * This WA is implemented in skl_init_clock_gating() but since
- * this batch updates GEN8_L3SQCREG4 with default value we need to
- * set this bit here to retain the WA during flush.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637dc1fdf..91cb4c422ad5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:kbl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
-
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8f131a08d440..242a73e66d82 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane),
- intel_fb_gtt_offset(fb, rotation) + surf_addr);
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane),
- intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
- intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
- intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 14ff87686a36..686a580c711a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_mmu *mmu;
int ret;
adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->aspace->mmu;
- if (mmu) {
+ if (gpu->aspace && gpu->aspace->mmu) {
+ struct msm_mmu *mmu = gpu->aspace->mmu;
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd8745dbc..c396d459a9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
-
- for_each_plane_in_state(state, plane, plane_state, i)
- mdp5_plane_complete_commit(plane, plane_state);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc101171..cdfc63d90c7b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
-
- bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7bc212..25d9d0a97156 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
- drm_printf(p, "\tpending=%u\n", pstate->pending);
}
static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->pending = false;
-
return &mdp5_state->base;
}
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
- /* We don't allow faster-than-vblank updates.. if we did add this
- * some day, we would need to disallow in cases where hwpipe
- * changes
- */
- if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
- return -EBUSY;
-
max_width = config->hw->lm.max_width << 16;
max_height = config->hw->lm.max_height << 16;
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
- struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
DBG("%s: update", plane->name);
- mdp5_state->pending = true;
-
if (plane_enabled(state)) {
int ret;
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return pstate->hwpipe->flush_mask;
}
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
- pstate->pending = false;
-}
-
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c7e261..1974ccb781de 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ if (!priv->aspace[id])
+ continue;
msm_gem_unmap_vma(priv->aspace[id],
&msm_obj->domain[id], msm_obj->sgt);
}
@@ -640,7 +642,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
- obj->name, obj->refcount.refcount.counter,
+ obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
for (id = 0; id < priv->num_aspaces; id++)
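The msm_gem.c hunk above (like the later nouveau_fence.c, ttm_bo.c and ttm_object.c hunks) stops peeking at obj->refcount.refcount.counter and uses the kref_read() accessor instead, which keeps debug output working when the kref internals change. A minimal sketch of the accessor pattern, with a hypothetical demo_obj type standing in for the GEM object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical refcounted object, used only to illustrate kref_read(). */
struct demo_obj {
        struct kref refcount;
        int payload;
};

static void demo_obj_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, refcount);

        kfree(obj);
}

static void demo_obj_dump(struct demo_obj *obj)
{
        /* kref_read() replaces direct reads of the atomic counter */
        pr_info("demo_obj %p refs=%u\n", obj, kref_read(&obj->refcount));
}

static void demo_obj_put(struct demo_obj *obj)
{
        kref_put(&obj->refcount, demo_obj_release);
}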
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..e64f52464ecf 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
- return clock;
+ return clock / 1000;
}
ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da1da4e..6a157763dfc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
+ if (!dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc41c77..bc85a45f91cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
+ if (!drm_dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
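Both nouveau hunks above (nouveau_display.c and nouveau_drm.c) only call drm_kms_helper_poll_enable() when dev->mode_config.poll_enabled is not already set, so connector polling is not enabled a second time on runtime resume. The guard, as a small hedged helper (the wrapper name is illustrative):

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

/* Only (re)enable output polling if it is not already flagged as enabled. */
static void demo_poll_enable_once(struct drm_device *dev)
{
        if (!dev->mode_config.poll_enabled)
                drm_kms_helper_poll_enable(dev);
}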
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..42c1fa53d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
struct backlight_device *backlight;
struct list_head bl_connectors;
struct work_struct hpd_work;
+ struct work_struct fbcon_work;
+ int fbcon_new_state;
#ifdef CONFIG_ACPI
struct notifier_block acpi_nb;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dcd4ad7..fa2d0a978ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+ }
+}
+
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+ * init/deinit from a separate work thread
+ */
+ schedule_work(&drm->fbcon_work);
}
int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
return -ENOMEM;
drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
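The nouveau_fbcon.c change above defers the console_lock()-protected suspend/resume handling to a work item, because runtime resume can be triggered from a sysfs write while the caller already holds the console lock. A minimal sketch of the deferral pattern (struct and function names are illustrative, not the driver's):

#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct work_struct state_work;
        int new_state;          /* written by the requester, read by the worker */
};

static void demo_state_work(struct work_struct *work)
{
        struct demo_dev *dd = container_of(work, struct demo_dev, state_work);
        int state = READ_ONCE(dd->new_state);

        /*
         * Safe to take console_lock() here: the worker runs in its own
         * process context, not nested inside whoever scheduled it.
         */
        console_lock();
        /* ... apply 'state' to the framebuffer console ... */
        console_unlock();
}

static void demo_set_state(struct demo_dev *dd, int state)
{
        dd->new_state = state;
        schedule_work(&dd->state_work); /* never blocks on console_lock */
}

/* during init: INIT_WORK(&dd->state_work, demo_state_work); */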
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a6126c93f215..88ee60d1b907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -527,7 +527,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f)
* caller should have a reference on the fence,
* else fence could get freed here
*/
- WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
+ WARN_ON(kref_read(&fence->base.refcount) <= 1);
/*
* This needs uevents to work correctly, but dma_fence_add_callback relies on
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ccdce1b4eec4..d5e58a38f160 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
+ struct mutex mutex;
};
int nv84_fence_context_new(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 187ecdb82002..21a5775028cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
}
/* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
int nouveau_led_init(struct drm_device *dev);
void nouveau_led_suspend(struct drm_device *dev);
void nouveau_led_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..1fba38622744 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
- if (argv->v0.object == 0ULL)
+ if (argv->v0.object == 0ULL &&
+ argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2c2c64507661..32097fd615fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
}
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event)
+ drm_crtc_vblank_get(crtc);
+ }
+
/* Update plane(s). */
for_each_plane_in_state(state, plane, plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
}
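The nv50_display.c hunk above takes a vblank reference for every CRTC with a pending event before the planes are pushed, and drops it again right after the completion event is sent, keeping the vblank interrupt alive for the whole flip. A hedged sketch of the balanced get/put shape, using the helpers of the kernel version this diff targets:

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/*
 * Illustrative commit-tail fragment: hold a vblank reference per CRTC with
 * a pending event, send the event once the flip has been programmed, then
 * drop the reference.
 */
static void demo_send_flip_events(struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        unsigned long flags;
        int i;

        for_each_crtc_in_state(state, crtc, crtc_state, i)
                if (crtc->state->event)
                        WARN_ON(drm_crtc_vblank_get(crtc) != 0);

        /* ... program the hardware, wait for the update to latch ... */

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (!crtc->state->event)
                        continue;
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                crtc->state->event = NULL;
                drm_crtc_vblank_put(crtc);
        }
}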
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..f0b322bec7df 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+ mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
+ mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
+ mutex_unlock(&priv->mutex);
if (ret)
nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
+ mutex_init(&priv->mutex);
+
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 6f0436df0219..f8f2f16c22a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
);
}
for (i = 0; i < size; i++)
- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8));
nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 567466f93cd5..0db8efbf1c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
case 0x94:
case 0x96:
case 0x98:
- case 0xaa:
- case 0xac:
return true;
default:
break;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 4a90c690f09e..74a9968df421 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1033,7 +1033,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
- omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+ omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index fb16070b266e..4a4f9533c53b 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
}
if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
- x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
- y >= (crtc->y + crtc->mode.crtc_vdisplay))
+ x >= (crtc->x + crtc->mode.hdisplay) ||
+ y >= (crtc->y + crtc->mode.vdisplay))
goto out_of_bounds;
x += xorigin;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea0002b539..30bd4a6a9d46 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -366,11 +367,10 @@ static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
/* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown.
- * unfortunately we can't detect certain
- * hypervisors so just do this all the time.
+ * is torn down properly on reboot/shutdown
*/
- radeon_pci_remove(pdev);
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
}
static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0bcffd8a7bd3..96683f5b2b1b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
- args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = (u64)man->size << PAGE_SHIFT;
+ args->vram_size = (u64)man->size << PAGE_SHIFT;
+ args->vram_visible = rdev->mc.visible_vram_size;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e8a38d296855..414776811e71 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
int err;
int new_fw = 0;
bool new_smc = false;
+ bool si58_fw = false;
+ bool banks2_fw = false;
DRM_DEBUG("\n");
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
((rdev->pdev->device == 0x6660) ||
(rdev->pdev->device == 0x6663) ||
(rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667))) ||
- ((rdev->pdev->revision == 0xc3) &&
- (rdev->pdev->device == 0x6665)))
+ (rdev->pdev->device == 0x6667))))
new_smc = true;
+ else if ((rdev->pdev->revision == 0xc3) &&
+ (rdev->pdev->device == 0x6665))
+ banks2_fw = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
+ /* this memory configuration requires special firmware */
+ if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ si58_fw = true;
+
DRM_INFO("Loading %s Microcode\n", new_chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+ if (si58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- if (new_smc)
+ if (banks2_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+ else if (new_smc)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 13ba73fd9b68..2944916f7102 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (rdev->family == CHIP_OLAND) {
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d5063618efa7..ffc6cb55c78c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -140,8 +140,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
- BUG_ON(atomic_read(&bo->list_kref.refcount));
- BUG_ON(atomic_read(&bo->kref.refcount));
+ BUG_ON(kref_read(&bo->list_kref));
+ BUG_ON(kref_read(&bo->kref));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
@@ -181,61 +181,46 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
-int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- int put_count = 0;
if (bdev->driver->lru_removal)
bdev->driver->lru_removal(bo);
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
- ++put_count;
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
- ++put_count;
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
-
- return put_count;
-}
-
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
-void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
- bool never_free)
-{
- kref_sub(&bo->list_kref, count,
- (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
- int put_count;
-
spin_lock(&bo->glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_del_from_lru(bo);
spin_unlock(&bo->glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
if (bdev->driver->lru_removal)
bdev->driver->lru_removal(bo);
- put_count = ttm_bo_del_from_lru(bo);
- ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
@@ -447,7 +432,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- int put_count;
int ret;
spin_lock(&glob->lru_lock);
@@ -455,13 +439,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
if (!ret) {
if (!ttm_bo_wait(bo, false, true)) {
- put_count = ttm_bo_del_from_lru(bo);
-
+ ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- ttm_bo_list_ref_sub(bo, put_count, true);
-
return;
} else
ttm_bo_flush_all_fences(bo);
@@ -504,7 +485,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
- int put_count;
int ret;
ret = ttm_bo_wait(bo, false, true);
@@ -554,15 +534,13 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
}
- put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- ++put_count;
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- ttm_bo_list_ref_sub(bo, put_count, true);
-
return 0;
}
@@ -740,7 +718,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
- int ret = -EBUSY, put_count;
+ int ret = -EBUSY;
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
@@ -771,13 +749,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
BUG_ON(ret != 0);
- ttm_bo_list_ref_sub(bo, put_count, true);
-
ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
@@ -1669,7 +1645,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
- int put_count;
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
@@ -1692,11 +1667,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
return ret;
}
- put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
-
/**
* Move to system cached
*/
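The ttm_bo.c rework above removes the put_count bookkeeping and ttm_bo_list_ref_sub()/kref_sub() in favour of dropping one list reference with kref_put() at the exact point where the BO is unlinked from a list; ttm_bo_ref_bug() BUGs if that list reference was unexpectedly the last one. A minimal sketch of the same shape with hypothetical names:

#include <linux/bug.h>
#include <linux/kref.h>
#include <linux/list.h>

struct demo_bo {
        struct kref list_kref;  /* one reference per list the object is on */
        struct list_head lru;
        struct list_head swap;
};

/*
 * The list reference must never be the last reference while the object is
 * still being unlinked, so releasing it here is a bug.
 */
static void demo_bo_ref_bug(struct kref *kref)
{
        BUG();
}

static void demo_bo_del_from_lru(struct demo_bo *bo)
{
        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                kref_put(&bo->list_kref, demo_bo_ref_bug);
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                kref_put(&bo->list_kref, demo_bo_ref_bug);
        }
}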
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index d35bc491e8de..5e1bcabffef5 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -48,9 +48,7 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- unsigned put_count = ttm_bo_del_from_lru(bo);
-
- ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_del_from_lru(bo);
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 4f5fa8d65fe9..fdb451e3ec01 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -304,7 +304,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
* Verify that the ref->obj pointer was actually valid!
*/
rmb();
- if (unlikely(atomic_read(&ref->kref.refcount) == 0))
+ if (unlikely(kref_read(&ref->kref) == 0))
goto out_false;
rcu_read_unlock();
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e66bc4b..7aadce1f7e7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
}
- __drm_atomic_helper_crtc_destroy_state(state);
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
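The vc4_crtc.c fix above switches from __drm_atomic_helper_crtc_destroy_state() to drm_atomic_helper_crtc_destroy_state(): the underscore variant only releases the references held by the state, while the full helper also kfree()s the state itself, so the old call leaked the subclassed state on every commit. A hedged sketch of a driver destroy hook built around the full helper (type and hook names are illustrative):

#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>

struct demo_crtc_state {
        struct drm_crtc_state base;     /* must be the first member */
        /* driver-private bookkeeping would live here */
};

static void demo_crtc_destroy_state(struct drm_crtc *crtc,
                                    struct drm_crtc_state *state)
{
        struct demo_crtc_state *demo =
                container_of(state, struct demo_crtc_state, base);

        /* ... release demo-private resources here ... */
        (void)demo;

        /* releases references and kfree()s the (subclassed) state */
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}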
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db920771bfb5..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count);
struct vc4_bo *bo;
- if (uniforms_offset < shader_rec_offset ||
+ if (shader_rec_offset < args->bin_cl_size ||
+ uniforms_offset < shader_rec_offset ||
exec_size < uniforms_offset ||
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
DRM_ERROR("overflow in exec arguments\n");
+ ret = -EINVAL;
goto fail;
}
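The vc4_gem.c hunk above adds one more monotonicity check (shader_rec_offset < args->bin_cl_size) so a wrap-around in the summed, user-controlled sizes is rejected, and sets ret = -EINVAL so the failure path no longer returns a stale value. A hedged sketch of the same validation idea with simplified offsets:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Illustrative check: the offsets are running sums of user-supplied sizes,
 * so each offset must be >= the value it was built from; anything smaller
 * means one of the u32 additions wrapped around.
 */
static int demo_check_exec_sizes(u32 bin_offset, u32 bin_cl_size,
                                 u32 shader_rec_size, u32 uniforms_size)
{
        u32 shader_rec_offset = bin_offset + bin_cl_size;
        u32 uniforms_offset = shader_rec_offset + shader_rec_size;
        u32 exec_size = uniforms_offset + uniforms_size;

        if (shader_rec_offset < bin_cl_size ||
            uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset)
                return -EINVAL;

        return 0;
}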
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf489478b..686cdd3c86f2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
}
}
plane = &vc4_plane->base;
- ret = drm_universal_plane_init(dev, plane, 0xff,
+ ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
formats, num_formats,
type, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a309757..5cdd003605f5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
}
ret = vc4_full_res_bounds_check(exec, *obj, surf);
- if (!ret)
+ if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f950e129..cde9f3758106 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->screen_base = obj->vmap;
+ info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 723fd763da8e..7a96798b9c0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
mode_cmd.height = var->yres;
mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
mode_cmd.pixel_format =
- drm_mode_legacy_fb_format(var->bits_per_pixel,
- ((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
+ drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4070b7386e9d..1aeb80e52424 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -785,6 +785,11 @@ config HID_SUNPLUS
config HID_RMI
tristate "Synaptics RMI4 device support"
depends on HID
+ select RMI4_CORE
+ select RMI4_F03
+ select RMI4_F11
+ select RMI4_F12
+ select RMI4_F30
---help---
Support for Synaptics RMI4 touchpads.
Say Y here if you have a Synaptics RMI4 touchpad over i2c-hid or usbhid
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ea36b557d5ee..e9e87d337446 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -43,7 +43,6 @@
*/
#define DRIVER_DESC "HID core driver"
-#define DRIVER_LICENSE "GPL"
int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
@@ -724,13 +723,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
hid->group = HID_GROUP_SENSOR_HUB;
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
- (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
+ hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -826,14 +819,16 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_WACOM;
break;
case USB_VENDOR_ID_SYNAPTICS:
- if (hid->group == HID_GROUP_GENERIC)
+ if (hid->group == HID_GROUP_GENERIC ||
+ hid->group == HID_GROUP_MULTITOUCH_WIN_8)
if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
&& (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
/*
* hid-rmi should take care of them,
* not hid-generic
*/
- hid->group = HID_GROUP_RMI;
+ if (IS_ENABLED(CONFIG_HID_RMI))
+ hid->group = HID_GROUP_RMI;
break;
}
@@ -1887,6 +1882,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
#if IS_ENABLED(CONFIG_HID_MAYFLASH)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
@@ -1933,6 +1931,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
#endif
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1985,12 +1984,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2126,6 +2119,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ }
};
@@ -2314,7 +2308,7 @@ __ATTRIBUTE_GROUPS(hid_dev);
static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct hid_device *hdev = to_hid_device(dev);
+ struct hid_device *hdev = to_hid_device(dev);
if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
hdev->bus, hdev->vendor, hdev->product))
@@ -2867,5 +2861,5 @@ module_exit(hid_exit);
MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e9ae07..c0303f61c26a 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 5) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return brightness;
+ ret = brightness;
+out:
+ kfree(data);
+
+ return ret;
}
static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
const char *macro_mode;
- char data[8];
+ char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_GET_MODE,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 2,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 1) {
dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
default:
dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
data[0]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int current_profile;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 8) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
current_profile = data[7];
if (current_profile < 1 || current_profile > 3) {
dev_warn(dev, "Read invalid current profile: %02hhx.\n",
data[7]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_current_profile(struct device *dev,
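The hid-corsair.c changes above replace the on-stack char data[8] buffers with kmalloc()'d ones and check that usb_control_msg() actually transferred enough bytes before using the data: USB transfer buffers must be DMA-capable, which stack memory is not guaranteed to be. A minimal sketch of the pattern; the vendor request constant is a placeholder:

#include <linux/slab.h>
#include <linux/usb.h>

#define DEMO_REQUEST_STATUS     4       /* placeholder vendor request */

static int demo_read_status_byte(struct usb_device *usbdev, u8 *status)
{
        char *data;
        int ret;

        data = kmalloc(8, GFP_KERNEL);  /* heap buffer: safe for DMA */
        if (!data)
                return -ENOMEM;

        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              DEMO_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, data, 8, USB_CTRL_SET_TIMEOUT);
        if (ret < 5) {                  /* need at least bytes 0..4 */
                ret = ret < 0 ? ret : -EIO;
                goto out;
        }

        *status = data[4];
        ret = 0;
out:
        kfree(data);
        return ret;
}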
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f31a778b0851..b22d0f83f8e3 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -168,7 +168,7 @@ struct cp2112_device {
atomic_t xfer_avail;
struct gpio_chip gc;
u8 *in_out_buffer;
- spinlock_t lock;
+ struct mutex lock;
struct gpio_desc *desc[8];
bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ret = 0;
exit:
- spin_unlock_irqrestore(&dev->lock, flags);
- return ret <= 0 ? ret : -EIO;
+ mutex_unlock(&dev->lock);
+ return ret < 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
ret = buf[1];
exit:
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
return ret;
}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
goto fail;
}
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
/*
* Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
return 0;
fail:
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
return ret < 0 ? ret : -EIO;
}
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- spin_lock_init(&dev->lock);
+ mutex_init(&dev->lock);
ret = hid_parse(hdev);
if (ret) {
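The hid-cp2112.c conversion above swaps the spinlock (taken with spin_lock_irqsave()) for a mutex around the hid_hw_raw_request() calls: the HID transport request can sleep, which is not allowed under a spinlock with interrupts disabled, and all of these GPIO callbacks already run in sleepable context. A hedged sketch of the resulting locking shape (struct and function names are illustrative):

#include <linux/hid.h>
#include <linux/mutex.h>

struct demo_cp_dev {
        struct hid_device *hdev;
        struct mutex lock;              /* serialises in_out_buffer users */
        u8 *in_out_buffer;
};

/*
 * hid_hw_raw_request() may sleep, so the shared report buffer is protected
 * by a mutex rather than a spinlock.
 */
static int demo_read_gpio_config(struct demo_cp_dev *dev, u8 report_id,
                                 size_t len)
{
        int ret;

        mutex_lock(&dev->lock);
        ret = hid_hw_raw_request(dev->hdev, report_id, dev->in_out_buffer,
                                 len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
        mutex_unlock(&dev->lock);

        return ret;
}

/* during probe: mutex_init(&dev->lock); */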
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f46f2c5117fa..86c95d30ac80 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,6 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
+#define USB_VENDOR_ID_AMI 0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
+
#define USB_VENDOR_ID_ANTON 0x1130
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
@@ -320,7 +323,8 @@
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
-#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -627,9 +631,11 @@
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
+#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
+#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -722,12 +728,6 @@
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c5c5fbe9d605..52026dc94d5c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
.driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
- .driver_data = LG_FF2 },
+ .driver_data = LG_NOGET | LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
.driver_data = LG_FF3 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
index d9090765a6e5..03f10516131d 100644
--- a/drivers/hid/hid-mf.c
+++ b/drivers/hid/hid-mf.c
@@ -6,12 +6,14 @@
*
* Tested with:
* 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ * 0079:1803 "DragonRise Inc. Mayflash Wireless Sensor DolphinBar"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ * 0079:1844 "DragonRise Inc. Mayflash GameCube Game Controller Adapter (v04)"
*
* The following adapters probably work too, but need to be tested:
* 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
- * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
*
- * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ * Copyright (c) 2016-2017 Marcel Hasler <mahasler@gmail.com>
*/
/*
@@ -125,8 +127,8 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
- /* Split device into four inputs */
- hid->quirks |= HID_QUIRK_MULTI_INPUT;
+ /* Apply quirks as needed */
+ hid->quirks |= id->driver_data;
error = hid_parse(hid);
if (error) {
@@ -151,7 +153,14 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
}
static const struct hid_device_id mf_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
+ .driver_data = 0 }, /* No quirk required */
{ }
};
MODULE_DEVICE_TABLE(hid, mf_devices);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 74b7b84a0420..96e7d3231d2f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -274,18 +274,6 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
- .driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6dca66806844..692647485a53 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -68,6 +68,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_HOVERING (1 << 11)
#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
#define MT_QUIRK_FORCE_GET_FEATURE (1 << 13)
+#define MT_QUIRK_FIX_CONST_CONTACT_ID (1 << 14)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -157,6 +158,7 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_FLATFROG 0x0107
#define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
+#define MT_CLS_LG 0x010a
#define MT_CLS_VTL 0x0110
#define MT_DEFAULT_MAXCONTACT 10
@@ -263,6 +265,12 @@ static struct mt_class mt_classes[] = {
.sn_move = 2048,
.maxcontacts = 40,
},
+ { .name = MT_CLS_LG,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_FIX_CONST_CONTACT_ID |
+ MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE },
{ .name = MT_CLS_VTL,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE |
@@ -1078,6 +1086,34 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
return 0;
}
+static void mt_fix_const_field(struct hid_field *field, unsigned int usage)
+{
+ if (field->usage[0].hid != usage ||
+ !(field->flags & HID_MAIN_ITEM_CONSTANT))
+ return;
+
+ field->flags &= ~HID_MAIN_ITEM_CONSTANT;
+ field->flags |= HID_MAIN_ITEM_VARIABLE;
+}
+
+static void mt_fix_const_fields(struct hid_device *hdev, unsigned int usage)
+{
+ struct hid_report *report;
+ int i;
+
+ list_for_each_entry(report,
+ &hdev->report_enum[HID_INPUT_REPORT].report_list,
+ list) {
+
+ if (!report->maxfield)
+ continue;
+
+ for (i = 0; i < report->maxfield; i++)
+ if (report->field[i]->maxusage >= 1)
+ mt_fix_const_field(report->field[i], usage);
+ }
+}
+
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
@@ -1151,6 +1187,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
return ret;
+ if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
+ mt_fix_const_fields(hdev, HID_DG_CONTACTID);
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
return ret;
@@ -1398,6 +1437,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
USB_DEVICE_ID_ILITEK_MULTITOUCH) },
+ /* LG Melfas panel */
+ { .driver_data = MT_CLS_LG,
+ HID_USB_DEVICE(USB_VENDOR_ID_LG,
+ USB_DEVICE_ID_LG_MELFAS_MT) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 96286510f42e..8ffbb6f65a65 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -108,13 +108,12 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
struct rc_dev *rdev;
int ret = 0;
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
return -ENOMEM;
rdev->priv = data;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
rdev->input_name = data->hdev->name;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index be89bcbf6a71..5b40c2614599 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -14,11 +14,14 @@
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/rmi.h>
#include "hid-ids.h"
#define RMI_MOUSE_REPORT_ID 0x01 /* Mouse emulation Report */
@@ -33,9 +36,6 @@
#define RMI_READ_DATA_PENDING 1
#define RMI_STARTED 2
-#define RMI_SLEEP_NORMAL 0x0
-#define RMI_SLEEP_DEEP_SLEEP 0x1
-
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
@@ -54,25 +54,12 @@ enum rmi_mode_type {
RMI_MODE_NO_PACKED_ATTN_REPORTS = 2,
};
-struct rmi_function {
- unsigned page; /* page of the function */
- u16 query_base_addr; /* base address for queries */
- u16 command_base_addr; /* base address for commands */
- u16 control_base_addr; /* base address for controls */
- u16 data_base_addr; /* base address for datas */
- unsigned int interrupt_base; /* cross-function interrupt number
- * (uniq in the device)*/
- unsigned int interrupt_count; /* number of interrupts */
- unsigned int report_size; /* size of a report */
- unsigned long irq_mask; /* mask of the interrupts
- * (to be applied against ATTN IRQ) */
-};
-
/**
* struct rmi_data - stores information for hid communication
*
* @page_mutex: Locks current page to avoid changing pages in unexpected ways.
* @page: Keeps track of the current virtual page
+ * @xport: transport device to be registered with the RMI4 core.
*
* @wait: Used for waiting for read data
*
@@ -84,26 +71,18 @@ struct rmi_function {
*
* @flags: flags for the current device (started, reading, etc...)
*
- * @f11: placeholder of internal RMI function F11 description
- * @f30: placeholder of internal RMI function F30 description
- *
- * @max_fingers: maximum finger count reported by the device
- * @max_x: maximum x value reported by the device
- * @max_y: maximum y value reported by the device
- *
- * @gpio_led_count: count of GPIOs + LEDs reported by F30
- * @button_count: actual physical buttons count
- * @button_mask: button mask used to decode GPIO ATTN reports
- * @button_state_mask: pull state of the buttons
- *
- * @input: pointer to the kernel input device
- *
* @reset_work: worker which will be called in case of a mouse report
* @hdev: pointer to the struct hid_device
+ *
+ * @device_flags: flags which describe the device
+ *
+ * @domain: the IRQ domain allocated for this RMI4 device
+ * @rmi_irq: the irq that will be used to generate events to rmi-core
*/
struct rmi_data {
struct mutex page_mutex;
int page;
+ struct rmi_transport_dev xport;
wait_queue_head_t wait;
@@ -115,34 +94,13 @@ struct rmi_data {
unsigned long flags;
- struct rmi_function f01;
- struct rmi_function f11;
- struct rmi_function f30;
-
- unsigned int max_fingers;
- unsigned int max_x;
- unsigned int max_y;
- unsigned int x_size_mm;
- unsigned int y_size_mm;
- bool read_f11_ctrl_regs;
- u8 f11_ctrl_regs[RMI_F11_CTRL_REG_COUNT];
-
- unsigned int gpio_led_count;
- unsigned int button_count;
- unsigned long button_mask;
- unsigned long button_state_mask;
-
- struct input_dev *input;
-
struct work_struct reset_work;
struct hid_device *hdev;
unsigned long device_flags;
- unsigned long firmware_id;
- u8 f01_ctrl0;
- u8 interrupt_enable_mask;
- bool restore_interrupt_mask;
+ struct irq_domain *domain;
+ int rmi_irq;
};
#define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -220,10 +178,11 @@ static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
return ret;
}
-static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf,
- const int len)
+static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
int ret;
int bytes_read;
int bytes_needed;
@@ -292,15 +251,11 @@ exit:
return ret;
}
-static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf)
-{
- return rmi_read_block(hdev, addr, buf, 1);
-}
-
-static int rmi_write_block(struct hid_device *hdev, u16 addr, void *buf,
- const int len)
+static int rmi_hid_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
int ret;
mutex_lock(&data->page_mutex);
@@ -332,62 +287,20 @@ exit:
return ret;
}
-static inline int rmi_write(struct hid_device *hdev, u16 addr, void *buf)
-{
- return rmi_write_block(hdev, addr, buf, 1);
-}
-
-static void rmi_f11_process_touch(struct rmi_data *hdata, int slot,
- u8 finger_state, u8 *touch_data)
-{
- int x, y, wx, wy;
- int wide, major, minor;
- int z;
-
- input_mt_slot(hdata->input, slot);
- input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER,
- finger_state == 0x01);
- if (finger_state == 0x01) {
- x = (touch_data[0] << 4) | (touch_data[2] & 0x0F);
- y = (touch_data[1] << 4) | (touch_data[2] >> 4);
- wx = touch_data[3] & 0x0F;
- wy = touch_data[3] >> 4;
- wide = (wx > wy);
- major = max(wx, wy);
- minor = min(wx, wy);
- z = touch_data[4];
-
- /* y is inverted */
- y = hdata->max_y - y;
-
- input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x);
- input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y);
- input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide);
- input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z);
- input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
- input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
- }
-}
-
static int rmi_reset_attn_mode(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
int ret;
ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
if (ret)
return ret;
- if (data->restore_interrupt_mask) {
- ret = rmi_write(hdev, data->f01.control_base_addr + 1,
- &data->interrupt_enable_mask);
- if (ret) {
- hid_err(hdev, "can not write F01 control register\n");
- return ret;
- }
- }
+ if (test_bit(RMI_STARTED, &data->flags))
+ ret = rmi_dev->driver->reset_handler(rmi_dev);
- return 0;
+ return ret;
}
static void rmi_reset_work(struct work_struct *work)
@@ -399,102 +312,22 @@ static void rmi_reset_work(struct work_struct *work)
rmi_reset_attn_mode(hdata->hdev);
}
-static inline int rmi_schedule_reset(struct hid_device *hdev)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- return schedule_work(&hdata->reset_work);
-}
-
-static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
- int size)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- int offset;
- int i;
-
- if (!(irq & hdata->f11.irq_mask) || size <= 0)
- return 0;
-
- offset = (hdata->max_fingers >> 2) + 1;
- for (i = 0; i < hdata->max_fingers; i++) {
- int fs_byte_position = i >> 2;
- int fs_bit_position = (i & 0x3) << 1;
- int finger_state = (data[fs_byte_position] >> fs_bit_position) &
- 0x03;
- int position = offset + 5 * i;
-
- if (position + 5 > size) {
- /* partial report, go on with what we received */
- printk_once(KERN_WARNING
- "%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
- dev_driver_string(&hdev->dev),
- dev_name(&hdev->dev));
- hid_dbg(hdev, "Incomplete finger report\n");
- break;
- }
-
- rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
- }
- input_mt_sync_frame(hdata->input);
- input_sync(hdata->input);
- return hdata->f11.report_size;
-}
-
-static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
- int size)
+static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- int i;
- int button = 0;
- bool value;
+ struct rmi_device *rmi_dev = hdata->xport.rmi_dev;
+ unsigned long flags;
- if (!(irq & hdata->f30.irq_mask))
+ if (!(test_bit(RMI_STARTED, &hdata->flags)))
return 0;
- if (size < (int)hdata->f30.report_size) {
- hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
- return 0;
- }
+ local_irq_save(flags);
- for (i = 0; i < hdata->gpio_led_count; i++) {
- if (test_bit(i, &hdata->button_mask)) {
- value = (data[i / 8] >> (i & 0x07)) & BIT(0);
- if (test_bit(i, &hdata->button_state_mask))
- value = !value;
- input_event(hdata->input, EV_KEY, BTN_LEFT + button++,
- value);
- }
- }
- return hdata->f30.report_size;
-}
-
-static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- unsigned long irq_mask = 0;
- unsigned index = 2;
+ rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
- if (!(test_bit(RMI_STARTED, &hdata->flags)))
- return 0;
+ generic_handle_irq(hdata->rmi_irq);
- irq_mask |= hdata->f11.irq_mask;
- irq_mask |= hdata->f30.irq_mask;
-
- if (data[1] & ~irq_mask)
- hid_dbg(hdev, "unknown intr source:%02lx %s:%d\n",
- data[1] & ~irq_mask, __FILE__, __LINE__);
-
- if (hdata->f11.interrupt_base < hdata->f30.interrupt_base) {
- index += rmi_f11_input_event(hdev, data[1], &data[index],
- size - index);
- index += rmi_f30_input_event(hdev, data[1], &data[index],
- size - index);
- } else {
- index += rmi_f30_input_event(hdev, data[1], &data[index],
- size - index);
- index += rmi_f11_input_event(hdev, data[1], &data[index],
- size - index);
- }
+ local_irq_restore(flags);
return 1;
}
@@ -568,7 +401,7 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- rmi_schedule_reset(hdev);
+ schedule_work(&data->reset_work);
return 1;
}
@@ -576,637 +409,71 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
}
#ifdef CONFIG_PM
-static int rmi_set_sleep_mode(struct hid_device *hdev, int sleep_mode)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
- u8 f01_ctrl0;
-
- f01_ctrl0 = (data->f01_ctrl0 & ~0x3) | sleep_mode;
-
- ret = rmi_write(hdev, data->f01.control_base_addr,
- &f01_ctrl0);
- if (ret) {
- hid_err(hdev, "can not write sleep mode\n");
- return ret;
- }
-
- return 0;
-}
-
static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
{
struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
- u8 buf[RMI_F11_CTRL_REG_COUNT];
-
- if (!(data->device_flags & RMI_DEVICE))
- return 0;
-
- ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
- RMI_F11_CTRL_REG_COUNT);
- if (ret)
- hid_warn(hdev, "can not read F11 control registers\n");
- else
- memcpy(data->f11_ctrl_regs, buf, RMI_F11_CTRL_REG_COUNT);
-
-
- if (!device_may_wakeup(hdev->dev.parent))
- return rmi_set_sleep_mode(hdev, RMI_SLEEP_DEEP_SLEEP);
-
- return 0;
-}
-
-static int rmi_post_reset(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
int ret;
if (!(data->device_flags & RMI_DEVICE))
return 0;
- ret = rmi_reset_attn_mode(hdev);
+ ret = rmi_driver_suspend(rmi_dev, false);
if (ret) {
- hid_err(hdev, "can not set rmi mode\n");
+ hid_warn(hdev, "Failed to suspend device: %d\n", ret);
return ret;
}
- if (data->read_f11_ctrl_regs) {
- ret = rmi_write_block(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
- if (ret)
- hid_warn(hdev,
- "can not write F11 control registers after reset\n");
- }
-
- if (!device_may_wakeup(hdev->dev.parent)) {
- ret = rmi_set_sleep_mode(hdev, RMI_SLEEP_NORMAL);
- if (ret) {
- hid_err(hdev, "can not write sleep mode\n");
- return ret;
- }
- }
-
- return ret;
+ return 0;
}
static int rmi_post_resume(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
+ int ret;
if (!(data->device_flags & RMI_DEVICE))
return 0;
- return rmi_reset_attn_mode(hdev);
-}
-#endif /* CONFIG_PM */
-
-#define RMI4_MAX_PAGE 0xff
-#define RMI4_PAGE_SIZE 0x0100
-
-#define PDT_START_SCAN_LOCATION 0x00e9
-#define PDT_END_SCAN_LOCATION 0x0005
-#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
-
-struct pdt_entry {
- u8 query_base_addr:8;
- u8 command_base_addr:8;
- u8 control_base_addr:8;
- u8 data_base_addr:8;
- u8 interrupt_source_count:3;
- u8 bits3and4:2;
- u8 function_version:2;
- u8 bit7:1;
- u8 function_number:8;
-} __attribute__((__packed__));
-
-static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count)
-{
- return GENMASK(irq_count + irq_base - 1, irq_base);
-}
-
-static void rmi_register_function(struct rmi_data *data,
- struct pdt_entry *pdt_entry, int page, unsigned interrupt_count)
-{
- struct rmi_function *f = NULL;
- u16 page_base = page << 8;
-
- switch (pdt_entry->function_number) {
- case 0x01:
- f = &data->f01;
- break;
- case 0x11:
- f = &data->f11;
- break;
- case 0x30:
- f = &data->f30;
- break;
- }
-
- if (f) {
- f->page = page;
- f->query_base_addr = page_base | pdt_entry->query_base_addr;
- f->command_base_addr = page_base | pdt_entry->command_base_addr;
- f->control_base_addr = page_base | pdt_entry->control_base_addr;
- f->data_base_addr = page_base | pdt_entry->data_base_addr;
- f->interrupt_base = interrupt_count;
- f->interrupt_count = pdt_entry->interrupt_source_count;
- f->irq_mask = rmi_gen_mask(f->interrupt_base,
- f->interrupt_count);
- data->interrupt_enable_mask |= f->irq_mask;
- }
-}
-
-static int rmi_scan_pdt(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- struct pdt_entry entry;
- int page;
- bool page_has_function;
- int i;
- int retval;
- int interrupt = 0;
- u16 page_start, pdt_start , pdt_end;
-
- hid_info(hdev, "Scanning PDT...\n");
-
- for (page = 0; (page <= RMI4_MAX_PAGE); page++) {
- page_start = RMI4_PAGE_SIZE * page;
- pdt_start = page_start + PDT_START_SCAN_LOCATION;
- pdt_end = page_start + PDT_END_SCAN_LOCATION;
-
- page_has_function = false;
- for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) {
- retval = rmi_read_block(hdev, i, &entry, sizeof(entry));
- if (retval) {
- hid_err(hdev,
- "Read of PDT entry at %#06x failed.\n",
- i);
- goto error_exit;
- }
-
- if (RMI4_END_OF_PDT(entry.function_number))
- break;
-
- page_has_function = true;
-
- hid_info(hdev, "Found F%02X on page %#04x\n",
- entry.function_number, page);
-
- rmi_register_function(data, &entry, page, interrupt);
- interrupt += entry.interrupt_source_count;
- }
-
- if (!page_has_function)
- break;
- }
-
- hid_info(hdev, "%s: Done with PDT scan.\n", __func__);
- retval = 0;
-
-error_exit:
- return retval;
-}
-
-#define RMI_DEVICE_F01_BASIC_QUERY_LEN 11
-
-static int rmi_populate_f01(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 basic_queries[RMI_DEVICE_F01_BASIC_QUERY_LEN];
- u8 info[3];
- int ret;
- bool has_query42;
- bool has_lts;
- bool has_sensor_id;
- bool has_ds4_queries = false;
- bool has_build_id_query = false;
- bool has_package_id_query = false;
- u16 query_offset = data->f01.query_base_addr;
- u16 prod_info_addr;
- u8 ds4_query_len;
-
- ret = rmi_read_block(hdev, query_offset, basic_queries,
- RMI_DEVICE_F01_BASIC_QUERY_LEN);
- if (ret) {
- hid_err(hdev, "Can not read basic queries from Function 0x1.\n");
- return ret;
- }
-
- has_lts = !!(basic_queries[0] & BIT(2));
- has_sensor_id = !!(basic_queries[1] & BIT(3));
- has_query42 = !!(basic_queries[1] & BIT(7));
-
- query_offset += 11;
- prod_info_addr = query_offset + 6;
- query_offset += 10;
-
- if (has_lts)
- query_offset += 20;
-
- if (has_sensor_id)
- query_offset++;
-
- if (has_query42) {
- ret = rmi_read(hdev, query_offset, info);
- if (ret) {
- hid_err(hdev, "Can not read query42.\n");
- return ret;
- }
- has_ds4_queries = !!(info[0] & BIT(0));
- query_offset++;
- }
-
- if (has_ds4_queries) {
- ret = rmi_read(hdev, query_offset, &ds4_query_len);
- if (ret) {
- hid_err(hdev, "Can not read DS4 Query length.\n");
- return ret;
- }
- query_offset++;
-
- if (ds4_query_len > 0) {
- ret = rmi_read(hdev, query_offset, info);
- if (ret) {
- hid_err(hdev, "Can not read DS4 query.\n");
- return ret;
- }
-
- has_package_id_query = !!(info[0] & BIT(0));
- has_build_id_query = !!(info[0] & BIT(1));
- }
- }
-
- if (has_package_id_query)
- prod_info_addr++;
-
- if (has_build_id_query) {
- ret = rmi_read_block(hdev, prod_info_addr, info, 3);
- if (ret) {
- hid_err(hdev, "Can not read product info.\n");
- return ret;
- }
-
- data->firmware_id = info[1] << 8 | info[0];
- data->firmware_id += info[2] * 65536;
- }
-
- ret = rmi_read_block(hdev, data->f01.control_base_addr, info,
- 2);
-
- if (ret) {
- hid_err(hdev, "can not read f01 ctrl registers\n");
- return ret;
- }
-
- data->f01_ctrl0 = info[0];
-
- if (!info[1]) {
- /*
- * Due to a firmware bug in some touchpads, the F01 interrupt
- * enable control register will be cleared on reset.
- * This will stop the touchpad from reporting data, so
- * if F01 CTRL1 is 0 then we need to explicitly enable
- * interrupts for the functions we want data for.
- */
- data->restore_interrupt_mask = true;
-
- ret = rmi_write(hdev, data->f01.control_base_addr + 1,
- &data->interrupt_enable_mask);
- if (ret) {
- hid_err(hdev, "can not write to control reg 1: %d.\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int rmi_populate_f11(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 buf[20];
- int ret;
- bool has_query9;
- bool has_query10 = false;
- bool has_query11;
- bool has_query12;
- bool has_query27;
- bool has_query28;
- bool has_query36 = false;
- bool has_physical_props;
- bool has_gestures;
- bool has_rel;
- bool has_data40 = false;
- bool has_dribble = false;
- bool has_palm_detect = false;
- unsigned x_size, y_size;
- u16 query_offset;
-
- if (!data->f11.query_base_addr) {
- hid_err(hdev, "No 2D sensor found, giving up.\n");
- return -ENODEV;
- }
-
- /* query 0 contains some useful information */
- ret = rmi_read(hdev, data->f11.query_base_addr, buf);
- if (ret) {
- hid_err(hdev, "can not get query 0: %d.\n", ret);
- return ret;
- }
- has_query9 = !!(buf[0] & BIT(3));
- has_query11 = !!(buf[0] & BIT(4));
- has_query12 = !!(buf[0] & BIT(5));
- has_query27 = !!(buf[0] & BIT(6));
- has_query28 = !!(buf[0] & BIT(7));
-
- /* query 1 to get the max number of fingers */
- ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
- if (ret) {
- hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret);
- return ret;
- }
- data->max_fingers = (buf[0] & 0x07) + 1;
- if (data->max_fingers > 5)
- data->max_fingers = 10;
-
- data->f11.report_size = data->max_fingers * 5 +
- DIV_ROUND_UP(data->max_fingers, 4);
-
- if (!(buf[0] & BIT(4))) {
- hid_err(hdev, "No absolute events, giving up.\n");
- return -ENODEV;
- }
-
- has_rel = !!(buf[0] & BIT(3));
- has_gestures = !!(buf[0] & BIT(5));
-
- ret = rmi_read(hdev, data->f11.query_base_addr + 5, buf);
- if (ret) {
- hid_err(hdev, "can not get absolute data sources: %d.\n", ret);
+ ret = rmi_reset_attn_mode(hdev);
+ if (ret)
return ret;
- }
-
- has_dribble = !!(buf[0] & BIT(4));
-
- /*
- * At least 4 queries are guaranteed to be present in F11
- * +1 for query 5 which is present since absolute events are
- * reported and +1 for query 12.
- */
- query_offset = 6;
-
- if (has_rel)
- ++query_offset; /* query 6 is present */
-
- if (has_gestures) {
- /* query 8 to find out if query 10 exists */
- ret = rmi_read(hdev,
- data->f11.query_base_addr + query_offset + 1, buf);
- if (ret) {
- hid_err(hdev, "can not read gesture information: %d.\n",
- ret);
- return ret;
- }
- has_palm_detect = !!(buf[0] & BIT(0));
- has_query10 = !!(buf[0] & BIT(2));
-
- query_offset += 2; /* query 7 and 8 are present */
- }
-
- if (has_query9)
- ++query_offset;
-
- if (has_query10)
- ++query_offset;
-
- if (has_query11)
- ++query_offset;
-
- /* query 12 to know if the physical properties are reported */
- if (has_query12) {
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 12: %d.\n", ret);
- return ret;
- }
- has_physical_props = !!(buf[0] & BIT(5));
-
- if (has_physical_props) {
- query_offset += 1;
- ret = rmi_read_block(hdev,
- data->f11.query_base_addr
- + query_offset, buf, 4);
- if (ret) {
- hid_err(hdev, "can not read query 15-18: %d.\n",
- ret);
- return ret;
- }
-
- x_size = buf[0] | (buf[1] << 8);
- y_size = buf[2] | (buf[3] << 8);
-
- data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10);
- data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10);
-
- hid_info(hdev, "%s: size in mm: %d x %d\n",
- __func__, data->x_size_mm, data->y_size_mm);
-
- /*
- * query 15 - 18 contain the size of the sensor
- * and query 19 - 26 contain bezel dimensions
- */
- query_offset += 12;
- }
- }
-
- if (has_query27)
- ++query_offset;
- if (has_query28) {
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 28: %d.\n", ret);
- return ret;
- }
-
- has_query36 = !!(buf[0] & BIT(6));
- }
-
- if (has_query36) {
- query_offset += 2;
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 36: %d.\n", ret);
- return ret;
- }
-
- has_data40 = !!(buf[0] & BIT(5));
- }
-
-
- if (has_data40)
- data->f11.report_size += data->max_fingers * 2;
-
- ret = rmi_read_block(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
+ ret = rmi_driver_resume(rmi_dev, false);
if (ret) {
- hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
+ hid_warn(hdev, "Failed to resume device: %d\n", ret);
return ret;
}
- /* data->f11_ctrl_regs now contains valid register data */
- data->read_f11_ctrl_regs = true;
-
- data->max_x = data->f11_ctrl_regs[6] | (data->f11_ctrl_regs[7] << 8);
- data->max_y = data->f11_ctrl_regs[8] | (data->f11_ctrl_regs[9] << 8);
-
- if (has_dribble) {
- data->f11_ctrl_regs[0] = data->f11_ctrl_regs[0] & ~BIT(6);
- ret = rmi_write(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs);
- if (ret) {
- hid_err(hdev, "can not write to control reg 0: %d.\n",
- ret);
- return ret;
- }
- }
-
- if (has_palm_detect) {
- data->f11_ctrl_regs[11] = data->f11_ctrl_regs[11] & ~BIT(0);
- ret = rmi_write(hdev, data->f11.control_base_addr + 11,
- &data->f11_ctrl_regs[11]);
- if (ret) {
- hid_err(hdev, "can not write to control reg 11: %d.\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int rmi_populate_f30(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 buf[20];
- int ret;
- bool has_gpio, has_led;
- unsigned bytes_per_ctrl;
- u8 ctrl2_addr;
- int ctrl2_3_length;
- int i;
-
- /* function F30 is for physical buttons */
- if (!data->f30.query_base_addr) {
- hid_err(hdev, "No GPIO/LEDs found, giving up.\n");
- return -ENODEV;
- }
-
- ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2);
- if (ret) {
- hid_err(hdev, "can not get F30 query registers: %d.\n", ret);
- return ret;
- }
-
- has_gpio = !!(buf[0] & BIT(3));
- has_led = !!(buf[0] & BIT(2));
- data->gpio_led_count = buf[1] & 0x1f;
-
- /* retrieve ctrl 2 & 3 registers */
- bytes_per_ctrl = (data->gpio_led_count + 7) / 8;
- /* Ctrl0 is present only if both has_gpio and has_led are set*/
- ctrl2_addr = (has_gpio && has_led) ? bytes_per_ctrl : 0;
- /* Ctrl1 is always present */
- ctrl2_addr += bytes_per_ctrl;
- ctrl2_3_length = 2 * bytes_per_ctrl;
-
- data->f30.report_size = bytes_per_ctrl;
-
- ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr,
- buf, ctrl2_3_length);
- if (ret) {
- hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n",
- ctrl2_3_length, ret);
- return ret;
- }
-
- for (i = 0; i < data->gpio_led_count; i++) {
- int byte_position = i >> 3;
- int bit_position = i & 0x07;
- u8 dir_byte = buf[byte_position];
- u8 data_byte = buf[byte_position + bytes_per_ctrl];
- bool dir = (dir_byte >> bit_position) & BIT(0);
- bool dat = (data_byte >> bit_position) & BIT(0);
-
- if (dir == 0) {
- /* input mode */
- if (dat) {
- /* actual buttons have pull up resistor */
- data->button_count++;
- set_bit(i, &data->button_mask);
- set_bit(i, &data->button_state_mask);
- }
- }
-
- }
-
return 0;
}
+#endif /* CONFIG_PM */
-static int rmi_populate(struct hid_device *hdev)
+static int rmi_hid_reset(struct rmi_transport_dev *xport, u16 reset_addr)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
-
- ret = rmi_scan_pdt(hdev);
- if (ret) {
- hid_err(hdev, "PDT scan failed with code %d.\n", ret);
- return ret;
- }
-
- ret = rmi_populate_f01(hdev);
- if (ret) {
- hid_err(hdev, "Error while initializing F01 (%d).\n", ret);
- return ret;
- }
-
- ret = rmi_populate_f11(hdev);
- if (ret) {
- hid_err(hdev, "Error while initializing F11 (%d).\n", ret);
- return ret;
- }
-
- if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) {
- ret = rmi_populate_f30(hdev);
- if (ret)
- hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
- }
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
- return 0;
+ return rmi_reset_attn_mode(hdev);
}
static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct rmi_data *data = hid_get_drvdata(hdev);
struct input_dev *input = hi->input;
- int ret;
- int res_x, res_y, i;
+ int ret = 0;
+
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
- data->input = input;
+ data->xport.input = input;
hid_dbg(hdev, "Opening low level driver\n");
ret = hid_hw_open(hdev);
if (ret)
return ret;
- if (!(data->device_flags & RMI_DEVICE))
- return 0;
-
/* Allow incoming hid reports */
hid_device_io_start(hdev);
@@ -1222,40 +489,10 @@ static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
goto exit;
}
- ret = rmi_populate(hdev);
- if (ret)
- goto exit;
-
- hid_info(hdev, "firmware id: %ld\n", data->firmware_id);
-
- __set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0);
-
- if (data->x_size_mm && data->y_size_mm) {
- res_x = (data->max_x - 1) / data->x_size_mm;
- res_y = (data->max_y - 1) / data->y_size_mm;
-
- input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
- input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
- }
-
- input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
-
- ret = input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
- if (ret < 0)
+ ret = rmi_register_transport_device(&data->xport);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to register transport driver\n");
goto exit;
-
- if (data->button_count) {
- __set_bit(EV_KEY, input->evbit);
- for (i = 0; i < data->button_count; i++)
- __set_bit(BTN_LEFT + i, input->keybit);
-
- if (data->button_count == 1)
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
set_bit(RMI_STARTED, &data->flags);
@@ -1304,6 +541,71 @@ static int rmi_check_valid_report_id(struct hid_device *hdev, unsigned type,
return 0;
}
+static struct rmi_device_platform_data rmi_hid_pdata = {
+ .sensor_pdata = {
+ .sensor_type = rmi_sensor_touchpad,
+ .axis_align.flip_y = true,
+ .dribble = RMI_REG_STATE_ON,
+ .palm_detect = RMI_REG_STATE_OFF,
+ },
+};
+
+static const struct rmi_transport_ops hid_rmi_ops = {
+ .write_block = rmi_hid_write_block,
+ .read_block = rmi_hid_read_block,
+ .reset = rmi_hid_reset,
+};
+
+static void rmi_irq_teardown(void *data)
+{
+ struct rmi_data *hdata = data;
+ struct irq_domain *domain = hdata->domain;
+
+ if (!domain)
+ return;
+
+ irq_dispose_mapping(irq_find_mapping(domain, 0));
+
+ irq_domain_remove(domain);
+ hdata->domain = NULL;
+ hdata->rmi_irq = 0;
+}
+
+static int rmi_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rmi_irq_ops = {
+ .map = rmi_irq_map,
+};
+
+static int rmi_setup_irq_domain(struct hid_device *hdev)
+{
+ struct rmi_data *hdata = hid_get_drvdata(hdev);
+ int ret;
+
+ hdata->domain = irq_domain_create_linear(hdev->dev.fwnode, 1,
+ &rmi_irq_ops, hdata);
+ if (!hdata->domain)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(&hdev->dev, &rmi_irq_teardown, hdata);
+ if (ret)
+ return ret;
+
+ hdata->rmi_irq = irq_create_mapping(hdata->domain, 0);
+ if (hdata->rmi_irq <= 0) {
+ hid_err(hdev, "Can't allocate an IRQ\n");
+ return hdata->rmi_irq < 0 ? hdata->rmi_irq : -ENXIO;
+ }
+
+ return 0;
+}
+
static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct rmi_data *data = NULL;
@@ -1365,8 +667,8 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL);
if (!data->writeReport) {
- ret = -ENOMEM;
- return ret;
+ hid_err(hdev, "failed to allocate buffer for HID reports\n");
+ return -ENOMEM;
}
data->readReport = data->writeReport + data->output_report_size;
@@ -1375,6 +677,21 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&data->page_mutex);
+ ret = rmi_setup_irq_domain(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to allocate IRQ domain\n");
+ return ret;
+ }
+
+ if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)
+ rmi_hid_pdata.f30_data.disable = true;
+
+ data->xport.dev = hdev->dev.parent;
+ data->xport.pdata = rmi_hid_pdata;
+ data->xport.pdata.irq = data->rmi_irq;
+ data->xport.proto_name = "hid";
+ data->xport.ops = &hid_rmi_ops;
+
start:
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
@@ -1382,17 +699,6 @@ start:
return ret;
}
- if ((data->device_flags & RMI_DEVICE) &&
- !test_bit(RMI_STARTED, &data->flags))
- /*
- * The device may be in the bootloader if rmi_input_configured
- * failed to find F11 in the PDT. Print an error, but don't
- * return an error from rmi_probe so that hidraw will be
- * accessible from userspace. That way a userspace tool
- * can be used to reload working firmware on the touchpad.
- */
- hid_err(hdev, "Device failed to be properly configured\n");
-
return 0;
}
@@ -1401,6 +707,8 @@ static void rmi_remove(struct hid_device *hdev)
struct rmi_data *hdata = hid_get_drvdata(hdev);
clear_bit(RMI_STARTED, &hdata->flags);
+ cancel_work_sync(&hdata->reset_work);
+ rmi_unregister_transport_device(&hdata->xport);
hid_hw_stop(hdev);
}
@@ -1408,6 +716,7 @@ static void rmi_remove(struct hid_device *hdev)
static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
@@ -1425,7 +734,7 @@ static struct hid_driver rmi_driver = {
#ifdef CONFIG_PM
.suspend = rmi_suspend,
.resume = rmi_post_resume,
- .reset_resume = rmi_post_reset,
+ .reset_resume = rmi_post_resume,
#endif
};
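
Editor's note: the hid-rmi changes above drop the driver's own F01/F11/F30 parsing and instead register a transport with the RMI4 core; incoming HID attention reports are handed to the core and delivered through a one-interrupt linear IRQ domain. A minimal sketch of that forwarding path, reusing only helpers that appear in the patch (rmi_set_attn_data(), generic_handle_irq()); locking and error handling are omitted:

static int example_forward_attn(struct hid_device *hdev, u8 *report, int len)
{
	struct rmi_data *hdata = hid_get_drvdata(hdev);

	/* Byte 1 carries the interrupt status bits, the rest is function data. */
	rmi_set_attn_data(hdata->xport.rmi_dev, report[1], &report[2], len - 2);

	/* Kick the virtual interrupt the RMI4 core was bound to at probe time. */
	generic_handle_irq(hdata->rmi_irq);

	return 1;
}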
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
index ab68afcba2a2..a5897b9c0956 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
@@ -111,6 +111,14 @@
#define IPC_ILUP_BIT (1<<IPC_ILUP_OFFS)
/*
+ * ISH FW status bits in ISH FW Status Register
+ */
+#define IPC_ISH_FWSTS_SHIFT 12
+#define IPC_ISH_FWSTS_MASK GENMASK(15, 12)
+#define IPC_GET_ISH_FWSTS(status) \
+ (((status) & IPC_ISH_FWSTS_MASK) >> IPC_ISH_FWSTS_SHIFT)
+
+/*
* FW status bits (relevant)
*/
#define IPC_FWSTS_ILUP 0x1
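
Editor's note: IPC_GET_ISH_FWSTS() simply isolates bits 15:12 of the firmware status register. A worked example with a hypothetical register value (the state names come from the enum added to hw-ish.h just below):

/* status = 0x00008123: (0x8123 & 0xF000) >> 12 == 8, i.e. the firmware
 * has reached FWSTS_SENSOR_APP_LOADED. */
uint32_t fwsts = IPC_GET_ISH_FWSTS(0x00008123);	/* fwsts == 8 */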
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 46615a03e78f..fd34307a7a70 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -61,6 +61,18 @@ struct ish_hw {
void __iomem *mem_addr;
};
+/*
+ * ISH FW status type
+ */
+enum {
+ FWSTS_AFTER_RESET = 0,
+ FWSTS_WAIT_FOR_HOST = 4,
+ FWSTS_START_KERNEL_DMA = 5,
+ FWSTS_FW_IS_RUNNING = 7,
+ FWSTS_SENSOR_APP_LOADED = 8,
+ FWSTS_SENSOR_APP_RUNNING = 15
+};
+
#define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
irqreturn_t ish_irq_handler(int irq, void *dev_id);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d647d2dd2c..8df81dc84529 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -24,7 +24,6 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <linux/miscdevice.h>
#define CREATE_TRACE_POINTS
#include <trace/events/intel_ish.h>
#include "ishtp-dev.h"
@@ -47,7 +46,8 @@ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
*
* Callback to direct log messages to Linux trace buffers
*/
-static void ish_event_tracer(struct ishtp_device *dev, char *format, ...)
+static __printf(2, 3)
+void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
{
if (trace_ishtp_dump_enabled()) {
va_list args;
@@ -205,12 +205,15 @@ static void ish_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM
static struct device *ish_resume_device;
+/* 50ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 50
+
/**
* ish_resume_handler() - Work function to complete resume
* @work: work struct
*
* The resume work function to complete resume function asynchronously.
- * There are two types of platforms, one where ISH is not powered off,
+ * There are two resume paths, one where ISH is not powered off,
* in that case a simple resume message is enough, others we need
* a reset sequence.
*/
@@ -218,20 +221,31 @@ static void ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts;
int ret;
- ishtp_send_resume(dev);
+ /* Get ISH FW status */
+ fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev));
- /* 50 ms to get resume response */
- if (dev->resume_flag)
- ret = wait_event_interruptible_timeout(dev->resume_wait,
- !dev->resume_flag,
- msecs_to_jiffies(50));
+ /*
+ * If the ISH firmware has already loaded the sensor app (or is past that
+ * state), ISH was not powered off; in that case a resume message is enough.
+ */
+ if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
+ ishtp_send_resume(dev);
+
+ /* Wait for the resume response */
+ if (dev->resume_flag)
+ ret = wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+ }
/*
- * If no resume response. This platform is not S0ix compatible
- * So on resume full reboot of ISH processor will happen, so
- * need to go through init sequence again
+ * If the sensor app is not loaded yet in the ISH firmware, or no resume
+ * response arrived, this platform is not S0ix compatible or something is
+ * wrong with the ISH firmware. The ISH processor will fully reboot on
+ * resume, so the init sequence has to be run again.
*/
if (dev->resume_flag)
ish_init(dev);
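
Editor's note: condensing the resume handler above, the firmware state decides between a lightweight resume and a full re-init; dev->resume_flag is cleared once the resume ack arrives, and wait_event_interruptible_timeout() returns 0 on timeout, a positive jiffies count if the condition became true, or -ERESTARTSYS if interrupted, which is why the code re-checks resume_flag rather than the return value. Sketch only:

fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev));
if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
	/* ISH kept power across suspend: a resume message is enough. */
	ishtp_send_resume(dev);
	if (dev->resume_flag)
		wait_event_interruptible_timeout(dev->resume_wait,
						 !dev->resume_flag,
						 msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
}
if (dev->resume_flag)
	ish_init(dev);	/* no ack (or app not loaded): full ISH reboot */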
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 277983aa1d90..cd23903ddcf1 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -208,7 +208,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
hid->version = le16_to_cpu(ISH_HID_VERSION);
hid->vendor = le16_to_cpu(ISH_HID_VENDOR);
hid->product = le16_to_cpu(ISH_HID_PRODUCT);
- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", "hid-ishtp",
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-ishtp",
hid->vendor, hid->product);
rv = hid_add_device(hid);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f4cbc744e657..5f382fedc2ab 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -358,7 +358,7 @@ static void ishtp_cl_dev_release(struct device *dev)
kfree(to_ishtp_cl_device(dev));
}
-static struct device_type ishtp_cl_device_type = {
+static const struct device_type ishtp_cl_device_type = {
.release = ishtp_cl_dev_release,
};
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 59460b66e689..b7213608ce43 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index ac364418e17c..d27e03526acd 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/miscdevice.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index a94f9a8a96a0..6a6d927b78b0 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -238,7 +238,8 @@ struct ishtp_device {
uint64_t ishtp_host_dma_rx_buf_phys;
/* Dump to trace buffers if enabled*/
- void (*print_log)(struct ishtp_device *dev, char *format, ...);
+ __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
+ const char *format, ...);
/* Debug stats */
unsigned int ipc_rx_cnt;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 333108ef18cf..961bc6fdd2d9 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -43,7 +43,6 @@
*/
#define DRIVER_DESC "USB HID core driver"
-#define DRIVER_LICENSE "GPL"
/*
* Module parameters.
@@ -1660,4 +1659,4 @@ MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e9d6cc7cdfc5..d6847a664446 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -84,7 +85,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -102,12 +103,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
@@ -296,7 +291,7 @@ static void usbhid_remove_all_dquirks(void)
}
-/**
+/**
* usbhid_quirks_init: apply USB HID quirks specified at module load time
*/
int usbhid_quirks_init(char **quirks_param)
@@ -360,7 +355,7 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
if (bl_entry != NULL)
dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
+ bl_entry->quirks, bl_entry->idVendor,
bl_entry->idProduct);
return bl_entry;
}
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 9a332e683db7..7fb2d1e4f5dd 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -39,11 +39,10 @@
#define DRIVER_VERSION ""
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB HID Boot Protocol keyboard driver"
-#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
static const unsigned char usb_kbd_keycode[256] = {
0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index bf16d72dc370..dd911c5241d8 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -42,11 +42,10 @@
#define DRIVER_VERSION "v1.6"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB HID Boot Protocol mouse driver"
-#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
struct usb_mouse {
char name[128];
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index d303e413306d..38ee2125412f 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -102,7 +102,6 @@
#define DRIVER_VERSION "v2.00"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB Wacom tablet driver"
-#define DRIVER_LICENSE "GPL"
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_VENDOR_ID_LENOVO 0x17ef
@@ -166,7 +165,9 @@ struct wacom {
struct work_struct wireless_work;
struct work_struct battery_work;
struct work_struct remote_work;
+ struct delayed_work init_work;
struct wacom_remote *remote;
+ bool generic_has_leds;
struct wacom_leds {
struct wacom_group_leds *groups;
unsigned int count;
@@ -218,4 +219,6 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led);
struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
unsigned int id);
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
+int wacom_equivalent_usage(int usage);
+int wacom_initialize_leds(struct wacom *wacom);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bcbd140..be8f7e2a026f 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -16,15 +16,7 @@
#include <linux/input/mt.h>
#define WAC_MSG_RETRIES 5
-
-#define WAC_CMD_WL_LED_CONTROL 0x03
-#define WAC_CMD_LED_CONTROL 0x20
-#define WAC_CMD_ICON_START 0x21
-#define WAC_CMD_ICON_XFER 0x23
-#define WAC_CMD_ICON_BT_XFER 0x26
#define WAC_CMD_RETRIES 10
-#define WAC_CMD_DELETE_PAIRING 0x20
-#define WAC_CMD_UNPAIR_ALL 0xFF
#define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
#define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
@@ -120,11 +112,12 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
u8 *data;
int ret;
int n;
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
if (!features->touch_max) {
@@ -333,8 +326,14 @@ static void wacom_post_parse_hid(struct hid_device *hdev,
if (features->type == HID_GENERIC) {
/* Any last-minute generic device setup */
if (features->touch_max > 1) {
- input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max,
- INPUT_MT_DIRECT);
+ if (features->device_type & WACOM_DEVICETYPE_DIRECT)
+ input_mt_init_slots(wacom_wac->touch_input,
+ wacom_wac->features.touch_max,
+ INPUT_MT_DIRECT);
+ else
+ input_mt_init_slots(wacom_wac->touch_input,
+ wacom_wac->features.touch_max,
+ INPUT_MT_POINTER);
}
}
}
@@ -497,11 +496,11 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
* from the tablet, it is necessary to switch the tablet out of this
* mode and into one which sends the full range of tablet data.
*/
-static int wacom_query_tablet_data(struct hid_device *hdev,
- struct wacom_features *features)
+static int _wacom_query_tablet_data(struct wacom *wacom)
{
- struct wacom *wacom = hid_get_drvdata(hdev);
+ struct hid_device *hdev = wacom->hdev;
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
@@ -740,6 +739,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
+
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -752,9 +756,6 @@ static int wacom_led_control(struct wacom *wacom)
unsigned char report_id = WAC_CMD_LED_CONTROL;
int buf_size = 9;
- if (!hid_get_drvdata(wacom->hdev))
- return -ENODEV;
-
if (!wacom->led.groups)
return -ENOTSUPP;
@@ -762,12 +763,21 @@ static int wacom_led_control(struct wacom *wacom)
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
}
+ else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+ report_id = WAC_CMD_WL_INTUOSP2;
+ buf_size = 51;
+ }
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type >= INTUOS5S &&
- wacom->wacom_wac.features.type <= INTUOSPL) {
+ if (wacom->wacom_wac.features.type == HID_GENERIC) {
+ buf[0] = WAC_CMD_LED_CONTROL_GENERIC;
+ buf[1] = wacom->led.llv;
+ buf[2] = wacom->led.groups[0].select & 0x03;
+
+ } else if ((wacom->wacom_wac.features.type >= INTUOS5S &&
+ wacom->wacom_wac.features.type <= INTUOSPL)) {
/*
* Touch Ring and crop mark LED luminance may take on
* one of four values:
@@ -787,6 +797,16 @@ static int wacom_led_control(struct wacom *wacom)
} else
buf[1] = led_bits;
}
+ else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+ buf[0] = report_id;
+ buf[4] = 100; // Power Connection LED (ORANGE)
+ buf[5] = 100; // BT Connection LED (BLUE)
+ buf[6] = 100; // Paper Mode (RED?)
+ buf[7] = 100; // Paper Mode (GREEN?)
+ buf[8] = 100; // Paper Mode (BLUE?)
+ buf[9] = wacom->led.llv;
+ buf[10] = wacom->led.groups[0].select & 0x03;
+ }
else {
int led = wacom->led.groups[0].select | 0x4;
@@ -1027,6 +1047,17 @@ static struct attribute_group intuos5_led_attr_group = {
.attrs = intuos5_led_attrs,
};
+static struct attribute *generic_led_attrs[] = {
+ &dev_attr_status0_luminance.attr,
+ &dev_attr_status_led0_select.attr,
+ NULL
+};
+
+static struct attribute_group generic_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = generic_led_attrs,
+};
+
struct wacom_sysfs_group_devres {
struct attribute_group *group;
struct kobject *root;
@@ -1348,7 +1379,7 @@ static int wacom_leds_alloc_and_register(struct wacom *wacom, int group_count,
return 0;
}
-static int wacom_initialize_leds(struct wacom *wacom)
+int wacom_initialize_leds(struct wacom *wacom)
{
int error;
@@ -1357,6 +1388,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
/* Initialize default values */
switch (wacom->wacom_wac.features.type) {
+ case HID_GENERIC:
+ if (!wacom->generic_has_leds)
+ return 0;
+ wacom->led.llv = 100;
+ wacom->led.max_llv = 100;
+
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+
+ error = wacom_devm_sysfs_create_group(wacom,
+ &generic_led_attr_group);
+ break;
+
case INTUOS4S:
case INTUOS4:
case INTUOS4WL:
@@ -1415,6 +1463,17 @@ static int wacom_initialize_leds(struct wacom *wacom)
&intuos5_led_attr_group);
break;
+ case INTUOSP2_BT:
+ wacom->led.llv = 50;
+ wacom->led.max_llv = 100;
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+ return 0;
+
case REMOTE:
wacom->led.llv = 255;
wacom->led.max_llv = 255;
@@ -1435,11 +1494,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
"cannot create sysfs group err: %d\n", error);
return error;
}
- wacom_led_control(wacom);
return 0;
}
+static void wacom_init_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, init_work.work);
+
+ _wacom_query_tablet_data(wacom);
+ wacom_led_control(wacom);
+}
+
+static void wacom_query_tablet_data(struct wacom *wacom)
+{
+ schedule_delayed_work(&wacom->init_work, msecs_to_jiffies(1000));
+}
+
static enum power_supply_property wacom_battery_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PRESENT,
@@ -2015,6 +2086,24 @@ static void wacom_release_resources(struct wacom *wacom)
wacom->wacom_wac.pad_input = NULL;
}
+static void wacom_set_shared_values(struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) {
+ wacom_wac->shared->type = wacom_wac->features.type;
+ wacom_wac->shared->touch_input = wacom_wac->touch_input;
+ }
+
+ if (wacom_wac->has_mute_touch_switch)
+ wacom_wac->shared->has_mute_touch_switch = true;
+
+ if (wacom_wac->shared->has_mute_touch_switch &&
+ wacom_wac->shared->touch_input) {
+ set_bit(EV_SW, wacom_wac->shared->touch_input->evbit);
+ input_set_capability(wacom_wac->shared->touch_input, EV_SW,
+ SW_MUTE_DEVICE);
+ }
+}
+
static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -2036,10 +2125,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
-
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2080,10 +2165,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
@@ -2118,7 +2202,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (!wireless) {
/* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(hdev, features);
+ wacom_query_tablet_data(wacom);
}
/* touch only Bamboo doesn't support pen */
@@ -2139,13 +2223,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
error = hid_hw_open(hdev);
- if ((wacom_wac->features.type == INTUOSHT ||
- wacom_wac->features.type == INTUOSHT2) &&
- (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)) {
- wacom_wac->shared->type = wacom_wac->features.type;
- wacom_wac->shared->touch_input = wacom_wac->touch_input;
- }
-
+ wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);
return 0;
@@ -2450,6 +2528,7 @@ static int wacom_probe(struct hid_device *hdev,
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
+ INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
@@ -2491,12 +2570,17 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
+ cancel_delayed_work_sync(&wacom->init_work);
cancel_work_sync(&wacom->wireless_work);
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
+ /* make sure we don't trigger the LEDs */
+ wacom_led_groups_release(wacom);
+ wacom_release_resources(wacom);
+
hid_set_drvdata(hdev, NULL);
}
@@ -2504,12 +2588,11 @@ static void wacom_remove(struct hid_device *hdev)
static int wacom_resume(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
- struct wacom_features *features = &wacom->wacom_wac.features;
mutex_lock(&wacom->lock);
/* switch to wacom mode first */
- wacom_query_tablet_data(hdev, features);
+ _wacom_query_tablet_data(wacom);
wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -2540,4 +2623,4 @@ module_hid_driver(wacom_driver);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
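
Editor's note: wacom_sys.c now batches the tablet-mode query and LED setup into init_work, scheduled one second after probe and cancelled with cancel_delayed_work_sync() in wacom_remove(). A generic sketch of that deferred-init lifecycle (the struct and names here are hypothetical; only the work API calls are real):

struct foo_dev {
	struct hid_device *hdev;
	struct delayed_work init_work;
};

static void foo_init_work(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, init_work.work);

	/* Runs in process context ~1 s after probe, like wacom_init_work(). */
	hid_info(foo->hdev, "deferred init\n");
}

/* probe:  INIT_DELAYED_WORK(&foo->init_work, foo_init_work);
 *         schedule_delayed_work(&foo->init_work, msecs_to_jiffies(1000));
 * remove: cancel_delayed_work_sync(&foo->init_work);
 */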
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3ca6d56..4aa3de9f1163 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -43,6 +43,8 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
static int wacom_numbered_button_to_key(int n);
+static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
+ int group);
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -166,19 +168,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
wacom->id[0] = STYLUS_DEVICE_ID;
}
- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
- if (features->pressure_max > 255)
- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
- pressure += (features->pressure_max + 1) / 2;
+ if (prox) {
+ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+ if (features->pressure_max > 255)
+ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+ pressure += (features->pressure_max + 1) / 2;
- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
- /* Only allow the stylus2 button to be reported for the pen tool. */
- input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+ /* Only allow the stylus2 button to be reported for the pen tool. */
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ }
if (!prox)
wacom->id[0] = 0;
@@ -1190,6 +1194,166 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
return count;
}
+static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+{
+ const int pen_frame_len = 14;
+ const int pen_frames = 7;
+
+ struct input_dev *pen_input = wacom->pen_input;
+ unsigned char *data = wacom->data;
+ int i;
+
+ wacom->serial[0] = get_unaligned_le64(&data[99]);
+ wacom->id[0] = get_unaligned_le16(&data[107]);
+ if (wacom->serial[0] >> 52 == 1) {
+ /* Add back in missing bits of ID for non-USI pens */
+ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+ }
+ wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
+
+ for (i = 0; i < pen_frames; i++) {
+ unsigned char *frame = &data[i*pen_frame_len + 1];
+ bool valid = frame[0] & 0x80;
+ bool prox = frame[0] & 0x40;
+ bool range = frame[0] & 0x20;
+
+ if (!valid)
+ continue;
+
+ if (range) {
+ input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
+ input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+ input_report_abs(pen_input, ABS_TILT_X, frame[7]);
+ input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
+ input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+ input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
+ }
+ input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
+
+ input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
+ input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+ input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+ input_report_key(pen_input, wacom->tool[0], prox);
+ input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+ input_report_abs(pen_input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+
+ wacom->shared->stylus_in_proximity = prox;
+
+ input_sync(pen_input);
+ }
+}
+
+static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
+{
+ const int finger_touch_len = 8;
+ const int finger_frames = 4;
+ const int finger_frame_len = 43;
+
+ struct input_dev *touch_input = wacom->touch_input;
+ unsigned char *data = wacom->data;
+ int num_contacts_left = 5;
+ int i, j;
+
+ for (i = 0; i < finger_frames; i++) {
+ unsigned char *frame = &data[i*finger_frame_len + 109];
+ int current_num_contacts = frame[0] & 0x7F;
+ int contacts_to_send;
+
+ if (!(frame[0] & 0x80))
+ continue;
+
+ /*
+ * First packet resets the counter since only the first
+ * packet in series will have non-zero current_num_contacts.
+ */
+ if (current_num_contacts)
+ wacom->num_contacts_left = current_num_contacts;
+
+ contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
+
+ for (j = 0; j < contacts_to_send; j++) {
+ unsigned char *touch = &frame[j*finger_touch_len + 1];
+ int slot = input_mt_get_slot_by_key(touch_input, touch[0]);
+ int x = get_unaligned_le16(&touch[2]);
+ int y = get_unaligned_le16(&touch[4]);
+ int w = touch[6] * input_abs_get_res(touch_input, ABS_MT_POSITION_X);
+ int h = touch[7] * input_abs_get_res(touch_input, ABS_MT_POSITION_Y);
+
+ if (slot < 0)
+ continue;
+
+ input_mt_slot(touch_input, slot);
+ input_mt_report_slot_state(touch_input, MT_TOOL_FINGER, touch[1] & 0x01);
+ input_report_abs(touch_input, ABS_MT_POSITION_X, x);
+ input_report_abs(touch_input, ABS_MT_POSITION_Y, y);
+ input_report_abs(touch_input, ABS_MT_TOUCH_MAJOR, max(w, h));
+ input_report_abs(touch_input, ABS_MT_TOUCH_MINOR, min(w, h));
+ input_report_abs(touch_input, ABS_MT_ORIENTATION, w > h);
+ }
+
+ input_mt_sync_frame(touch_input);
+
+ wacom->num_contacts_left -= contacts_to_send;
+ if (wacom->num_contacts_left <= 0) {
+ wacom->num_contacts_left = 0;
+ wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+ }
+ }
+
+ input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+ input_sync(touch_input);
+}
+
+static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
+{
+ struct input_dev *pad_input = wacom->pad_input;
+ unsigned char *data = wacom->data;
+
+ int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+ int ring = data[285];
+ int prox = buttons | (ring & 0x80);
+
+ wacom_report_numbered_buttons(pad_input, 9, buttons);
+
+ input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+
+ input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
+ input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
+ input_event(pad_input, EV_MSC, MSC_SERIAL, 0xffffffff);
+
+ input_sync(pad_input);
+}
+
+static void wacom_intuos_pro2_bt_battery(struct wacom_wac *wacom)
+{
+ unsigned char *data = wacom->data;
+
+ bool chg = data[284] & 0x80;
+ int battery_status = data[284] & 0x7F;
+
+ wacom_notify_battery(wacom, battery_status, chg, 1, chg);
+}
+
+static int wacom_intuos_pro2_bt_irq(struct wacom_wac *wacom, size_t len)
+{
+ unsigned char *data = wacom->data;
+
+ if (data[0] != 0x80) {
+ dev_dbg(wacom->pen_input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
+ return 0;
+ }
+
+ wacom_intuos_pro2_bt_pen(wacom);
+ wacom_intuos_pro2_bt_touch(wacom);
+ wacom_intuos_pro2_bt_pad(wacom);
+ wacom_intuos_pro2_bt_battery(wacom);
+ return 0;
+}
+
static int wacom_24hdt_irq(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->touch_input;
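
Editor's note: the new Intuos Pro 2 Bluetooth parsers above pull multi-byte fields out of the raw report with the unaligned little-endian accessors. A small sketch (the buffer offsets here are illustrative, not the real report layout):

#include <asm/unaligned.h>

static void example_parse(const u8 *buf)
{
	u16 x      = get_unaligned_le16(&buf[1]);	/* bytes 1-2, LSB first */
	u16 y      = get_unaligned_le16(&buf[3]);	/* bytes 3-4 */
	u64 serial = get_unaligned_le64(&buf[5]);	/* bytes 5-12 */

	(void)x; (void)y; (void)serial;	/* placeholders; a real parser reports these */
}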
@@ -1444,7 +1608,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
-static int wacom_equivalent_usage(int usage)
+int wacom_equivalent_usage(int usage)
{
if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
int subpage = (usage & 0xFF00) << 8;
@@ -1471,6 +1635,16 @@ static int wacom_equivalent_usage(int usage)
return subpage | subusage;
}
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMTOUCH) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = WACOM_HID_SP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
return usage;
}
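
Editor's note: the vendor-page remapping above keeps the high byte of the usage's low word as a sub-page and the low byte as the usage index. Worked arithmetic on a hypothetical usage value (constant names as in wacom_wac.h):

u32 usage    = 0x12340156;			/* hypothetical vendor usage */
u32 subpage  = (usage & 0xFF00) << 8;		/* 0x00010000 */
u32 subusage =  usage & 0xFF;			/* 0x00000056 */
u32 mapped   = subpage | subusage;		/* 0x00010056 */
/* If subpage had been 0 (HID_UP_UNDEFINED), WACOM_HID_SP_DIGITIZER is used. */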
@@ -1550,12 +1724,14 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom->generic_has_leds = true;
+ /* fall through */
case WACOM_HID_WD_BUTTONHOME:
case WACOM_HID_WD_BUTTONUP:
case WACOM_HID_WD_BUTTONDOWN:
case WACOM_HID_WD_BUTTONLEFT:
case WACOM_HID_WD_BUTTONRIGHT:
- case WACOM_HID_WD_BUTTONCENTER:
wacom_map_usage(input, usage, field, EV_KEY,
wacom_numbered_button_to_key(features->numbered_buttons),
0);
@@ -1563,7 +1739,17 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHONOFF:
- wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ /*
+ * This usage, which is used to mute touch events, comes
+ * from the pad packet, but is reported on the touch
+ * interface. Because the touch interface may not have
+ * been created yet, we cannot call wacom_map_usage(). In
+ * order to process this usage when we receive it, we set
+ * the usage type and code directly.
+ */
+ wacom_wac->has_mute_touch_switch = true;
+ usage->type = EV_SW;
+ usage->code = SW_MUTE_DEVICE;
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHSTRIP:
@@ -1578,6 +1764,10 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
}
switch (equivalent_usage & 0xfffffff0) {
@@ -1620,17 +1810,40 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct input_dev *input = wacom_wac->pad_input;
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int i;
+
+ /*
+ * Avoid reporting this event and setting inrange_state if this usage
+ * hasn't been mapped.
+ */
+ if (!usage->type)
+ return;
if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
- wacom_wac->hid_data.inrange_state |= value;
+ if (usage->hid != WACOM_HID_WD_TOUCHRING)
+ wacom_wac->hid_data.inrange_state |= value;
}
switch (equivalent_usage) {
case WACOM_HID_WD_TOUCHRINGSTATUS:
+ if (!value)
+ input_event(input, usage->type, usage->code, 0);
+ break;
+
+ case WACOM_HID_WD_TOUCHONOFF:
+ if (wacom_wac->shared->touch_input) {
+ input_report_switch(wacom_wac->shared->touch_input,
+ SW_MUTE_DEVICE, !value);
+ input_sync(wacom_wac->shared->touch_input);
+ }
break;
+ case WACOM_HID_WD_BUTTONCENTER:
+ for (i = 0; i < wacom->led.count; i++)
+ wacom_update_led(wacom, features->numbered_buttons,
+ value, i);
+ /* fall through */
default:
- features->input_event_flag = true;
input_event(input, usage->type, usage->code, value);
break;
}
@@ -1668,20 +1881,15 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pad_input;
bool active = wacom_wac->hid_data.inrange_state != 0;
/* report prox for expresskey events */
if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
- features->input_event_flag = true;
input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
- }
-
- if (features->input_event_flag) {
- features->input_event_flag = false;
input_sync(input);
}
+
}
static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2056,8 +2264,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
for (j = 0; j < field->maxusage; j++) {
struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage =
+ wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
case HID_GD_Y:
case HID_DG_WIDTH:
@@ -2066,7 +2276,7 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
case HID_DG_INRANGE:
case HID_DG_INVERT:
case HID_DG_TIPSWITCH:
- hid_data->last_slot_field = usage->hid;
+ hid_data->last_slot_field = equivalent_usage;
break;
case HID_DG_CONTACTCOUNT:
hid_data->cc_report = report->id;
@@ -2121,8 +2331,8 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
- /* currently, only direct devices have proper hid report descriptors */
- features->device_type |= WACOM_DEVICETYPE_DIRECT;
+ if (WACOM_DIRECT_DEVICE(field))
+ features->device_type |= WACOM_DEVICETYPE_DIRECT;
if (WACOM_PAD_FIELD(field))
wacom_wac_pad_usage_mapping(hdev, field, usage);
@@ -2140,6 +2350,9 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
if (wacom->wacom_wac.features.type != HID_GENERIC)
return;
+ if (value > field->logical_maximum || value < field->logical_minimum)
+ return;
+
if (WACOM_PAD_FIELD(field)) {
wacom_wac_pad_battery_event(hdev, field, usage, value);
if (wacom->wacom_wac.pad_input)
@@ -2187,6 +2400,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
wacom_report_events(hdev, report);
+ /*
+ * Non-input reports may be sent prior to the device being
+ * completely initialized. Since only their events need
+ * to be processed, exit after 'wacom_report_events' has
+ * been called to prevent potential crashes in the report-
+ * processing functions.
+ */
+ if (report->type != HID_INPUT_REPORT)
+ return;
+
if (WACOM_PAD_FIELD(field)) {
wacom_wac_pad_battery_report(hdev, report);
if (wacom->wacom_wac.pad_input)
@@ -2657,6 +2880,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_intuos_irq(wacom_wac);
break;
+ case INTUOSP2_BT:
+ sync = wacom_intuos_pro2_bt_irq(wacom_wac, len);
+ break;
+
case TABLETPC:
case TABLETPCE:
case TABLETPC2FG:
@@ -2767,8 +2994,6 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
- if (features->numbered_buttons > 0)
- features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2828,6 +3053,13 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type = WACOM_DEVICETYPE_PAD;
+ if (features->type == INTUOSP2_BT) {
+ features->device_type |= WACOM_DEVICETYPE_PEN |
+ WACOM_DEVICETYPE_PAD |
+ WACOM_DEVICETYPE_TOUCH;
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ }
+
switch (features->type) {
case PL:
case DTU:
@@ -2974,6 +3206,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
+ case INTUOSP2_BT:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
features->distance_fuzz, 0);
@@ -3082,6 +3315,27 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
}
switch (features->type) {
+ case INTUOSP2_BT:
+ input_dev->evbit[0] |= BIT_MASK(EV_SW);
+ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+
+ if (wacom_wac->shared->touch->product == 0x361) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, 12440, 4, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, 8640, 4, 0);
+ }
+ else if (wacom_wac->shared->touch->product == 0x360) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, 8960, 4, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, 5920, 4, 0);
+ }
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
+
+ /* fall through */
+
case INTUOS5:
case INTUOS5L:
case INTUOSPM:
@@ -3278,6 +3532,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
+ if ((features->type == HID_GENERIC) && features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
if (!(features->device_type & WACOM_DEVICETYPE_PAD))
return -ENODEV;
@@ -3379,6 +3636,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
+ case INTUOSP2_BT:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
@@ -3937,6 +4195,12 @@ static const struct wacom_features wacom_features_0x343 =
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x360 =
+ { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
+ INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x361 =
+ { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
+ INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4103,6 +4367,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
{ USB_DEVICE_WACOM(0x343) },
+ { BT_DEVICE_WACOM(0x360) },
+ { BT_DEVICE_WACOM(0x361) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
@@ -4111,6 +4377,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ I2C_DEVICE_WACOM(HID_ANY_ID) },
+ { BT_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_ids);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index fb0e50acb10d..857ccee16f38 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -12,8 +12,8 @@
#include <linux/types.h>
#include <linux/hid.h>
-/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 192
+/* maximum packet length for USB/BT devices */
+#define WACOM_PKGLEN_MAX 361
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
@@ -72,6 +72,17 @@
#define WACOM_REPORT_REMOTE 17
#define WACOM_REPORT_INTUOSHT2_ID 8
+/* wacom command report ids */
+#define WAC_CMD_WL_LED_CONTROL 0x03
+#define WAC_CMD_LED_CONTROL 0x20
+#define WAC_CMD_ICON_START 0x21
+#define WAC_CMD_ICON_XFER 0x23
+#define WAC_CMD_ICON_BT_XFER 0x26
+#define WAC_CMD_DELETE_PAIRING 0x20
+#define WAC_CMD_LED_CONTROL_GENERIC 0x32
+#define WAC_CMD_UNPAIR_ALL 0xFF
+#define WAC_CMD_WL_INTUOSP2 0x82
+
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
@@ -91,6 +102,7 @@
#define WACOM_HID_SP_DIGITIZER 0x000d0000
#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
@@ -104,6 +116,7 @@
#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
@@ -113,7 +126,6 @@
#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
-#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
@@ -127,6 +139,12 @@
#define WACOM_HID_UP_G11 0xff110000
#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+#define WACOM_HID_UP_WACOMTOUCH 0xff000000
+#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
+#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
+#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
+#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
@@ -144,7 +162,14 @@
((f)->physical == HID_DG_FINGER) || \
((f)->application == HID_DG_TOUCHSCREEN) || \
((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
- ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_WT_TOUCHPAD) || \
+ ((f)->application == HID_DG_TOUCHPAD))
+
+#define WACOM_DIRECT_DEVICE(f) (((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_WT_TOUCHSCREEN) || \
+ ((f)->application == HID_DG_PEN) || \
+ ((f)->application == WACOM_HID_WD_PEN))
enum {
PENPARTNER = 0,
@@ -170,6 +195,7 @@ enum {
INTUOSPS,
INTUOSPM,
INTUOSPL,
+ INTUOSP2_BT,
WACOM_21UX2,
WACOM_22HD,
DTK,
@@ -232,7 +258,6 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
- bool input_event_flag;
};
struct wacom_shared {
@@ -244,6 +269,7 @@ struct wacom_shared {
struct input_dev *touch_input;
struct hid_device *pen;
struct hid_device *touch;
+ bool has_mute_touch_switch;
};
struct hid_data {
@@ -300,6 +326,7 @@ struct wacom_wac {
int mode_report;
int mode_value;
struct hid_data hid_data;
+ bool has_mute_touch_switch;
};
#endif
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cd49cb17eb7f..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return ret;
}
+ init_cached_read_index(channel);
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
sizeof(desc),
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 190d270b20a2..0649d53f3d16 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1459,6 +1459,16 @@ config SENSORS_SCH5636
This driver can also be built as a module. If so, the module
will be called sch5636.
+config SENSORS_STTS751
+ tristate "ST Microelectronics STTS751"
+ depends on I2C
+ help
+ If you say yes here you get support for STTS751
+ temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called stts751.
+
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
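(For reference, the STTS751 entry added above is gated by CONFIG_SENSORS_STTS751 and, per its help text, builds as the "stts751" module. A minimal sketch of enabling and loading it — the bus number and 0x48 address below are purely illustrative, not taken from this patch — might look like:

    CONFIG_I2C=y
    CONFIG_HWMON=y
    CONFIG_SENSORS_STTS751=m

    # after booting the rebuilt kernel, assuming a sensor at 0x48 on i2c-0:
    modprobe stts751
    echo stts751 0x48 > /sys/bus/i2c/devices/i2c-0/new_device
)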
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d2cb7e804a0f..5509edf6186a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -148,6 +148,7 @@ obj-$(CONFIG_SENSORS_SMM665) += smm665.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
+obj-$(CONFIG_SENSORS_STTS751) += stts751.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_TC74) += tc74.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index ad2b47e40345..bbe3a5c5b3f5 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -28,6 +28,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
+#include <linux/of.h>
/* Addresses to scan
* The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -58,15 +59,22 @@ static const unsigned short normal_i2c[] = {
#define ADC128_REG_MAN_ID 0x3e
#define ADC128_REG_DEV_ID 0x3f
+/* No. of voltage entries in adc128_attrs */
+#define ADC128_ATTR_NUM_VOLT (8 * 4)
+
+/* Voltage inputs visible per operation mode */
+static const u8 num_inputs[] = { 7, 8, 4, 6 };
+
struct adc128_data {
struct i2c_client *client;
struct regulator *regulator;
int vref; /* Reference voltage in mV */
struct mutex update_lock;
+ u8 mode; /* Operation mode */
bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
- u16 in[3][7]; /* Register value, normalized to 12 bit
+ u16 in[3][8]; /* Register value, normalized to 12 bit
* 0: input voltage
* 1: min limit
* 2: max limit
@@ -87,7 +95,7 @@ static struct adc128_data *adc128_update_device(struct device *dev)
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < num_inputs[data->mode]; i++) {
rv = i2c_smbus_read_word_swapped(client,
ADC128_REG_IN(i));
if (rv < 0)
@@ -107,20 +115,25 @@ static struct adc128_data *adc128_update_device(struct device *dev)
data->in[2][i] = rv << 4;
}
- rv = i2c_smbus_read_word_swapped(client, ADC128_REG_TEMP);
- if (rv < 0)
- goto abort;
- data->temp[0] = rv >> 7;
+ if (data->mode != 1) {
+ rv = i2c_smbus_read_word_swapped(client,
+ ADC128_REG_TEMP);
+ if (rv < 0)
+ goto abort;
+ data->temp[0] = rv >> 7;
- rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_MAX);
- if (rv < 0)
- goto abort;
- data->temp[1] = rv << 1;
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_TEMP_MAX);
+ if (rv < 0)
+ goto abort;
+ data->temp[1] = rv << 1;
- rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_HYST);
- if (rv < 0)
- goto abort;
- data->temp[2] = rv << 1;
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_TEMP_HYST);
+ if (rv < 0)
+ goto abort;
+ data->temp[2] = rv << 1;
+ }
rv = i2c_smbus_read_byte_data(client, ADC128_REG_ALARM);
if (rv < 0)
@@ -240,6 +253,25 @@ static ssize_t adc128_show_alarm(struct device *dev,
return sprintf(buf, "%u\n", !!(alarms & mask));
}
+static umode_t adc128_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct adc128_data *data = dev_get_drvdata(dev);
+
+ if (index < ADC128_ATTR_NUM_VOLT) {
+ /* Voltage, visible according to num_inputs[] */
+ if (index >= num_inputs[data->mode] * 4)
+ return 0;
+ } else {
+ /* Temperature, visible if not in mode 1 */
+ if (data->mode == 1)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
adc128_show_in, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
@@ -289,6 +321,13 @@ static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
adc128_show_in, adc128_set_in, 6, 2);
+static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO,
+ adc128_show_in, NULL, 7, 0);
+static SENSOR_DEVICE_ATTR_2(in7_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 7, 1);
+static SENSOR_DEVICE_ATTR_2(in7_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 7, 2);
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adc128_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
adc128_show_temp, adc128_set_temp, 1);
@@ -302,44 +341,54 @@ static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, adc128_show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, adc128_show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, adc128_show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, adc128_show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
static struct attribute *adc128_attrs[] = {
- &sensor_dev_attr_in0_min.dev_attr.attr,
- &sensor_dev_attr_in1_min.dev_attr.attr,
- &sensor_dev_attr_in2_min.dev_attr.attr,
- &sensor_dev_attr_in3_min.dev_attr.attr,
- &sensor_dev_attr_in4_min.dev_attr.attr,
- &sensor_dev_attr_in5_min.dev_attr.attr,
- &sensor_dev_attr_in6_min.dev_attr.attr,
- &sensor_dev_attr_in0_max.dev_attr.attr,
- &sensor_dev_attr_in1_max.dev_attr.attr,
- &sensor_dev_attr_in2_max.dev_attr.attr,
- &sensor_dev_attr_in3_max.dev_attr.attr,
- &sensor_dev_attr_in4_max.dev_attr.attr,
- &sensor_dev_attr_in5_max.dev_attr.attr,
- &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in7_alarm.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in7_max.dev_attr.attr,
+ &sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_in0_alarm.dev_attr.attr,
- &sensor_dev_attr_in1_alarm.dev_attr.attr,
- &sensor_dev_attr_in2_alarm.dev_attr.attr,
- &sensor_dev_attr_in3_alarm.dev_attr.attr,
- &sensor_dev_attr_in4_alarm.dev_attr.attr,
- &sensor_dev_attr_in5_alarm.dev_attr.attr,
- &sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
NULL
};
-ATTRIBUTE_GROUPS(adc128);
+
+static struct attribute_group adc128_group = {
+ .attrs = adc128_attrs,
+ .is_visible = adc128_is_visible,
+};
+__ATTRIBUTE_GROUPS(adc128);
static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
{
@@ -387,6 +436,15 @@ static int adc128_init_client(struct adc128_data *data)
if (err)
return err;
+ /* Set operation mode, if non-default */
+ if (data->mode != 0) {
+ err = i2c_smbus_write_byte_data(client,
+ ADC128_REG_CONFIG_ADV,
+ data->mode << 1);
+ if (err)
+ return err;
+ }
+
/* Start monitoring */
err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x01);
if (err)
@@ -433,6 +491,21 @@ static int adc128_probe(struct i2c_client *client,
data->vref = 2560; /* 2.56V, in mV */
}
+ /* Operation mode is optional. If unspecified, keep current mode */
+ if (of_property_read_u8(dev->of_node, "ti,mode", &data->mode) == 0) {
+ if (data->mode > 3) {
+ dev_err(dev, "invalid operation mode %d\n",
+ data->mode);
+ err = -EINVAL;
+ goto error;
+ }
+ } else {
+ err = i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG_ADV);
+ if (err < 0)
+ goto error;
+ data->mode = (err >> 1) & ADC128_REG_MASK;
+ }
+
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
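(The probe change above reads an optional "ti,mode" devicetree property as a u8 and otherwise keeps the mode currently programmed in ADC128_REG_CONFIG_ADV. A hypothetical node illustrating the property — only the "ti,mode" name and the meaning of mode 2 (four voltage inputs plus temperature, per num_inputs[] and adc128_is_visible() above) come from this patch; the compatible string and address are assumptions:

    adc@1d {
        compatible = "ti,adc128d818";
        reg = <0x1d>;
        ti,mode = /bits/ 8 <2>;    /* mode 2: in0..in3 + temperature */
    };
)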
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 1fdcc3e703b9..eacf10fadbc6 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -191,7 +191,7 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}
-static ssize_t show_alarms(struct device *dev,
+static ssize_t alarms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -251,16 +251,16 @@ static ssize_t set_temp_min(struct device *dev,
return count;
}
-static ssize_t show_low_power(struct device *dev,
+static ssize_t low_power_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%d\n", data->low_power);
}
-static ssize_t set_low_power(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t low_power_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adm1021_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -303,8 +303,8 @@ static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(low_power, S_IWUSR | S_IRUGO, show_low_power, set_low_power);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(low_power);
static struct attribute *adm1021_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 1abb4609b412..1e4dad36f5ef 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -333,12 +333,12 @@ set_temp(1);
set_temp(2);
static ssize_t
-show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t
show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -358,21 +358,21 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
static ssize_t
-show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adm1025_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1025_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -388,7 +388,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
data->vrm = val;
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
/*
* Real code
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index b2a5d9e5c590..e43f09a07cd0 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1034,15 +1034,15 @@ temp_crit_reg(1);
temp_crit_reg(2);
temp_crit_reg(3);
-static ssize_t show_analog_out_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t analog_out_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", DAC_FROM_REG(data->analog_out));
}
-static ssize_t set_analog_out_reg(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t analog_out_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1060,11 +1060,10 @@ static ssize_t set_analog_out_reg(struct device *dev,
return count;
}
-static DEVICE_ATTR(analog_out, S_IRUGO | S_IWUSR, show_analog_out_reg,
- set_analog_out_reg);
+static DEVICE_ATTR_RW(analog_out);
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
int vid = (data->gpio >> 11) & 0x1f;
@@ -1073,17 +1072,17 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", vid_from_reg(vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct adm1026_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1100,16 +1099,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_alarms_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1148,14 +1147,15 @@ static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24);
static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26);
-static ssize_t show_alarm_mask(struct device *dev,
+static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->alarm_mask);
}
-static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t alarm_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1186,18 +1186,17 @@ static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(alarm_mask, S_IRUGO | S_IWUSR, show_alarm_mask,
- set_alarm_mask);
+static DEVICE_ATTR_RW(alarm_mask);
-static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->gpio);
}
-static ssize_t set_gpio(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t gpio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1221,16 +1220,18 @@ static ssize_t set_gpio(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(gpio, S_IRUGO | S_IWUSR, show_gpio, set_gpio);
+static DEVICE_ATTR_RW(gpio);
-static ssize_t show_gpio_mask(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_mask_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->gpio_mask);
}
-static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t gpio_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1254,17 +1255,17 @@ static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(gpio_mask, S_IRUGO | S_IWUSR, show_gpio_mask, set_gpio_mask);
+static DEVICE_ATTR_RW(gpio_mask);
-static ssize_t show_pwm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm1.pwm));
}
-static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1285,16 +1286,17 @@ static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_auto_point1_pwm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->pwm1.auto_pwm_min);
}
-static ssize_t set_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp1_auto_point1_pwm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1316,21 +1318,23 @@ static ssize_t set_auto_pwm_min(struct device *dev,
return count;
}
-static ssize_t show_auto_pwm_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_auto_point2_pwm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", ADM1026_PWM_MAX);
}
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->pwm1.enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1366,25 +1370,25 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
}
/* enable PWM fan control */
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
- set_pwm_enable);
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
- set_pwm_enable);
-static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
- set_pwm_enable);
-static DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO | S_IWUSR,
- show_auto_pwm_min, set_auto_pwm_min);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+ pwm1_enable_store);
+static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+ pwm1_enable_store);
+static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR,
- show_auto_pwm_min, set_auto_pwm_min);
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR,
- show_auto_pwm_min, set_auto_pwm_min);
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
-static DEVICE_ATTR(temp1_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
-static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
-static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL);
+static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
+static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+ NULL);
+static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+ NULL);
static struct attribute *adm1026_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index a5818980dad7..bcf508269fd6 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -829,14 +829,14 @@ temp_reg(2);
temp_reg(3);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", data->alarm);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -867,7 +867,7 @@ static const unsigned int update_intervals[] = {
16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1031_data *data = dev_get_drvdata(dev);
@@ -875,9 +875,9 @@ static ssize_t show_update_interval(struct device *dev,
return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -912,8 +912,7 @@ static ssize_t set_update_interval(struct device *dev,
return count;
}
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
- set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 72bf2489511e..255413fdbde9 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -262,8 +262,8 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
/*** sysfs accessors ***/
/* temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
- char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *dummy, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
@@ -298,7 +298,7 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
return count;
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_max, set_max, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
@@ -501,13 +501,13 @@ fan(1);
fan(2);
/* alarms */
-static ssize_t show_alarms(struct device *dev,
+static ssize_t alarms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -527,25 +527,25 @@ static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
/* vid */
-static ssize_t show_vid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
/* analog output */
-static ssize_t show_aout(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t aout_output_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout));
}
-static ssize_t set_aout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t aout_output_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -562,7 +562,7 @@ static ssize_t set_aout(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
-static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
+static DEVICE_ATTR_RW(aout_output);
static ssize_t chassis_clear(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index bdeaece9641d..b939f8a115ba 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -21,6 +21,21 @@
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
+#define ADT7411_REG_STAT_1 0x00
+#define ADT7411_STAT_1_INT_TEMP_HIGH BIT(0)
+#define ADT7411_STAT_1_INT_TEMP_LOW BIT(1)
+#define ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1 BIT(2)
+#define ADT7411_STAT_1_EXT_TEMP_LOW BIT(3)
+#define ADT7411_STAT_1_EXT_TEMP_FAULT BIT(4)
+#define ADT7411_STAT_1_AIN2 BIT(5)
+#define ADT7411_STAT_1_AIN3 BIT(6)
+#define ADT7411_STAT_1_AIN4 BIT(7)
+#define ADT7411_REG_STAT_2 0x01
+#define ADT7411_STAT_2_AIN5 BIT(0)
+#define ADT7411_STAT_2_AIN6 BIT(1)
+#define ADT7411_STAT_2_AIN7 BIT(2)
+#define ADT7411_STAT_2_AIN8 BIT(3)
+#define ADT7411_STAT_2_VDD BIT(4)
#define ADT7411_REG_INT_TEMP_VDD_LSB 0x03
#define ADT7411_REG_EXT_TEMP_AIN14_LSB 0x04
#define ADT7411_REG_VDD_MSB 0x06
@@ -28,20 +43,31 @@
#define ADT7411_REG_EXT_TEMP_AIN1_MSB 0x08
#define ADT7411_REG_CFG1 0x18
-#define ADT7411_CFG1_START_MONITOR (1 << 0)
-#define ADT7411_CFG1_RESERVED_BIT1 (1 << 1)
-#define ADT7411_CFG1_EXT_TDM (1 << 2)
-#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
+#define ADT7411_CFG1_START_MONITOR BIT(0)
+#define ADT7411_CFG1_RESERVED_BIT1 BIT(1)
+#define ADT7411_CFG1_EXT_TDM BIT(2)
+#define ADT7411_CFG1_RESERVED_BIT3 BIT(3)
#define ADT7411_REG_CFG2 0x19
-#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
+#define ADT7411_CFG2_DISABLE_AVG BIT(5)
#define ADT7411_REG_CFG3 0x1a
-#define ADT7411_CFG3_ADC_CLK_225 (1 << 0)
-#define ADT7411_CFG3_RESERVED_BIT1 (1 << 1)
-#define ADT7411_CFG3_RESERVED_BIT2 (1 << 2)
-#define ADT7411_CFG3_RESERVED_BIT3 (1 << 3)
-#define ADT7411_CFG3_REF_VDD (1 << 4)
+#define ADT7411_CFG3_ADC_CLK_225 BIT(0)
+#define ADT7411_CFG3_RESERVED_BIT1 BIT(1)
+#define ADT7411_CFG3_RESERVED_BIT2 BIT(2)
+#define ADT7411_CFG3_RESERVED_BIT3 BIT(3)
+#define ADT7411_CFG3_REF_VDD BIT(4)
+
+#define ADT7411_REG_VDD_HIGH 0x23
+#define ADT7411_REG_VDD_LOW 0x24
+#define ADT7411_REG_TEMP_HIGH(nr) (0x25 + 2 * (nr))
+#define ADT7411_REG_TEMP_LOW(nr) (0x26 + 2 * (nr))
+#define ADT7411_REG_IN_HIGH(nr) ((nr) > 1 \
+ ? 0x2b + 2 * ((nr)-2) \
+ : 0x27)
+#define ADT7411_REG_IN_LOW(nr) ((nr) > 1 \
+ ? 0x2c + 2 * ((nr)-2) \
+ : 0x28)
#define ADT7411_REG_DEVICE_ID 0x4d
#define ADT7411_REG_MANUFACTURER_ID 0x4e
@@ -51,6 +77,30 @@
static const unsigned short normal_i2c[] = { 0x48, 0x4a, 0x4b, I2C_CLIENT_END };
+static const u8 adt7411_in_alarm_reg[] = {
+ ADT7411_REG_STAT_2,
+ ADT7411_REG_STAT_1,
+ ADT7411_REG_STAT_1,
+ ADT7411_REG_STAT_1,
+ ADT7411_REG_STAT_1,
+ ADT7411_REG_STAT_2,
+ ADT7411_REG_STAT_2,
+ ADT7411_REG_STAT_2,
+ ADT7411_REG_STAT_2,
+};
+
+static const u8 adt7411_in_alarm_bits[] = {
+ ADT7411_STAT_2_VDD,
+ ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1,
+ ADT7411_STAT_1_AIN2,
+ ADT7411_STAT_1_AIN3,
+ ADT7411_STAT_1_AIN4,
+ ADT7411_STAT_2_AIN5,
+ ADT7411_STAT_2_AIN6,
+ ADT7411_STAT_2_AIN7,
+ ADT7411_STAT_2_AIN8,
+};
+
struct adt7411_data {
struct mutex device_lock; /* for "atomic" device accesses */
struct mutex update_lock;
@@ -165,6 +215,19 @@ static struct attribute *adt7411_attrs[] = {
};
ATTRIBUTE_GROUPS(adt7411);
+static int adt7411_read_in_alarm(struct device *dev, int channel, long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, adt7411_in_alarm_reg[channel]);
+ if (ret < 0)
+ return ret;
+ *val = !!(ret & adt7411_in_alarm_bits[channel]);
+ return 0;
+}
+
static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
{
struct adt7411_data *data = dev_get_drvdata(dev);
@@ -179,32 +242,41 @@ static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
return ret;
*val = ret * 7000 / 1024;
return 0;
+ case hwmon_in_min:
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_VDD_LOW);
+ if (ret < 0)
+ return ret;
+ *val = ret * 7000 / 256;
+ return 0;
+ case hwmon_in_max:
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_VDD_HIGH);
+ if (ret < 0)
+ return ret;
+ *val = ret * 7000 / 256;
+ return 0;
+ case hwmon_in_alarm:
+ return adt7411_read_in_alarm(dev, 0, val);
default:
return -EOPNOTSUPP;
}
}
-static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
- long *val)
+static int adt7411_update_vref(struct device *dev)
{
struct adt7411_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ int val;
- int ret;
- int lsb_reg, lsb_shift;
- int nr = channel - 1;
-
- mutex_lock(&data->update_lock);
if (time_after_eq(jiffies, data->next_update)) {
- ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
- if (ret < 0)
- goto exit_unlock;
+ val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+ if (val < 0)
+ return val;
- if (ret & ADT7411_CFG3_REF_VDD) {
- ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+ if (val & ADT7411_CFG3_REF_VDD) {
+ val = adt7411_read_in_vdd(dev, hwmon_in_input,
&data->vref_cached);
- if (ret < 0)
- goto exit_unlock;
+ if (val < 0)
+ return val;
} else {
data->vref_cached = 2250;
}
@@ -212,6 +284,24 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
data->next_update = jiffies + HZ;
}
+ return 0;
+}
+
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ int ret;
+ int reg, lsb_reg, lsb_shift;
+ int nr = channel - 1;
+
+ mutex_lock(&data->update_lock);
+ ret = adt7411_update_vref(dev);
+ if (ret < 0)
+ goto exit_unlock;
+
switch (attr) {
case hwmon_in_input:
lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
@@ -224,6 +314,20 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
*val = ret * data->vref_cached / 1024;
ret = 0;
break;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ reg = (attr == hwmon_in_min)
+ ? ADT7411_REG_IN_LOW(channel)
+ : ADT7411_REG_IN_HIGH(channel);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ goto exit_unlock;
+ *val = ret * data->vref_cached / 256;
+ ret = 0;
+ break;
+ case hwmon_in_alarm:
+ ret = adt7411_read_in_alarm(dev, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -242,12 +346,44 @@ static int adt7411_read_in(struct device *dev, u32 attr, int channel,
return adt7411_read_in_chan(dev, attr, channel, val);
}
+
+static int adt7411_read_temp_alarm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret, bit;
+
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_STAT_1);
+ if (ret < 0)
+ return ret;
+
+ switch (attr) {
+ case hwmon_temp_min_alarm:
+ bit = channel ? ADT7411_STAT_1_EXT_TEMP_LOW
+ : ADT7411_STAT_1_INT_TEMP_LOW;
+ break;
+ case hwmon_temp_max_alarm:
+ bit = channel ? ADT7411_STAT_1_EXT_TEMP_HIGH_AIN1
+ : ADT7411_STAT_1_INT_TEMP_HIGH;
+ break;
+ case hwmon_temp_fault:
+ bit = ADT7411_STAT_1_EXT_TEMP_FAULT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *val = !!(ret & bit);
+ return 0;
+}
+
static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
long *val)
{
struct adt7411_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- int ret, regl, regh;
+ int ret, reg, regl, regh;
switch (attr) {
case hwmon_temp_input:
@@ -261,6 +397,21 @@ static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
*val = ret * 250;
return 0;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ reg = (attr == hwmon_temp_min)
+ ? ADT7411_REG_TEMP_LOW(channel)
+ : ADT7411_REG_TEMP_HIGH(channel);
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ return ret;
+ ret = ret & 0x80 ? ret - 0x100 : ret; /* 8 bit signed */
+ *val = ret * 1000;
+ return 0;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return adt7411_read_temp_alarm(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
@@ -279,26 +430,143 @@ static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
}
}
+static int adt7411_write_in_vdd(struct device *dev, u32 attr, long val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int reg;
+
+ val = clamp_val(val, 0, 255 * 7000 / 256);
+ val = DIV_ROUND_CLOSEST(val * 256, 7000);
+
+ switch (attr) {
+ case hwmon_in_min:
+ reg = ADT7411_REG_VDD_LOW;
+ break;
+ case hwmon_in_max:
+ reg = ADT7411_REG_VDD_HIGH;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int adt7411_write_in_chan(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret, reg;
+
+ mutex_lock(&data->update_lock);
+ ret = adt7411_update_vref(dev);
+ if (ret < 0)
+ goto exit_unlock;
+ val = clamp_val(val, 0, 255 * data->vref_cached / 256);
+ val = DIV_ROUND_CLOSEST(val * 256, data->vref_cached);
+
+ switch (attr) {
+ case hwmon_in_min:
+ reg = ADT7411_REG_IN_LOW(channel);
+ break;
+ case hwmon_in_max:
+ reg = ADT7411_REG_IN_HIGH(channel);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ exit_unlock:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static int adt7411_write_in(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ if (channel == 0)
+ return adt7411_write_in_vdd(dev, attr, val);
+ else
+ return adt7411_write_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int reg;
+
+ val = clamp_val(val, -128000, 127000);
+ val = DIV_ROUND_CLOSEST(val, 1000);
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = ADT7411_REG_TEMP_LOW(channel);
+ break;
+ case hwmon_temp_max:
+ reg = ADT7411_REG_TEMP_HIGH(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int adt7411_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return adt7411_write_in(dev, attr, channel, val);
+ case hwmon_temp:
+ return adt7411_write_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static umode_t adt7411_is_visible(const void *_data,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
const struct adt7411_data *data = _data;
+ bool visible;
switch (type) {
case hwmon_in:
- if (channel > 0 && channel < 3)
- return data->use_ext_temp ? 0 : S_IRUGO;
- else
- return S_IRUGO;
+ visible = channel == 0 || channel >= 3 || !data->use_ext_temp;
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_alarm:
+ return visible ? S_IRUGO : 0;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return visible ? S_IRUGO | S_IWUSR : 0;
+ }
+ break;
case hwmon_temp:
- if (channel == 1)
- return data->use_ext_temp ? S_IRUGO : 0;
- else
- return S_IRUGO;
+ visible = channel == 0 || data->use_ext_temp;
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return visible ? S_IRUGO : 0;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return visible ? S_IRUGO | S_IWUSR : 0;
+ }
+ break;
default:
- return 0;
+ break;
}
+ return 0;
}
static int adt7411_detect(struct i2c_client *client,
@@ -372,15 +640,15 @@ static int adt7411_init_device(struct adt7411_data *data)
}
static const u32 adt7411_in_config[] = {
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
0
};
@@ -390,8 +658,10 @@ static const struct hwmon_channel_info adt7411_in = {
};
static const u32 adt7411_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT,
0
};
@@ -409,6 +679,7 @@ static const struct hwmon_channel_info *adt7411_info[] = {
static const struct hwmon_ops adt7411_hwmon_ops = {
.is_visible = adt7411_is_visible,
.read = adt7411_read,
+ .write = adt7411_write,
};
static const struct hwmon_chip_info adt7411_chip_info = {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c9a1d9c25572..2cd920751441 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -403,7 +403,7 @@ out:
return data;
}
-static ssize_t show_auto_update_interval(struct device *dev,
+static ssize_t auto_update_interval_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
@@ -411,10 +411,9 @@ static ssize_t show_auto_update_interval(struct device *dev,
return sprintf(buf, "%d\n", data->auto_update_interval);
}
-static ssize_t set_auto_update_interval(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
+static ssize_t auto_update_interval_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adt7470_data *data = dev_get_drvdata(dev);
long temp;
@@ -431,7 +430,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
return count;
}
-static ssize_t show_num_temp_sensors(struct device *dev,
+static ssize_t num_temp_sensors_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
@@ -439,10 +438,9 @@ static ssize_t show_num_temp_sensors(struct device *dev,
return sprintf(buf, "%d\n", data->num_temp_sensors);
}
-static ssize_t set_num_temp_sensors(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
+static ssize_t num_temp_sensors_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adt7470_data *data = dev_get_drvdata(dev);
long temp;
@@ -537,7 +535,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", 1000 * data->temp[attr->index]);
}
-static ssize_t show_alarm_mask(struct device *dev,
+static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
@@ -546,10 +544,9 @@ static ssize_t show_alarm_mask(struct device *dev,
return sprintf(buf, "%x\n", data->alarms_mask);
}
-static ssize_t set_alarm_mask(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
+static ssize_t alarm_mask_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adt7470_data *data = dev_get_drvdata(dev);
long mask;
@@ -723,8 +720,8 @@ static const int adt7470_freq_map[] = {
11, 15, 22, 29, 35, 44, 59, 88, 1400, 22500
};
-static ssize_t show_pwm_freq(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t pwm1_freq_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct adt7470_data *data = adt7470_update_device(dev);
unsigned char cfg_reg_1;
@@ -745,9 +742,9 @@ static ssize_t show_pwm_freq(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", adt7470_freq_map[index]);
}
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t pwm1_freq_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adt7470_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -1012,12 +1009,9 @@ static ssize_t show_alarm(struct device *dev,
return sprintf(buf, "0\n");
}
-static DEVICE_ATTR(alarm_mask, S_IWUSR | S_IRUGO, show_alarm_mask,
- set_alarm_mask);
-static DEVICE_ATTR(num_temp_sensors, S_IWUSR | S_IRUGO, show_num_temp_sensors,
- set_num_temp_sensors);
-static DEVICE_ATTR(auto_update_interval, S_IWUSR | S_IRUGO,
- show_auto_update_interval, set_auto_update_interval);
+static DEVICE_ATTR_RW(alarm_mask);
+static DEVICE_ATTR_RW(num_temp_sensors);
+static DEVICE_ATTR_RW(auto_update_interval);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0);
@@ -1133,7 +1127,7 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
-static DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO, show_pwm_freq, set_pwm_freq);
+static DEVICE_ATTR_RW(pwm1_freq);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 0);
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 3cefd1aeb24f..c646670b9ea9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -856,16 +856,17 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_at_crit(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
}
-static ssize_t set_pwm_at_crit(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t pwm_use_point2_pwm_at_crit_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7475_data *data = i2c_get_clientdata(client);
@@ -888,15 +889,15 @@ static ssize_t set_pwm_at_crit(struct device *dev,
return count;
}
-static ssize_t show_vrm(struct device *dev, struct device_attribute *devattr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct adt7475_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (int)data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adt7475_data *data = dev_get_drvdata(dev);
long val;
@@ -910,8 +911,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_vid(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
@@ -1057,11 +1058,10 @@ static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
set_pwm, MAX, 2);
/* Non-standard name, might need revisiting */
-static DEVICE_ATTR(pwm_use_point2_pwm_at_crit, S_IWUSR | S_IRUGO,
- show_pwm_at_crit, set_pwm_at_crit);
+static DEVICE_ATTR_RW(pwm_use_point2_pwm_at_crit);
-static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, set_vrm);
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RW(vrm);
+static DEVICE_ATTR_RO(cpu0_vid);
static struct attribute *adt7475_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index 98141f483165..0f538f8be6bf 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -331,9 +331,8 @@ static ssize_t adt7x10_show_alarm(struct device *dev,
return sprintf(buf, "%d\n", !!(ret & attr->index));
}
-static ssize_t adt7x10_show_name(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
@@ -359,7 +358,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adt7x10_show_alarm,
NULL, ADT7X10_STAT_T_HIGH);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, adt7x10_show_alarm,
NULL, ADT7X10_STAT_T_CRIT);
-static DEVICE_ATTR(name, S_IRUGO, adt7x10_show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *adt7x10_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 272fcc837ecc..62e191311139 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -483,25 +483,25 @@ sysfs_temp(3);
sysfs_temp(4);
/* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct asb100_data *data = asb100_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
/* VRM */
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct asb100_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct asb100_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -519,16 +519,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
}
/* Alarms */
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct asb100_data *data = asb100_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -550,15 +550,15 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
/* 1 PWM */
-static ssize_t show_pwm1(struct device *dev, struct device_attribute *attr,
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct asb100_data *data = asb100_update_device(dev);
return sprintf(buf, "%d\n", ASB100_PWM_FROM_REG(data->pwm & 0x0f));
}
-static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct asb100_data *data = i2c_get_clientdata(client);
@@ -577,15 +577,16 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_enable1(struct device *dev,
+static ssize_t pwm1_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asb100_data *data = asb100_update_device(dev);
return sprintf(buf, "%d\n", (data->pwm & 0x80) ? 1 : 0);
}
-static ssize_t set_pwm_enable1(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct asb100_data *data = i2c_get_clientdata(client);
@@ -604,9 +605,8 @@ static ssize_t set_pwm_enable1(struct device *dev,
return count;
}
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm1, set_pwm1);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
- show_pwm_enable1, set_pwm_enable1);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
static struct attribute *asb100_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index f2f2f2fc755a..b7eadb54c8cb 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -81,8 +81,8 @@ static struct atxp1_data *atxp1_update_device(struct device *dev)
}
/* sys file functions for cpu0_vid */
-static ssize_t atxp1_showvcore(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
@@ -95,9 +95,9 @@ static ssize_t atxp1_showvcore(struct device *dev,
return size;
}
-static ssize_t atxp1_storevcore(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t cpu0_vid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct atxp1_data *data = atxp1_update_device(dev);
struct i2c_client *client = data->client;
@@ -154,12 +154,11 @@ static ssize_t atxp1_storevcore(struct device *dev,
* CPU core reference voltage
* unit: millivolt
*/
-static DEVICE_ATTR(cpu0_vid, S_IRUGO | S_IWUSR, atxp1_showvcore,
- atxp1_storevcore);
+static DEVICE_ATTR_RW(cpu0_vid);
/* sys file functions for GPIO1 */
-static ssize_t atxp1_showgpio1(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t gpio1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int size;
struct atxp1_data *data;
@@ -171,9 +170,8 @@ static ssize_t atxp1_showgpio1(struct device *dev,
return size;
}
-static ssize_t atxp1_storegpio1(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t gpio1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct atxp1_data *data = atxp1_update_device(dev);
struct i2c_client *client = data->client;
@@ -201,11 +199,11 @@ static ssize_t atxp1_storegpio1(struct device *dev,
* GPIO1 data register
* unit: Four bit as hex (e.g. 0x0f)
*/
-static DEVICE_ATTR(gpio1, S_IRUGO | S_IWUSR, atxp1_showgpio1, atxp1_storegpio1);
+static DEVICE_ATTR_RW(gpio1);
/* sys file functions for GPIO2 */
-static ssize_t atxp1_showgpio2(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t gpio2_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int size;
struct atxp1_data *data;
@@ -217,9 +215,8 @@ static ssize_t atxp1_showgpio2(struct device *dev,
return size;
}
-static ssize_t atxp1_storegpio2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t gpio2_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct atxp1_data *data = atxp1_update_device(dev);
struct i2c_client *client = data->client;
@@ -246,7 +243,7 @@ static ssize_t atxp1_storegpio2(struct device *dev,
* GPIO2 data register
* unit: Eight bit as hex (e.g. 0xff)
*/
-static DEVICE_ATTR(gpio2, S_IRUGO | S_IWUSR, atxp1_showgpio2, atxp1_storegpio2);
+static DEVICE_ATTR_RW(gpio2);
static struct attribute *atxp1_attrs[] = {
&dev_attr_gpio1.attr,
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 8763c4a8280c..aa40a00ad689 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -279,7 +279,8 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
static inline int IN_TO_REG(long val, int nominal)
{
- return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
+ val = clamp_val(val, 0, 255 * nominal / 192);
+ return DIV_ROUND_CLOSEST(val * 192, nominal);
}
/*
@@ -295,7 +296,8 @@ static inline int TEMP_FROM_REG(int reg, int res)
static inline int TEMP_TO_REG(long val)
{
- return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
+ val = clamp_val(val, -128000, 127000);
+ return DIV_ROUND_CLOSEST(val, 1000);
}
/* Temperature range */
@@ -331,9 +333,10 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
}
-static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
+static inline int TEMP_HYST_TO_REG(int temp, long hyst, int ix, int reg)
{
- int hyst = clamp_val((val + 500) / 1000, 0, 15);
+ hyst = clamp_val(hyst, temp - 15000, temp);
+ hyst = DIV_ROUND_CLOSEST(temp - hyst, 1000);
return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
}
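The dme1737 conversion helpers above move from "scale, then clamp the result" to "clamp the input, then round": bounding val before the multiplication keeps the intermediate product in range for arbitrary user-supplied limit values, and DIV_ROUND_CLOSEST() replaces the hand-rolled +nominal/2 and ±500 rounding terms. A small self-contained check of the pattern, using local stand-ins (clamp_val(), div_round_closest()) for the kernel helpers:

	#include <stdio.h>

	/* local stand-ins for the kernel helpers, signed values, positive divisor */
	static long clamp_val(long v, long lo, long hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	static long div_round_closest(long x, long d)
	{
		return x >= 0 ? (x + d / 2) / d : (x - d / 2) / d;
	}

	/* new-style IN_TO_REG(): clamp to the representable range, then round */
	static int in_to_reg(long val, int nominal)
	{
		val = clamp_val(val, 0, 255L * nominal / 192);
		return div_round_closest(val * 192, nominal);
	}

	/* new-style TEMP_TO_REG(): millidegrees C to a signed 8-bit register */
	static int temp_to_reg(long val)
	{
		val = clamp_val(val, -128000, 127000);
		return div_round_closest(val, 1000);
	}

	int main(void)
	{
		printf("%d\n", in_to_reg(3300, 3300));   /* 192, the nominal point */
		printf("%d\n", in_to_reg(999999, 3300)); /* 255, large input is clamped */
		printf("%d\n", temp_to_reg(-127500));    /* -128, rounds half away from zero */
		return 0;
	}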
@@ -1022,7 +1025,9 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
int ix = sensor_attr_2->index;
int fn = sensor_attr_2->nr;
long val;
+ int temp;
int err;
+ u8 reg;
err = kstrtol(buf, 10, &val);
if (err)
@@ -1035,10 +1040,9 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
data->zone_low[ix] = dme1737_read(data,
DME1737_REG_ZONE_LOW(ix));
/* Modify the temp hyst value */
- data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(
- TEMP_FROM_REG(data->zone_low[ix], 8) -
- val, ix, dme1737_read(data,
- DME1737_REG_ZONE_HYST(ix == 2)));
+ temp = TEMP_FROM_REG(data->zone_low[ix], 8);
+ reg = dme1737_read(data, DME1737_REG_ZONE_HYST(ix == 2));
+ data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(temp, val, ix, reg);
dme1737_write(data, DME1737_REG_ZONE_HYST(ix == 2),
data->zone_hyst[ix == 2]);
break;
@@ -1055,10 +1059,10 @@ static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
* Modify the temp range value (which is stored in the upper
* nibble of the pwm_freq register)
*/
- data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val -
- TEMP_FROM_REG(data->zone_low[ix], 8),
- dme1737_read(data,
- DME1737_REG_PWM_FREQ(ix)));
+ temp = TEMP_FROM_REG(data->zone_low[ix], 8);
+ val = clamp_val(val, temp, temp + 80000);
+ reg = dme1737_read(data, DME1737_REG_PWM_FREQ(ix));
+ data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val - temp, reg);
dme1737_write(data, DME1737_REG_PWM_FREQ(ix),
data->pwm_freq[ix]);
break;
@@ -1468,7 +1472,7 @@ exit:
* Miscellaneous sysfs attributes
* --------------------------------------------------------------------- */
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1477,8 +1481,8 @@ static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct dme1737_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1495,15 +1499,15 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dme1737_data *data = dme1737_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dme1737_data *data = dev_get_drvdata(dev);
@@ -1645,9 +1649,9 @@ SENSOR_DEVICE_ATTR_PWM_5TO6(6);
/* Misc */
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */
+static DEVICE_ATTR_RW(vrm);
+static DEVICE_ATTR_RO(cpu0_vid);
+static DEVICE_ATTR_RO(name); /* for ISA devices */
/*
* This struct holds all the attributes that are always present and need to be
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 8890870309e4..5c317fc32a4a 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -263,7 +263,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct ds1621_data *data = ds1621_update_client(dev);
@@ -278,15 +278,16 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", !!(data->conf & attr->index));
}
-static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t update_interval_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct ds1621_data *data = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
}
-static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
struct ds1621_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -315,9 +316,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
return count;
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_convrate,
- set_convrate);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(update_interval);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 4b870ee9b0d3..1ed9a7aa953d 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -284,7 +284,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
}
static ssize_t
-show_fan(struct device *dev, struct device_attribute *da, char *buf)
+fan1_input_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
@@ -294,7 +294,7 @@ show_fan(struct device *dev, struct device_attribute *da, char *buf)
}
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
+fan1_div_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int fan_div = 8 / data->fan_multiplier;
@@ -307,8 +307,8 @@ show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
* of least surprise; the user doesn't expect the fan target to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = data->client;
@@ -369,7 +369,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
}
static ssize_t
-show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
+fan1_target_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
@@ -382,8 +382,9 @@ show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = data->client;
@@ -412,7 +413,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
}
static ssize_t
-show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
+fan1_fault_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
@@ -420,14 +421,15 @@ show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
}
static ssize_t
-show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
+pwm1_enable_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
struct emc2103_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -512,14 +514,12 @@ static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 3);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
-static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
-static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
- set_fan_target);
-static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RO(fan1_fault);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
- set_pwm_enable);
+static DEVICE_ATTR_RW(pwm1_enable);
/* sensors present on all models */
static struct attribute *emc2103_attributes[] = {
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index facd05cda26d..73c681162653 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -946,7 +946,7 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute
return count;
}
-static ssize_t show_alarms_in(struct device *dev, struct device_attribute
+static ssize_t alarms_in_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct f71805f_data *data = f71805f_update_device(dev);
@@ -954,7 +954,7 @@ static ssize_t show_alarms_in(struct device *dev, struct device_attribute
return sprintf(buf, "%lu\n", data->alarms & 0x7ff);
}
-static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
+static ssize_t alarms_fan_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct f71805f_data *data = f71805f_update_device(dev);
@@ -962,7 +962,7 @@ static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
return sprintf(buf, "%lu\n", (data->alarms >> 16) & 0x07);
}
-static ssize_t show_alarms_temp(struct device *dev, struct device_attribute
+static ssize_t alarms_temp_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct f71805f_data *data = f71805f_update_device(dev);
@@ -980,7 +980,7 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
return sprintf(buf, "%lu\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct f71805f_data *data = dev_get_drvdata(dev);
@@ -1176,11 +1176,11 @@ static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static DEVICE_ATTR(alarms_in, S_IRUGO, show_alarms_in, NULL);
-static DEVICE_ATTR(alarms_fan, S_IRUGO, show_alarms_fan, NULL);
-static DEVICE_ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL);
+static DEVICE_ATTR_RO(alarms_in);
+static DEVICE_ATTR_RO(alarms_fan);
+static DEVICE_ATTR_RO(alarms_temp);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *f71805f_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index cb28e4b4fb10..ca54ce5c8e10 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -390,7 +390,7 @@ static ssize_t show_pwm_auto_point_temp(struct device *dev,
static ssize_t store_pwm_auto_point_temp(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count);
/* Sysfs misc */
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf);
static int f71882fg_probe(struct platform_device *pdev);
@@ -404,7 +404,7 @@ static struct platform_driver f71882fg_driver = {
.remove = f71882fg_remove,
};
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
/*
* Temp attr for the f71858fg, the f71858fg is special as it has its
@@ -2212,7 +2212,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
return count;
}
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 15aa49d082c4..9545a346044f 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -83,8 +83,8 @@ static bool is_carrizo_or_later(void)
return boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60;
}
-static ssize_t show_power(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t power1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
u32 val, tdp_limit, running_avg_range;
s32 running_avg_capture;
@@ -136,16 +136,16 @@ static ssize_t show_power(struct device *dev,
curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range);
return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
}
-static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
+static DEVICE_ATTR_RO(power1_input);
-static ssize_t show_power_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t power1_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fam15h_power_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->processor_pwr_watts);
}
-static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
+static DEVICE_ATTR_RO(power1_crit);
static void do_read_registers_on_cu(void *_data)
{
@@ -212,9 +212,8 @@ static int read_registers(struct fam15h_power_data *data)
return 0;
}
-static ssize_t acc_show_power(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t power1_average_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fam15h_power_data *data = dev_get_drvdata(dev);
u64 prev_cu_acc_power[MAX_CUS], prev_ptsc[MAX_CUS],
@@ -267,20 +266,20 @@ static ssize_t acc_show_power(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long)avg_acc);
}
-static DEVICE_ATTR(power1_average, S_IRUGO, acc_show_power, NULL);
+static DEVICE_ATTR_RO(power1_average);
-static ssize_t acc_show_power_period(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t power1_average_interval_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct fam15h_power_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%lu\n", data->power_period);
}
-static ssize_t acc_set_power_period(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t power1_average_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fam15h_power_data *data = dev_get_drvdata(dev);
unsigned long temp;
@@ -301,8 +300,7 @@ static ssize_t acc_set_power_period(struct device *dev,
return count;
}
-static DEVICE_ATTR(power1_average_interval, S_IRUGO | S_IWUSR,
- acc_show_power_period, acc_set_power_period);
+static DEVICE_ATTR_RW(power1_average_interval);
static int fam15h_power_init_attrs(struct pci_dev *pdev,
struct fam15h_power_data *data)
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index d58abdc5a4cf..5e78229ade04 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -561,7 +561,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
* The FSC hwmon family has the ability to force an attached alert led to flash
* from software, we export this as an alert_led sysfs attr
*/
-static ssize_t show_alert_led(struct device *dev,
+static ssize_t alert_led_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct fschmd_data *data = fschmd_update_device(dev);
@@ -572,7 +572,7 @@ static ssize_t show_alert_led(struct device *dev,
return sprintf(buf, "0\n");
}
-static ssize_t store_alert_led(struct device *dev,
+static ssize_t alert_led_store(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count)
{
u8 reg;
@@ -602,7 +602,7 @@ static ssize_t store_alert_led(struct device *dev,
return count;
}
-static DEVICE_ATTR(alert_led, 0644, show_alert_led, store_alert_led);
+static DEVICE_ATTR_RW(alert_led);
static struct sensor_device_attribute fschmd_attr[] = {
SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0),
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index ec6a77da411a..7be1371b2c3d 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -107,8 +107,8 @@ static struct g760a_data *g760a_update_client(struct device *dev)
return data;
}
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
unsigned int rpm = 0;
@@ -121,8 +121,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
@@ -131,16 +131,16 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", fan_alarm);
}
-static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct g760a_data *data = g760a_update_client(dev);
return sprintf(buf, "%d\n", PWM_FROM_CNT(data->set_cnt));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct g760a_data *data = g760a_update_client(dev);
struct i2c_client *client = data->client;
@@ -157,9 +157,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
return count;
}
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_alarm);
static struct attribute *g760a_attrs[] = {
&dev_attr_pwm1.attr,
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 628be9c95ff9..6dca2fd3d303 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -738,8 +738,8 @@ static int g762_pdata_prop_import(struct i2c_client *client)
* Read function for fan1_input sysfs file. Return current fan RPM value, or
* 0 if fan is out of control.
*/
-static ssize_t get_fan_rpm(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g762_data *data = g762_update_client(dev);
unsigned int rpm = 0;
@@ -764,8 +764,8 @@ static ssize_t get_fan_rpm(struct device *dev, struct device_attribute *da,
* Read and write functions for pwm1_mode sysfs file. Get and set fan speed
* control mode i.e. PWM (1) or DC (0).
*/
-static ssize_t get_pwm_mode(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t pwm1_mode_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -776,8 +776,9 @@ static ssize_t get_pwm_mode(struct device *dev, struct device_attribute *da,
!!(data->fan_cmd1 & G762_REG_FAN_CMD1_OUT_MODE));
}
-static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm1_mode_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
unsigned long val;
int ret;
@@ -796,8 +797,8 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *da,
* Read and write functions for fan1_div sysfs file. Get and set fan
* controller prescaler value
*/
-static ssize_t get_fan_div(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t fan1_div_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -807,9 +808,8 @@ static ssize_t get_fan_div(struct device *dev,
return sprintf(buf, "%d\n", G762_CLKDIV_FROM_REG(data->fan_cmd1));
}
-static ssize_t set_fan_div(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int ret;
@@ -828,8 +828,8 @@ static ssize_t set_fan_div(struct device *dev,
* Read and write functions for fan1_pulses sysfs file. Get and set number
* of tachometer pulses per fan revolution.
*/
-static ssize_t get_fan_pulses(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t fan1_pulses_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -839,9 +839,9 @@ static ssize_t get_fan_pulses(struct device *dev,
return sprintf(buf, "%d\n", G762_PULSE_FROM_REG(data->fan_cmd1));
}
-static ssize_t set_fan_pulses(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan1_pulses_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
unsigned long val;
int ret;
@@ -870,8 +870,8 @@ static ssize_t set_fan_pulses(struct device *dev,
* but we do not accept 0 as this mode is not natively supported by the chip
* and it is not emulated by g762 driver. -EINVAL is returned in this case.
*/
-static ssize_t get_pwm_enable(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -882,9 +882,9 @@ static ssize_t get_pwm_enable(struct device *dev,
(!!(data->fan_cmd1 & G762_REG_FAN_CMD1_FAN_MODE)) + 1);
}
-static ssize_t set_pwm_enable(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
unsigned long val;
int ret;
@@ -904,8 +904,8 @@ static ssize_t set_pwm_enable(struct device *dev,
* (which affects fan speed) in open-loop mode. 0 stops the fan and 255
* makes it run at full speed.
*/
-static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -915,8 +915,8 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", data->set_out);
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int ret;
@@ -942,8 +942,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
* Also note that due to rounding errors it is possible that you don't read
* back exactly the value you have set.
*/
-static ssize_t get_fan_target(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan1_target_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g762_data *data = g762_update_client(dev);
unsigned int rpm;
@@ -961,8 +961,9 @@ static ssize_t get_fan_target(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%u\n", rpm);
}
-static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
unsigned long val;
int ret;
@@ -978,7 +979,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
}
/* read function for fan1_fault sysfs file. */
-static ssize_t get_fan_failure(struct device *dev, struct device_attribute *da,
+static ssize_t fan1_fault_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -993,8 +994,8 @@ static ssize_t get_fan_failure(struct device *dev, struct device_attribute *da,
* read function for fan1_alarm sysfs file. Note that OOC condition is
* enabled low
*/
-static ssize_t get_fan_ooc(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct g762_data *data = g762_update_client(dev);
@@ -1004,18 +1005,15 @@ static ssize_t get_fan_ooc(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%u\n", !(data->fan_sta & G762_REG_FAN_STA_OOC));
}
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
-static DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, get_pwm_mode, set_pwm_mode);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable);
-static DEVICE_ATTR(fan1_input, S_IRUGO, get_fan_rpm, NULL);
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, get_fan_ooc, NULL);
-static DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan_failure, NULL);
-static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target);
-static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_fan_div, set_fan_div);
-static DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO,
- get_fan_pulses, set_fan_pulses);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_mode);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_alarm);
+static DEVICE_ATTR_RO(fan1_fault);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(fan1_pulses);
/* Driver data */
static struct attribute *g762_attrs[] = {
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 0212c8317bca..b267510daeb2 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -86,9 +86,8 @@ enum chips { gl518sm_r00, gl518sm_r80 };
#define BOOL_FROM_REG(val) ((val) ? 0 : 1)
#define BOOL_TO_REG(val) ((val) ? 0 : 1)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
- (val) - 500 : \
- (val) + 500) / 1000) + 119), 0, 255)
+#define TEMP_CLAMP(val) clamp_val(val, -119000, 136000)
+#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 119)
#define TEMP_FROM_REG(val) (((val) - 119) * 1000)
static inline u8 FAN_TO_REG(long rpm, int div)
@@ -101,11 +100,13 @@ static inline u8 FAN_TO_REG(long rpm, int div)
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
#define IN_FROM_REG(val) ((val) * 19)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
+#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
+#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index dee93ec87d02..4ff32ee67fb6 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -200,19 +200,21 @@ static struct gl520_data *gl520_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
+#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
-#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_FROM_REG(val) ((val) * 19)
+#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
+
+#define FAN_BASE(div) (480000 >> (div))
+#define FAN_CLAMP(val, div) clamp_val(val, FAN_BASE(div) / 255, \
+ FAN_BASE(div))
+#define FAN_TO_REG(val, div) ((val) == 0 ? 0 : \
+ DIV_ROUND_CLOSEST(480000, \
+ FAN_CLAMP(val, div) << (div)))
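The gl520sm FAN_TO_REG() rework above applies the same clamp-first idea to the reciprocal fan conversion: FAN_CLAMP() bounds the RPM value so that 480000 / (val << div) always lands in the valid 1..255 register range before rounding. A quick standalone check of the two extremes (positive-only stand-in for DIV_ROUND_CLOSEST()):

	#include <stdio.h>

	static long div_round_closest(long x, long d) { return (x + d / 2) / d; }

	int main(void)
	{
		int div = 2;
		long base = 480000 >> div;                  /* FAN_BASE(div) */

		/* clamped extremes map to the ends of the 1..255 register range */
		printf("%ld\n", div_round_closest(480000, base << div));         /* 1 */
		printf("%ld\n", div_round_closest(480000, (base / 255) << div)); /* 255 */
		return 0;
	}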
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -381,8 +388,8 @@ static ssize_t get_fan_div(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[n]));
}
-static ssize_t get_fan_off(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan1_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->fan_off);
@@ -476,8 +483,9 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_off(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan1_off_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -510,12 +518,11 @@ static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
get_fan_div, set_fan_div, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
get_fan_div, set_fan_div, 1);
-static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
- get_fan_off, set_fan_off);
+static DEVICE_ATTR_RW(fan1_off);
-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
- (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
+#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
+#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
+#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -596,29 +603,30 @@ static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR,
get_temp_max_hyst, set_temp_max_hyst, 1);
-static ssize_t get_alarms(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t get_beep_enable(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t beep_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->beep_enable);
}
-static ssize_t get_beep_mask(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t beep_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->beep_mask);
}
-static ssize_t set_beep_enable(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t beep_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -641,8 +649,9 @@ static ssize_t set_beep_enable(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -661,11 +670,9 @@ static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
-static DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
- get_beep_enable, set_beep_enable);
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
- get_beep_mask, set_beep_mask);
+static DEVICE_ATTR_RO(alarms);
+static DEVICE_ATTR_RW(beep_enable);
+static DEVICE_ATTR_RW(beep_mask);
static ssize_t get_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 685568b1236d..9c355b9d31c5 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -77,8 +77,8 @@ static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static ssize_t show_fan_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan1_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
struct gpio_fan_alarm *alarm = fan_data->alarm;
@@ -90,7 +90,7 @@ static ssize_t show_fan_alarm(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL);
+static DEVICE_ATTR_RO(fan1_alarm);
static int fan_alarm_init(struct gpio_fan_data *fan_data,
struct gpio_fan_alarm *alarm)
@@ -188,8 +188,8 @@ static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
return fan_data->num_speed - 1;
}
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
u8 pwm = fan_data->speed_index * 255 / (fan_data->num_speed - 1);
@@ -197,8 +197,8 @@ static ssize_t show_pwm(struct device *dev,
return sprintf(buf, "%d\n", pwm);
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long pwm;
@@ -224,16 +224,17 @@ exit_unlock:
return ret;
}
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", fan_data->pwm_enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
@@ -257,22 +258,22 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm1_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "0\n");
}
-static ssize_t show_rpm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan1_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", fan_data->speed[0].rpm);
}
-static ssize_t show_rpm_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan1_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
@@ -280,8 +281,8 @@ static ssize_t show_rpm_max(struct device *dev,
fan_data->speed[fan_data->num_speed - 1].rpm);
}
-static ssize_t show_rpm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
@@ -313,14 +314,13 @@ exit_unlock:
return ret;
}
-static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
-static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
- show_pwm_enable, set_pwm_enable);
-static DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL);
-static DEVICE_ATTR(fan1_min, S_IRUGO, show_rpm_min, NULL);
-static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
-static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(pwm1_mode);
+static DEVICE_ATTR_RO(fan1_min);
+static DEVICE_ATTR_RO(fan1_max);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, fan1_input_show, set_rpm);
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3932f9276c07..28375d59cc36 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -63,11 +63,11 @@ struct hwmon_thermal_data {
};
static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *hwmon_dev_attrs[] = {
&dev_attr_name.attr,
@@ -544,9 +544,11 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
struct device *hdev;
int i, j, err, id;
- /* Do not accept invalid characters in hwmon name attribute */
+ /* Complain about invalid characters in hwmon name attribute */
if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
- return ERR_PTR(-EINVAL);
+ dev_warn(dev,
+ "hwmon: '%s' is not a valid name attribute, please fix\n",
+ name);
id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
if (id < 0)
@@ -606,7 +608,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (chip && chip->ops->read &&
+ if (dev && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
@@ -651,6 +653,9 @@ hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
const struct attribute_group **groups)
{
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
return __hwmon_device_register(dev, name, drvdata, NULL, groups);
}
EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
@@ -674,6 +679,9 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
const struct hwmon_chip_info *chip,
const struct attribute_group **extra_groups)
{
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
@@ -695,7 +703,7 @@ struct device *hwmon_device_register(struct device *dev)
dev_warn(dev,
"hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
- return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
+ return __hwmon_device_register(dev, NULL, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
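With the hwmon core changes above, a NULL name is now rejected outright by hwmon_device_register_with_groups() and hwmon_device_register_with_info(), while a name containing '-', '*' or whitespace only draws a dev_warn() for now; only the deprecated hwmon_device_register() still reaches the internal path without a name. A minimal caller sketch (hypothetical foo driver, empty extra attribute group, kernel-style C):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/hwmon.h>
	#include <linux/sysfs.h>

	/* hypothetical driver: no extra sysfs attributes in this sketch */
	static struct attribute *foo_attrs[] = {
		NULL
	};
	ATTRIBUTE_GROUPS(foo);

	static int foo_register(struct device *dev, void *drvdata)
	{
		struct device *hwmon_dev;

		/*
		 * The name must now be non-NULL; names containing '-', '*' or
		 * whitespace trigger a warning but are still accepted for now.
		 */
		hwmon_dev = devm_hwmon_device_register_with_groups(dev, "foo",
								    drvdata,
								    foo_groups);
		return PTR_ERR_OR_ZERO(hwmon_dev);
	}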
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 3e3ccbf18b4e..400e0675a90b 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -43,8 +43,8 @@
*/
/* Sensor resolution : 0.5 degree C */
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev->parent);
long temp;
@@ -83,7 +83,7 @@ static ssize_t show_alarm(struct device *dev,
return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 6b3d1972cef7..a5a9f457b7f7 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -114,14 +114,14 @@ struct i5k_amb_data {
unsigned int num_attrs;
};
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
return sprintf(buf, "%s\n", DRVNAME);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct platform_device *amb_pdev;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index ad82cb28d87a..efb01c247e2d 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -12,6 +12,7 @@
*
* Supports: IT8603E Super I/O chip w/LPC interface
* IT8620E Super I/O chip w/LPC interface
+ * IT8622E Super I/O chip w/LPC interface
* IT8623E Super I/O chip w/LPC interface
* IT8628E Super I/O chip w/LPC interface
* IT8705F Super I/O chip w/LPC interface
@@ -31,6 +32,7 @@
* IT8783E/F Super I/O chip w/LPC interface
* IT8786E Super I/O chip w/LPC interface
* IT8790E Super I/O chip w/LPC interface
+ * IT8792E Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
@@ -69,8 +71,8 @@
#define DRVNAME "it87"
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
- it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
- it8620, it8628 };
+ it8771, it8772, it8781, it8782, it8783, it8786, it8790,
+ it8792, it8603, it8620, it8622, it8628 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -151,6 +153,7 @@ static inline void superio_exit(int ioreg)
#define IT8726F_DEVID 0x8726
#define IT8728F_DEVID 0x8728
#define IT8732F_DEVID 0x8732
+#define IT8792E_DEVID 0x8733
#define IT8771E_DEVID 0x8771
#define IT8772E_DEVID 0x8772
#define IT8781F_DEVID 0x8781
@@ -160,6 +163,7 @@ static inline void superio_exit(int ioreg)
#define IT8790E_DEVID 0x8790
#define IT8603E_DEVID 0x8603
#define IT8620E_DEVID 0x8620
+#define IT8622E_DEVID 0x8622
#define IT8623E_DEVID 0x8623
#define IT8628E_DEVID 0x8628
#define IT87_ACT_REG 0x30
@@ -293,9 +297,11 @@ struct it87_devices {
#define FEAT_SIX_FANS BIT(11) /* Supports six fans */
#define FEAT_10_9MV_ADC BIT(12)
#define FEAT_AVCC3 BIT(13) /* Chip supports in9/AVCC3 */
-#define FEAT_SIX_PWM BIT(14) /* Chip supports 6 pwm chn */
-#define FEAT_PWM_FREQ2 BIT(15) /* Separate pwm freq 2 */
-#define FEAT_SIX_TEMP BIT(16) /* Up to 6 temp sensors */
+#define FEAT_FIVE_PWM BIT(14) /* Chip supports 5 pwm chn */
+#define FEAT_SIX_PWM BIT(15) /* Chip supports 6 pwm chn */
+#define FEAT_PWM_FREQ2 BIT(16) /* Separate pwm freq 2 */
+#define FEAT_SIX_TEMP BIT(17) /* Up to 6 temp sensors */
+#define FEAT_VIN3_5V BIT(18) /* VIN3 connected to +5V */
static const struct it87_devices it87_devices[] = {
[it87] = {
@@ -419,6 +425,15 @@ static const struct it87_devices it87_devices[] = {
| FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
+ [it8792] = {
+ .name = "it8792",
+ .suffix = "E",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
+ | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+ .peci_mask = 0x07,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
[it8603] = {
.name = "it8603",
.suffix = "E",
@@ -433,7 +448,16 @@ static const struct it87_devices it87_devices[] = {
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
| FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
- | FEAT_SIX_TEMP,
+ | FEAT_SIX_TEMP | FEAT_VIN3_5V,
+ .peci_mask = 0x07,
+ },
+ [it8622] = {
+ .name = "it8622",
+ .suffix = "E",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
+ | FEAT_FIVE_PWM | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2
+ | FEAT_AVCC3 | FEAT_VIN3_5V,
.peci_mask = 0x07,
},
[it8628] = {
@@ -442,7 +466,7 @@ static const struct it87_devices it87_devices[] = {
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
| FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
- | FEAT_SIX_TEMP,
+ | FEAT_SIX_TEMP | FEAT_VIN3_5V,
.peci_mask = 0x07,
},
};
@@ -465,9 +489,12 @@ static const struct it87_devices it87_devices[] = {
#define has_in7_internal(data) ((data)->features & FEAT_IN7_INTERNAL)
#define has_six_fans(data) ((data)->features & FEAT_SIX_FANS)
#define has_avcc3(data) ((data)->features & FEAT_AVCC3)
+#define has_five_pwm(data) ((data)->features & (FEAT_FIVE_PWM \
+ | FEAT_SIX_PWM))
#define has_six_pwm(data) ((data)->features & FEAT_SIX_PWM)
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
+#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
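The has_five_pwm() test above deliberately ORs FEAT_FIVE_PWM and FEAT_SIX_PWM together, so six-PWM chips such as the IT8620E/IT8628E also satisfy the five-PWM check while the new IT8622E sets only the weaker flag; has_six_pwm() stays exclusive to the six-channel parts. A trivial standalone illustration of the mask logic:

	#include <stdio.h>

	#define BIT(n)		(1u << (n))
	#define FEAT_FIVE_PWM	BIT(14)
	#define FEAT_SIX_PWM	BIT(15)

	int main(void)
	{
		unsigned int it8622 = FEAT_FIVE_PWM;	/* five PWM outputs */
		unsigned int it8628 = FEAT_SIX_PWM;	/* six PWM outputs */

		/* has_five_pwm() tests both flags, so six-PWM chips pass it too */
		printf("%d %d\n", !!(it8622 & (FEAT_FIVE_PWM | FEAT_SIX_PWM)),
				  !!(it8628 & (FEAT_FIVE_PWM | FEAT_SIX_PWM)));	/* 1 1 */
		printf("%d %d\n", !!(it8622 & FEAT_SIX_PWM),
				  !!(it8628 & FEAT_SIX_PWM));			/* 0 1 */
		return 0;
	}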
struct it87_sio_data {
enum chips type;
@@ -1300,25 +1327,35 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
data->fan_main_ctrl);
} else {
+ u8 ctrl;
+
/* No on/off mode, set maximum pwm value */
data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
it87_write_value(data, IT87_REG_PWM_DUTY[nr],
data->pwm_duty[nr]);
/* and set manual mode */
- data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
- data->pwm_temp_map[nr] :
- data->pwm_duty[nr];
- it87_write_value(data, IT87_REG_PWM[nr],
- data->pwm_ctrl[nr]);
+ if (has_newer_autopwm(data)) {
+ ctrl = (data->pwm_ctrl[nr] & 0x7c) |
+ data->pwm_temp_map[nr];
+ } else {
+ ctrl = data->pwm_duty[nr];
+ }
+ data->pwm_ctrl[nr] = ctrl;
+ it87_write_value(data, IT87_REG_PWM[nr], ctrl);
}
} else {
- if (val == 1) /* Manual mode */
- data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
- data->pwm_temp_map[nr] :
- data->pwm_duty[nr];
- else /* Automatic mode */
- data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
- it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
+ u8 ctrl;
+
+ if (has_newer_autopwm(data)) {
+ ctrl = (data->pwm_ctrl[nr] & 0x7c) |
+ data->pwm_temp_map[nr];
+ if (val != 1)
+ ctrl |= 0x80;
+ } else {
+ ctrl = (val == 1 ? data->pwm_duty[nr] : 0x80);
+ }
+ data->pwm_ctrl[nr] = ctrl;
+ it87_write_value(data, IT87_REG_PWM[nr], ctrl);
if (data->type != it8603 && nr < 3) {
/* set SmartGuardian mode */
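The rewritten branches above no longer rebuild the PWM control register from scratch: on chips with the newer auto-PWM interface the low bits carry the temperature mapping and bit 7 selects automatic mode, so the code masks the new value into the cached pwm_ctrl, preserving bits 0x7c that the old code silently cleared, and the added it87_update_pwm_ctrl() calls refresh that cache before it is modified. A condensed sketch of the register update, expressed as a hypothetical helper:

	/* hypothetical helper condensing the newer-autopwm path above */
	static u8 it87_build_pwm_ctrl(u8 cached_ctrl, u8 temp_map, bool automatic)
	{
		u8 ctrl = (cached_ctrl & 0x7c) | temp_map;	/* keep untouched config bits */

		if (automatic)
			ctrl |= 0x80;				/* bit 7 = automatic fan control */
		return ctrl;
	}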
@@ -1344,6 +1381,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
+ it87_update_pwm_ctrl(data, nr);
if (has_newer_autopwm(data)) {
/*
* If we are in automatic mode, the PWM duty cycle register
@@ -1456,13 +1494,15 @@ static ssize_t set_pwm_temp_map(struct device *dev,
}
mutex_lock(&data->update_lock);
+ it87_update_pwm_ctrl(data, nr);
data->pwm_temp_map[nr] = reg;
/*
* If we are in automatic mode, write the temp mapping immediately;
* otherwise, just store it for later use.
*/
if (data->pwm_ctrl[nr] & 0x80) {
- data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
+ data->pwm_ctrl[nr] = (data->pwm_ctrl[nr] & 0xfc) |
+ data->pwm_temp_map[nr];
it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
}
mutex_unlock(&data->update_lock);
@@ -1762,14 +1802,14 @@ static SENSOR_DEVICE_ATTR(pwm6_auto_slope, S_IRUGO | S_IWUSR,
show_auto_pwm_slope, set_auto_pwm_slope, 5);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1877,16 +1917,16 @@ static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO, show_beep, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1898,16 +1938,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%ld\n", (long)vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
static ssize_t show_label(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1916,17 +1956,21 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
"+5V",
"5VSB",
"Vbat",
+ "AVCC",
};
static const char * const labels_it8721[] = {
"+3.3V",
"3VSB",
"Vbat",
+ "+3.3V",
};
struct it87_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(attr)->index;
const char *label;
- if (has_12mv_adc(data) || has_10_9mv_adc(data))
+ if (has_vin3_5v(data) && nr == 0)
+ label = labels[0];
+ else if (has_12mv_adc(data) || has_10_9mv_adc(data))
label = labels_it8721[nr];
else
label = labels[nr];
@@ -1937,7 +1981,7 @@ static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
/* AVCC3 */
-static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 3);
static umode_t it87_in_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
@@ -2386,6 +2430,9 @@ static int __init it87_find(int sioaddr, unsigned short *address,
case IT8732F_DEVID:
sio_data->type = it8732;
break;
+ case IT8792E_DEVID:
+ sio_data->type = it8792;
+ break;
case IT8771E_DEVID:
sio_data->type = it8771;
break;
@@ -2414,6 +2461,9 @@ static int __init it87_find(int sioaddr, unsigned short *address,
case IT8620E_DEVID:
sio_data->type = it8620;
break;
+ case IT8622E_DEVID:
+ sio_data->type = it8622;
+ break;
case IT8628E_DEVID:
sio_data->type = it8628;
break;
@@ -2457,8 +2507,10 @@ static int __init it87_find(int sioaddr, unsigned short *address,
else
sio_data->skip_in |= BIT(9);
- if (!has_six_pwm(config))
+ if (!has_five_pwm(config))
sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
+ else if (!has_six_pwm(config))
+ sio_data->skip_pwm |= BIT(5);
if (!has_vid(config))
sio_data->skip_vid = 1;
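Because has_five_pwm(), added earlier in this patch, matches FEAT_SIX_PWM as well as FEAT_FIVE_PWM, the reworked ladder above keeps pwm4/pwm5 on any chip with at least five PWM outputs and only drops pwm6 on the five-PWM parts. Spelled out (sketch only, using the feature bits directly):

if (!(features & (FEAT_FIVE_PWM | FEAT_SIX_PWM)))
	skip_pwm |= BIT(3) | BIT(4) | BIT(5);	/* fewer than five: no pwm4..pwm6 */
else if (!(features & FEAT_SIX_PWM))
	skip_pwm |= BIT(5);			/* exactly five: drop pwm6 only */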
@@ -2587,7 +2639,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
/* Check for pwm4 */
reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
- if (!(reg & BIT(2)))
+ if (reg & BIT(2))
sio_data->skip_pwm |= BIT(3);
/* Check for pwm2, fan2 */
@@ -2602,6 +2654,50 @@ static int __init it87_find(int sioaddr, unsigned short *address,
sio_data->skip_fan |= BIT(5);
}
+ /* Check if AVCC is on VIN3 */
+ reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+ if (reg & BIT(0))
+ sio_data->internal |= BIT(0);
+ else
+ sio_data->skip_in |= BIT(9);
+
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8622) {
+ int reg;
+
+ superio_select(sioaddr, GPIO);
+
+ /* Check for pwm4, fan4 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+ if (reg & BIT(6))
+ sio_data->skip_fan |= BIT(3);
+ if (reg & BIT(5))
+ sio_data->skip_pwm |= BIT(3);
+
+ /* Check for pwm3, fan3, pwm5, fan5 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
+ if (reg & BIT(3))
+ sio_data->skip_pwm |= BIT(4);
+ if (reg & BIT(1))
+ sio_data->skip_fan |= BIT(4);
+
+ /* Check for pwm2, fan2 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+
+ /* Check for AVCC */
+ reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+ if (!(reg & BIT(0)))
+ sio_data->skip_in |= BIT(9);
+
sio_data->beep_pin = superio_inb(sioaddr,
IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
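Most of the remaining hunks in this series follow one mechanical pattern: show/store callbacks are renamed to <attribute>_show and <attribute>_store so that open-coded DEVICE_ATTR() declarations can become DEVICE_ATTR_RO/RW. The renaming is not cosmetic; the helper macros derive the callback names from the attribute name, roughly like this (a simplified approximation of the definitions in include/linux/device.h and include/linux/sysfs.h, not the exact kernel source):

/* DEVICE_ATTR_RW(vrm) only compiles if vrm_show() and vrm_store() exist */
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define __ATTR_RW(_name) {						\
	.attr  = { .name = __stringify(_name), .mode = S_IWUSR | S_IRUGO },	\
	.show  = _name##_show,						\
	.store = _name##_store,						\
}

The generated dev_attr_<name> symbol is the one the drivers' attribute arrays already reference (for example &dev_attr_alarms.attr), so only the callback names need to change.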
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 0621ee1b3c98..2d40a2e771d7 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -44,8 +44,8 @@ static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t in0_input_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct platform_device *pdev = hwmon->pdev;
@@ -79,7 +79,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
return ret;
}
-static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
+static DEVICE_ATTR_RO(in0_input);
static struct attribute *jz4740_attrs[] = {
&dev_attr_in0_input.attr,
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 9cdfde6515ad..ce3b91f22e30 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -72,8 +72,8 @@ static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
mutex_unlock(&nb_smu_ind_mutex);
}
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
u32 regval;
struct pci_dev *pdev = dev_get_drvdata(dev);
@@ -88,8 +88,8 @@ static ssize_t show_temp(struct device *dev,
return sprintf(buf, "%u\n", (regval >> 21) * 125);
}
-static ssize_t show_temp_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", 70 * 1000);
}
@@ -110,8 +110,8 @@ static ssize_t show_temp_crit(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RO(temp1_max);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
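As background for the renamed temp1_input_show() above: the unchanged conversion treats bits 31:21 of the SMU register as the current temperature in 0.125 degC steps, so multiplying by 125 yields the millidegree value hwmon expects. A worked example with a made-up register value:

u32 regval = 0x32000000;		/* hypothetical register read */
unsigned int steps = regval >> 21;	/* 0x190 = 400 steps of 0.125 degC */
unsigned int mcelsius = steps * 125;	/* 50000, i.e. 50.000 degC */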
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 734d55d48cc8..5a632bcf869b 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -100,7 +100,7 @@ static struct k8temp_data *k8temp_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct k8temp_data *data = dev_get_drvdata(dev);
@@ -133,7 +133,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1);
static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 33bfdb444138..2e1948699114 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -417,16 +417,16 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_pwm1_enable(struct device *dev,
+static ssize_t pwm1_enable_show(struct device *dev,
struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
}
-static ssize_t set_pwm1_enable(struct device *dev,
- struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *dummy,
+ const char *buf, size_t count)
{
struct lm63_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -600,7 +600,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
* Hysteresis register holds a relative value, while we want to present
* an absolute to user-space
*/
-static ssize_t show_temp2_crit_hyst(struct device *dev,
+static ssize_t temp2_crit_hyst_show(struct device *dev,
struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
@@ -624,9 +624,9 @@ static ssize_t show_lut_temp_hyst(struct device *dev,
* And now the other way around, user-space provides an absolute
* hysteresis value and we have to store a relative one
*/
-static ssize_t set_temp2_crit_hyst(struct device *dev,
- struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t temp2_crit_hyst_store(struct device *dev,
+ struct device_attribute *dummy,
+ const char *buf, size_t count)
{
struct lm63_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
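The comments above are the reason temp2_crit_hyst needs its own show and store callbacks: the LM63 stores hysteresis as an offset below the critical limit, while sysfs exposes an absolute temperature. A small illustration with invented values (the actual register math is outside the lines shown here, so this only mirrors what the comments state):

/* chip: temp2_crit = 85 degC, hysteresis register = 10 degC */
int crit = 85000, hyst = 10000;		/* millidegrees */
int shown = crit - hyst;		/* temp2_crit_hyst reads 75000 */
/* writing 70000 back would store the relative value 85000 - 70000 = 15000 */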
@@ -670,7 +670,7 @@ static void lm63_set_convrate(struct lm63_data *data, unsigned int interval)
data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
}
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm63_data *data = dev_get_drvdata(dev);
@@ -678,9 +678,9 @@ static ssize_t show_update_interval(struct device *dev,
return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm63_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -697,16 +697,17 @@ static ssize_t set_update_interval(struct device *dev,
return count;
}
-static ssize_t show_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp2_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm63_data *data = dev_get_drvdata(dev);
return sprintf(buf, data->trutherm ? "1\n" : "2\n");
}
-static ssize_t set_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp2_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm63_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -731,7 +732,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
@@ -753,8 +754,7 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
set_fan, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- show_pwm1_enable, set_pwm1_enable);
+static DEVICE_ATTR_RW(pwm1_enable);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 1);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IWUSR | S_IRUGO,
@@ -841,10 +841,9 @@ static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 3);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
set_temp8, 2);
-static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
- set_temp2_crit_hyst);
+static DEVICE_ATTR_RW(temp2_crit_hyst);
-static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
+static DEVICE_ATTR_RW(temp2_type);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
@@ -854,10 +853,9 @@ static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
/* Raw alarm file for compatibility */
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
- set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
static struct attribute *lm63_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 583f883a4cfe..543556dc563b 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -46,6 +46,7 @@
#define LM70_CHIP_TMP121 1 /* TI TMP121/TMP123 */
#define LM70_CHIP_LM71 2 /* NS LM71 */
#define LM70_CHIP_LM74 3 /* NS LM74 */
+#define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */
struct lm70 {
struct spi_device *spi;
@@ -54,8 +55,8 @@ struct lm70 {
};
/* sysfs hook function */
-static ssize_t lm70_sense_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm70 *p_lm70 = dev_get_drvdata(dev);
struct spi_device *spi = p_lm70->spi;
@@ -72,7 +73,8 @@ static ssize_t lm70_sense_temp(struct device *dev,
*/
status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
if (status < 0) {
- pr_warn("spi_write_then_read failed with status %d\n", status);
+ dev_warn(dev, "spi_write_then_read failed with status %d\n",
+ status);
goto out;
}
raw = (rxbuf[0] << 8) + rxbuf[1];
@@ -91,7 +93,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
* Celsius.
* So it's equivalent to multiplying by 0.25 * 1000 = 250.
*
- * LM74 and TMP121/TMP123:
+ * LM74 and TMP121/TMP122/TMP123/TMP124:
* 13 bits of 2's complement data, discard LSB 3 bits,
 * resolution 0.0625 degrees Celsius.
*
@@ -105,6 +107,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
break;
case LM70_CHIP_TMP121:
+ case LM70_CHIP_TMP122:
case LM70_CHIP_LM74:
val = ((int)raw / 8) * 625 / 10;
break;
@@ -120,7 +123,7 @@ out:
return status;
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, lm70_sense_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
static struct attribute *lm70_attrs[] = {
&dev_attr_temp1_input.attr,
@@ -142,6 +145,10 @@ static const struct of_device_id lm70_of_ids[] = {
.data = (void *) LM70_CHIP_TMP121,
},
{
+ .compatible = "ti,tmp122",
+ .data = (void *) LM70_CHIP_TMP122,
+ },
+ {
.compatible = "ti,lm71",
.data = (void *) LM70_CHIP_LM71,
},
@@ -190,6 +197,7 @@ static int lm70_probe(struct spi_device *spi)
static const struct spi_device_id lm70_ids[] = {
{ "lm70", LM70_CHIP_LM70 },
{ "tmp121", LM70_CHIP_TMP121 },
+ { "tmp122", LM70_CHIP_TMP122 },
{ "lm71", LM70_CHIP_LM71 },
{ "lm74", LM70_CHIP_LM74 },
{ },
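With the TMP122/TMP124 entries added above, the new chip type is deliberately routed through the same conversion branch as LM74 and TMP121: a 13-bit two's-complement reading, low 3 bits discarded, 0.0625 degC per step. A worked example with an assumed (positive) raw frame:

unsigned int raw = 0x0c80;		/* assumed 16-bit SPI frame */
int steps = (int)raw / 8;		/* drop the 3 LSBs -> 400 steps */
int mcelsius = steps * 625 / 10;	/* 400 * 62.5 = 25000, i.e. 25 degC */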
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 539efe4ad991..0cb7ff613b80 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -236,22 +236,23 @@ show_in_offset(5);
show_in_offset(6);
/* Temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp));
}
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_over));
}
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
struct lm78_data *data = dev_get_drvdata(dev);
long val;
@@ -268,15 +269,16 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_hyst));
}
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
struct lm78_data *data = dev_get_drvdata(dev);
long val;
@@ -293,11 +295,9 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
return count;
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
- show_temp_over, set_temp_over);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
- show_temp_hyst, set_temp_hyst);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
/* 3 Fans */
static ssize_t show_fan(struct device *dev, struct device_attribute *da,
@@ -431,22 +431,22 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);
/* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, 82));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
char *buf)
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 4bcd9b882948..08e3945a6fbf 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -432,7 +432,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
@@ -505,7 +505,7 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp,
set_temp, t_os_max);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
set_temp, t_os_hyst);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 9e4d0e1d3c4b..cbfd0bb7f135 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -188,7 +188,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm83_data *data = lm83_update_device(dev);
@@ -236,7 +236,7 @@ static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
/* Raw alarm file for compatibility */
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static struct attribute *lm83_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 29c8136ce9c5..691469ffa24e 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -604,8 +604,8 @@ show_fan_offset(4);
/* vid, vrm, alarms */
-static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm85_data *data = lm85_update_device(dev);
int vid;
@@ -621,17 +621,17 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", vid);
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm85_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%ld\n", (long) data->vrm);
}
-static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm85_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -648,16 +648,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_alarms_reg(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 13cca3606e06..e06faf9d3f0f 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -445,23 +445,23 @@ set_temp(1);
set_temp(2);
set_temp(3);
-static ssize_t show_temp_crit_int(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit_int));
}
-static ssize_t show_temp_crit_ext(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp2_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit_ext));
}
-static DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit_int, NULL);
-static DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit_ext, NULL);
-static DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit_ext, NULL);
+static DEVICE_ATTR_RO(temp1_crit);
+static DEVICE_ATTR_RO(temp2_crit);
+static DEVICE_ATTR(temp3_crit, S_IRUGO, temp2_crit_show, NULL);
static ssize_t show_fan_input(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -586,30 +586,30 @@ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
set_fan(1);
set_fan(2);
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm87_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -625,16 +625,17 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
data->vrm = val;
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t aout_output_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout));
}
-static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t aout_output_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -651,7 +652,7 @@ static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
+static DEVICE_ATTR_RW(aout_output);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 841f2428e84a..aff5297bc2bc 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -830,7 +830,7 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
}
/* pec used for ADM1032 only */
-static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
+static ssize_t pec_show(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -838,8 +838,8 @@ static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
}
-static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t pec_store(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
long val;
@@ -863,7 +863,7 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
return count;
}
-static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
+static DEVICE_ATTR_RW(pec);
static int lm90_get_temp11(struct lm90_data *data, int index)
{
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index cfaf70b9cba7..2a91974a10bb 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -181,8 +181,8 @@ static ssize_t show_temp_hyst(struct device *dev,
- TEMP_FROM_REG(data->temp[t_hyst]));
}
-static ssize_t show_temp_min_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp1_min_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min])
@@ -213,7 +213,7 @@ static ssize_t set_temp_hyst(struct device *dev,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
@@ -235,11 +235,11 @@ static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst,
set_temp_hyst, t_crit);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
t_min);
-static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp_min_hyst, NULL);
+static DEVICE_ATTR_RO(temp1_min_hyst);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
t_max);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 90bb04858117..77a0a83399b3 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2156,7 +2156,7 @@ static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO,
show_pwm_auto_spinup_time,
store_pwm_auto_spinup_time, 1);
-static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
+static ssize_t pwm_auto_prochot_ramp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm93_data *data = lm93_update_device(dev);
@@ -2164,7 +2164,7 @@ static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f));
}
-static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
+static ssize_t pwm_auto_prochot_ramp_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -2186,11 +2186,9 @@ static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
return count;
}
-static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR,
- show_pwm_auto_prochot_ramp,
- store_pwm_auto_prochot_ramp);
+static DEVICE_ATTR_RW(pwm_auto_prochot_ramp);
-static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
+static ssize_t pwm_auto_vrdhot_ramp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm93_data *data = lm93_update_device(dev);
@@ -2198,7 +2196,7 @@ static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f));
}
-static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
+static ssize_t pwm_auto_vrdhot_ramp_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -2220,9 +2218,7 @@ static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
return 0;
}
-static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR,
- show_pwm_auto_vrdhot_ramp,
- store_pwm_auto_vrdhot_ramp);
+static DEVICE_ATTR_RW(pwm_auto_vrdhot_ramp);
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2378,7 +2374,7 @@ static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
show_prochot_interval, store_prochot_interval, 1);
-static ssize_t show_prochot_override_duty_cycle(struct device *dev,
+static ssize_t prochot_override_duty_cycle_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -2386,7 +2382,7 @@ static ssize_t show_prochot_override_duty_cycle(struct device *dev,
return sprintf(buf, "%d\n", data->prochot_override & 0x0f);
}
-static ssize_t store_prochot_override_duty_cycle(struct device *dev,
+static ssize_t prochot_override_duty_cycle_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -2408,18 +2404,16 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
return count;
}
-static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR,
- show_prochot_override_duty_cycle,
- store_prochot_override_duty_cycle);
+static DEVICE_ATTR_RW(prochot_override_duty_cycle);
-static ssize_t show_prochot_short(struct device *dev,
+static ssize_t prochot_short_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", (data->config & 0x10) ? 1 : 0);
}
-static ssize_t store_prochot_short(struct device *dev,
+static ssize_t prochot_short_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -2442,8 +2436,7 @@ static ssize_t store_prochot_short(struct device *dev,
return count;
}
-static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR,
- show_prochot_short, store_prochot_short);
+static DEVICE_ATTR_RW(prochot_short);
static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2457,23 +2450,23 @@ static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);
-static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", LM93_GPI_FROM_REG(data->gpi));
}
-static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL);
+static DEVICE_ATTR_RO(gpio);
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", LM93_ALARMS_FROM_REG(data->block1));
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static struct attribute *lm93_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 8796de39ff9b..c7fcc9e7f57a 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -450,8 +450,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t update_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm95234_data *data = dev_get_drvdata(dev);
int ret = lm95234_update_device(data);
@@ -463,8 +463,9 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
DIV_ROUND_CLOSEST(data->interval * 1000, HZ));
}
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm95234_data *data = dev_get_drvdata(dev);
int ret = lm95234_update_device(data);
@@ -566,8 +567,7 @@ static SENSOR_DEVICE_ATTR(temp4_offset, S_IWUSR | S_IRUGO, show_offset,
static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
set_offset, 3);
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
- set_interval);
+static DEVICE_ATTR_RW(update_interval);
static struct attribute *lm95234_common_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 8445c9fd946b..b904cb547ffb 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -215,6 +215,7 @@ static const struct of_device_id ltc4151_match[] = {
{ .compatible = "lltc,ltc4151" },
{},
};
+MODULE_DEVICE_TABLE(of, ltc4151_match);
/* This is the driver that will be inserted */
static struct i2c_driver ltc4151_driver = {
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 303d0c9df907..8ddd4d690652 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(max1111_read_channel);
* likely to be used by hwmon applications to distinguish between
* different devices, explicitly add a name attribute here.
*/
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
@@ -125,7 +125,7 @@ static ssize_t show_adc(struct device *dev,
#define MAX1111_ADC_ATTR(_id) \
SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static MAX1111_ADC_ATTR(0);
static MAX1111_ADC_ATTR(1);
static MAX1111_ADC_ATTR(2);
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index eda9cf599685..a18278938494 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -173,7 +173,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct max1619_data *data = max1619_update_device(dev);
@@ -199,7 +199,7 @@ static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
set_temp, t_hyst2);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 07628569547a..638567fb7cd8 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -207,8 +207,8 @@ unlock:
return ret;
}
-static ssize_t max197_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
return sprintf(buf, "%s\n", pdev->name);
@@ -231,7 +231,7 @@ static ssize_t max197_show_name(struct device *dev,
&sensor_dev_attr_in##chan##_max.dev_attr.attr, \
&sensor_dev_attr_in##chan##_min.dev_attr.attr
-static DEVICE_ATTR(name, S_IRUGO, max197_show_name, NULL);
+static DEVICE_ATTR_RO(name);
MAX197_SENSOR_DEVICE_ATTR_CH(0);
MAX197_SENSOR_DEVICE_ATTR_CH(1);
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index a993b44ed538..65be4b19fe47 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -270,8 +270,8 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
* controlled.
*/
-static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t fan1_target_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
int kscale, ktach, rpm;
@@ -318,8 +318,9 @@ static int max6650_set_target(struct max6650_data *data, unsigned long rpm)
data->speed);
}
-static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan1_target_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct max6650_data *data = dev_get_drvdata(dev);
unsigned long rpm;
@@ -350,8 +351,8 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
* back exactly the value you have set.
*/
-static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t pwm1_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int pwm;
struct max6650_data *data = max6650_update_device(dev);
@@ -371,8 +372,9 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", pwm);
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t pwm1_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct max6650_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -406,8 +408,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
* 3 = Fan off
*/
-static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t pwm1_enable_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
@@ -416,8 +418,9 @@ static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", sysfs_modes[mode]);
}
-static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t pwm1_enable_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct max6650_data *data = dev_get_drvdata(dev);
unsigned long mode;
@@ -458,16 +461,17 @@ static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
* defined for that. See the data sheet for details.
*/
-static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t fan1_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
}
-static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan1_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct max6650_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -534,10 +538,10 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
-static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
-static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
-static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
+static DEVICE_ATTR_RW(fan1_target);
+static DEVICE_ATTR_RW(fan1_div);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RW(pwm1);
static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_MAX);
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 0c02f40eb0c1..960a1db6f269 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -40,8 +40,8 @@ struct mc13783_adc_priv {
char name[PLATFORM_NAME_SIZE];
};
-static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct mc13783_adc_priv *priv = dev_get_drvdata(dev);
@@ -111,7 +111,7 @@ static ssize_t mc13783_adc_read_gp(struct device *dev,
return sprintf(buf, "%u\n", val);
}
-static DEVICE_ATTR(name, S_IRUGO, mc13783_adc_show_name, NULL);
+static DEVICE_ATTR_RO(name);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, mc13783_adc_read_bp, NULL, 2);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, mc13783_adc_read_gp, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, mc13783_adc_read_gp, NULL, 6);
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 1929734c3b1d..de886f82101b 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -86,8 +86,8 @@ static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
}
-static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in0_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct mcp3021_data *data = i2c_get_clientdata(client);
@@ -102,7 +102,7 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", in_input);
}
-static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
+static DEVICE_ATTR_RO(in0_input);
static int mcp3021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
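The renamed in0_input_show() above keeps using volts_from_reg(), visible a few lines earlier in this file, which scales the raw ADC code by the reference/supply voltage. A hedged numeric example (Vdd and resolution are assumptions, not values from this hunk):

/* assume vdd = 3300 mV and a 10-bit MCP3021, i.e. output_res = 10 */
u16 val = 512;
u16 mv = DIV_ROUND_CLOSEST(3300 * val, 1 << 10);	/* 1650 mV */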
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 559c596b24f9..8b0bc4fc06e8 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -979,7 +979,7 @@ static const struct sensor_template_group nct6683_pwm_template_group = {
};
static ssize_t
-show_global_beep(struct device *dev, struct device_attribute *attr, char *buf)
+beep_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nct6683_data *data = dev_get_drvdata(dev);
int ret;
@@ -1004,7 +1004,7 @@ error:
}
static ssize_t
-store_global_beep(struct device *dev, struct device_attribute *attr,
+beep_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6683_data *data = dev_get_drvdata(dev);
@@ -1039,7 +1039,8 @@ error:
/* Case open detection */
static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+intrusion0_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct nct6683_data *data = dev_get_drvdata(dev);
int ret;
@@ -1064,8 +1065,8 @@ error:
}
static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct nct6683_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1102,10 +1103,8 @@ error:
return count;
}
-static DEVICE_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen);
-static DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_global_beep,
- store_global_beep);
+static DEVICE_ATTR_RW(intrusion0_alarm);
+static DEVICE_ATTR_RW(beep_enable);
static struct attribute *nct6683_attributes_other[] = {
&dev_attr_intrusion0_alarm.attr,
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index ce75dd4db7eb..2458b406f6aa 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -3127,14 +3127,14 @@ static const struct sensor_template_group nct6775_pwm_template_group = {
};
static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nct6775_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
index 0517a265741f..5a16109cdea8 100644
--- a/drivers/hwmon/nsa320-hwmon.c
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -122,8 +122,8 @@ static ssize_t show_label(struct device *dev,
return sprintf(buf, "%s\n", nsa320_input_names[channel]);
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s32 mcu_data = nsa320_hwmon_update(dev);
@@ -133,8 +133,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (mcu_data & 0xffff) * 100);
}
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s32 mcu_data = nsa320_hwmon_update(dev);
@@ -145,9 +145,9 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
}
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR_RO(temp1_input);
static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
-static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+static DEVICE_ATTR_RO(fan1_input);
static struct attribute *nsa320_attrs[] = {
&sensor_dev_attr_temp1_label.dev_attr.attr,
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index d50fbf93a737..7e3697727537 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -589,22 +589,22 @@ static struct sensor_device_attribute in_max_alarm[] = {
&in_min_alarm[X].dev_attr.attr, \
&in_max_alarm[X].dev_attr.attr
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pc87360_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pc87360_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -620,15 +620,15 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
data->vrm = val;
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
-static ssize_t show_in_alarms(struct device *dev,
+static ssize_t alarms_in_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
return sprintf(buf, "%u\n", data->in_alarms);
}
-static DEVICE_ATTR(alarms_in, S_IRUGO, show_in_alarms, NULL);
+static DEVICE_ATTR_RO(alarms_in);
static struct attribute *pc8736x_vin_attr_array[] = {
VIN_UNIT_ATTRS(0),
@@ -1006,14 +1006,14 @@ static struct sensor_device_attribute temp_crit[] = {
show_temp_crit, set_temp_crit, 2),
};
-static ssize_t show_temp_alarms(struct device *dev,
+static ssize_t alarms_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
return sprintf(buf, "%u\n", data->temp_alarms);
}
-static DEVICE_ATTR(alarms_temp, S_IRUGO, show_temp_alarms, NULL);
+static DEVICE_ATTR_RO(alarms_temp);
/*
* show_temp_min/max_alarm() reads data from the per-channel status
@@ -1106,14 +1106,14 @@ static const struct attribute_group pc8736x_temp_attr_group[] = {
{ .attrs = pc8736x_temp_attr[2] }
};
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct pc87360_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
/*
* Device detection, registration and update
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index cb9fdd37bd0d..dc5a9d5ada51 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -943,14 +943,14 @@ static const struct attribute_group pc87427_group_temp[6] = {
{ .attrs = pc87427_attributes_temp[5] },
};
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct pc87427_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
/*
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 5740888c6242..60e25c85e71c 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -103,16 +103,16 @@ show_in_channel(1);
show_in_channel(2);
show_in_channel(3);
-static ssize_t show_out0_ouput(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t out0_output_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pcf8591_data *data = i2c_get_clientdata(to_i2c_client(dev));
return sprintf(buf, "%d\n", data->aout * 10);
}
-static ssize_t set_out0_output(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t out0_output_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long val;
struct i2c_client *client = to_i2c_client(dev);
@@ -132,19 +132,18 @@ static ssize_t set_out0_output(struct device *dev,
return count;
}
-static DEVICE_ATTR(out0_output, S_IWUSR | S_IRUGO,
- show_out0_ouput, set_out0_output);
+static DEVICE_ATTR_RW(out0_output);
-static ssize_t show_out0_enable(struct device *dev,
+static ssize_t out0_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcf8591_data *data = i2c_get_clientdata(to_i2c_client(dev));
return sprintf(buf, "%u\n", !(!(data->control & PCF8591_CONTROL_AOEF)));
}
-static ssize_t set_out0_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t out0_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct pcf8591_data *data = i2c_get_clientdata(client);
@@ -165,8 +164,7 @@ static ssize_t set_out0_enable(struct device *dev,
return count;
}
-static DEVICE_ATTR(out0_enable, S_IWUSR | S_IRUGO,
- show_out0_enable, set_out0_enable);
+static DEVICE_ATTR_RW(out0_enable);
static struct attribute *pcf8591_attributes[] = {
&dev_attr_out0_enable.attr,
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 19f85c0da270..91544f2312e6 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -205,7 +205,7 @@ static int reg_to_rpm(u16 reg)
return 5400540 / reg;
}
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
@@ -326,7 +326,7 @@ static ssize_t show_in_label(struct device *dev, struct device_attribute
SCH5627_IN_LABELS[attr->index]);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 68c350c704fb..bda3d5285586 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -28,7 +28,6 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
-#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include "sch56xx-common.h"
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index a2fdbb7d20ed..e4d642b673c6 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/bitrev.h>
+#include <linux/of_gpio.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -769,7 +770,7 @@ static ssize_t sht15_show_humidity(struct device *dev,
return ret ? ret : sprintf(buf, "%d\n", sht15_calc_humid(data));
}
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -787,7 +788,7 @@ static SENSOR_DEVICE_ATTR(humidity1_fault, S_IRUGO, sht15_show_status, NULL,
SHT15_STATUS_LOW_BATTERY);
static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR, sht15_show_status,
sht15_store_heater, SHT15_STATUS_HEATER);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *sht15_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_humidity1_input.dev_attr.attr,
@@ -911,6 +912,54 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
return NOTIFY_OK;
}
+#ifdef CONFIG_OF
+static const struct of_device_id sht15_dt_match[] = {
+ { .compatible = "sensirion,sht15" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sht15_dt_match);
+
+/*
+ * This function returns NULL if pdev isn't a device instantiated by the
+ * device tree, a pointer to pdata if it could successfully get all the
+ * information from the device tree, or an ERR_PTR() encoding a negative
+ * errno on error.
+ */
+static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct sht15_platform_data *pdata;
+
+ /* no device tree device */
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
+ if (pdata->gpio_data < 0) {
+ if (pdata->gpio_data != -EPROBE_DEFER)
+ dev_err(dev, "data-gpios not found\n");
+ return ERR_PTR(pdata->gpio_data);
+ }
+
+ pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
+ if (pdata->gpio_sck < 0) {
+ if (pdata->gpio_sck != -EPROBE_DEFER)
+ dev_err(dev, "clk-gpios not found\n");
+ return ERR_PTR(pdata->gpio_sck);
+ }
+
+ return pdata;
+}
+#else
+static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sht15_probe(struct platform_device *pdev)
{
int ret;
@@ -928,11 +977,17 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- if (dev_get_platdata(&pdev->dev) == NULL) {
- dev_err(&pdev->dev, "no platform data supplied\n");
- return -EINVAL;
+ data->pdata = sht15_probe_dt(&pdev->dev);
+ if (IS_ERR(data->pdata))
+ return PTR_ERR(data->pdata);
+ if (data->pdata == NULL) {
+ data->pdata = dev_get_platdata(&pdev->dev);
+ if (data->pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
}
- data->pdata = dev_get_platdata(&pdev->dev);
+
data->supply_uv = data->pdata->supply_mv * 1000;
if (data->pdata->checksum)
data->checksumming = true;
@@ -1075,6 +1130,7 @@ MODULE_DEVICE_TABLE(platform, sht15_device_ids);
static struct platform_driver sht15_driver = {
.driver = {
.name = "sht15",
+ .of_match_table = of_match_ptr(sht15_dt_match),
},
.probe = sht15_probe,
.remove = sht15_remove,
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 84cdb1cf0fb4..06706d288355 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -34,23 +34,29 @@
/* I2C command bytes */
#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3
#define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5
+#define SHT21_READ_SNB_CMD1 0xFA
+#define SHT21_READ_SNB_CMD2 0x0F
+#define SHT21_READ_SNAC_CMD1 0xFC
+#define SHT21_READ_SNAC_CMD2 0xC9
/**
* struct sht21 - SHT21 device specific data
* @hwmon_dev: device registered with hwmon
* @lock: mutex to protect measurement values
- * @valid: only 0 before first measurement is taken
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
* @humidity: cached humidity measurement value
+ * @valid: only 0 before first measurement is taken
+ * @eic: cached electronic identification code text
*/
struct sht21 {
struct i2c_client *client;
struct mutex lock;
- char valid;
unsigned long last_update;
int temperature;
int humidity;
+ char valid;
+ char eic[18];
};
/**
@@ -165,15 +171,97 @@ static ssize_t sht21_show_humidity(struct device *dev,
return sprintf(buf, "%d\n", sht21->humidity);
}
+static ssize_t eic_read(struct sht21 *sht21)
+{
+ struct i2c_client *client = sht21->client;
+ u8 tx[2];
+ u8 rx[8];
+ u8 eic[8];
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = tx,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 8,
+ .buf = rx,
+ },
+ };
+ int ret;
+
+ tx[0] = SHT21_READ_SNB_CMD1;
+ tx[1] = SHT21_READ_SNB_CMD2;
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 0)
+ goto out;
+ eic[2] = rx[0];
+ eic[3] = rx[2];
+ eic[4] = rx[4];
+ eic[5] = rx[6];
+
+ tx[0] = SHT21_READ_SNAC_CMD1;
+ tx[1] = SHT21_READ_SNAC_CMD2;
+ msgs[1].len = 6;
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 0)
+ goto out;
+ eic[0] = rx[3];
+ eic[1] = rx[4];
+ eic[6] = rx[0];
+ eic[7] = rx[1];
+
+ ret = snprintf(sht21->eic, sizeof(sht21->eic),
+ "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ eic[0], eic[1], eic[2], eic[3],
+ eic[4], eic[5], eic[6], eic[7]);
+out:
+ if (ret < 0)
+ sht21->eic[0] = 0;
+
+ return ret;
+}
+
+/**
+ * eic_show() - show Electronic Identification Code in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where EIC is written
+ *
+ * Will be called on read access to the eic sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t eic_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht21 *sht21 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = sizeof(sht21->eic) - 1;
+ mutex_lock(&sht21->lock);
+ if (!sht21->eic[0])
+ ret = eic_read(sht21);
+ if (ret > 0)
+ memcpy(buf, sht21->eic, ret);
+ mutex_unlock(&sht21->lock);
+ return ret;
+}
+
/* sysfs attributes */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
NULL, 0);
static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
NULL, 0);
+static DEVICE_ATTR_RO(eic);
static struct attribute *sht21_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_humidity1_input.dev_attr.attr,
+ &dev_attr_eic.attr,
NULL
};
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 45a028fb8851..6d789aab54c9 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -304,22 +304,23 @@ show_in_offset(3);
show_in_offset(4);
/* Temperature */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp));
}
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *attr,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_over));
}
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
long val;
@@ -336,15 +337,16 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_hyst));
}
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
long val;
@@ -361,11 +363,9 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
- show_temp_over, set_temp_over);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
- show_temp_hyst, set_temp_hyst);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
/* 2 Fans */
static ssize_t show_fan(struct device *dev, struct device_attribute *da,
@@ -492,13 +492,13 @@ show_fan_offset(1);
show_fan_offset(2);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
char *buf)
@@ -516,13 +516,13 @@ static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 15);
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sis5595_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *sis5595_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 5d323186d2c1..c7b6a425e2c0 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -264,8 +264,8 @@ static ssize_t get_pwm_en(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", PWM_EN_FROM_REG(data->pwm[attr->index]));
}
-static ssize_t get_alarms(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t alarms_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", data->alarms);
@@ -440,16 +440,16 @@ fan_present(1);
fan_present(2);
fan_present(3);
-static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct smsc47m1_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *smsc47m1_attributes_fan1[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 15650f247679..6989408033ec 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -400,23 +400,23 @@ show_temp_index(2)
show_temp_index(3)
/* VID */
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct smsc47m192_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct smsc47m192_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -431,7 +431,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
data->vrm = val;
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR_RW(vrm);
/* Alarms */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
new file mode 100644
index 000000000000..55450680fb58
--- /dev/null
+++ b/drivers/hwmon/stts751.c
@@ -0,0 +1,834 @@
+/*
+ * STTS751 sensor driver
+ *
+ * Copyright (C) 2016-2017 Istituto Italiano di Tecnologia - RBCS - EDL
+ * Robotics, Brain and Cognitive Sciences department
+ * Electronic Design Laboratory
+ *
+ * Written by Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on LM95241 driver and LM90 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/util_macros.h>
+
+#define DEVNAME "stts751"
+
+static const unsigned short normal_i2c[] = {
+ 0x48, 0x49, 0x38, 0x39, /* STTS751-0 */
+ 0x4A, 0x4B, 0x3A, 0x3B, /* STTS751-1 */
+ I2C_CLIENT_END };
+
+#define STTS751_REG_TEMP_H 0x00
+#define STTS751_REG_STATUS 0x01
+#define STTS751_STATUS_TRIPT BIT(0)
+#define STTS751_STATUS_TRIPL BIT(5)
+#define STTS751_STATUS_TRIPH BIT(6)
+#define STTS751_REG_TEMP_L 0x02
+#define STTS751_REG_CONF 0x03
+#define STTS751_CONF_RES_MASK 0x0C
+#define STTS751_CONF_RES_SHIFT 2
+#define STTS751_CONF_EVENT_DIS BIT(7)
+#define STTS751_CONF_STOP BIT(6)
+#define STTS751_REG_RATE 0x04
+#define STTS751_REG_HLIM_H 0x05
+#define STTS751_REG_HLIM_L 0x06
+#define STTS751_REG_LLIM_H 0x07
+#define STTS751_REG_LLIM_L 0x08
+#define STTS751_REG_TLIM 0x20
+#define STTS751_REG_HYST 0x21
+#define STTS751_REG_SMBUS_TO 0x22
+
+#define STTS751_REG_PROD_ID 0xFD
+#define STTS751_REG_MAN_ID 0xFE
+#define STTS751_REG_REV_ID 0xFF
+
+#define STTS751_0_PROD_ID 0x00
+#define STTS751_1_PROD_ID 0x01
+#define ST_MAN_ID 0x53
+
+/*
+ * Possible update intervals are (in ms):
+ * 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 62.5, 31.25
+ * However, we are not going to complicate things too much and we stick to
+ * the approximate values in ms.
+ */
+static const int stts751_intervals[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 63, 31
+};
+
+static const struct i2c_device_id stts751_id[] = {
+ { "stts751", 0 },
+ { }
+};
+
+struct stts751_priv {
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex access_lock;
+ u8 interval;
+ int res;
+ int event_max, event_min;
+ int therm;
+ int hyst;
+ bool smbus_timeout;
+ int temp;
+ unsigned long last_update, last_alert_update;
+ u8 config;
+ bool min_alert, max_alert, therm_trip;
+ bool data_valid, alert_valid;
+ bool notify_max, notify_min;
+};
+
+/*
+ * These functions convert temperature from HW format to integer format and
+ * vice versa. They are (mostly) taken from the lm90 driver. Values are in
+ * millidegrees Celsius (mC).
+ */
+static int stts751_to_deg(s16 hw_val)
+{
+ return hw_val * 125 / 32;
+}
+
+static s32 stts751_to_hw(int val)
+{
+ return DIV_ROUND_CLOSEST(val, 125) * 32;
+}
+
+static int stts751_adjust_resolution(struct stts751_priv *priv)
+{
+ u8 res;
+
+ switch (priv->interval) {
+ case 9:
+ /* 10 bits */
+ res = 0;
+ break;
+ case 8:
+ /* 11 bits */
+ res = 1;
+ break;
+ default:
+ /* 12 bits */
+ res = 3;
+ break;
+ }
+
+ if (priv->res == res)
+ return 0;
+
+ priv->config &= ~STTS751_CONF_RES_MASK;
+ priv->config |= res << STTS751_CONF_RES_SHIFT;
+ dev_dbg(&priv->client->dev, "setting res %d. config %x",
+ res, priv->config);
+ priv->res = res;
+
+ return i2c_smbus_write_byte_data(priv->client,
+ STTS751_REG_CONF, priv->config);
+}
+
+static int stts751_update_temp(struct stts751_priv *priv)
+{
+ s32 integer1, integer2, frac;
+
+ /*
+ * There is a trick here, like in the lm90 driver. We have to read two
+ * registers to get the sensor temperature, but we have to beware that
+ * a conversion could occur between the two reads. We could use the
+ * one-shot conversion register, but we don't want to do this (it
+ * disables hardware monitoring). So the solution used here is to read
+ * the high byte once, then the low byte, then the high byte again. If
+ * the new high byte matches the old one, then we have a valid reading.
+ * Otherwise we read the low byte again, and now we believe we have a
+ * correct reading.
+ */
+ integer1 = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_H);
+ if (integer1 < 0) {
+ dev_dbg(&priv->client->dev,
+ "I2C read failed (temp H). ret: %x\n", integer1);
+ return integer1;
+ }
+
+ frac = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_L);
+ if (frac < 0) {
+ dev_dbg(&priv->client->dev,
+ "I2C read failed (temp L). ret: %x\n", frac);
+ return frac;
+ }
+
+ integer2 = i2c_smbus_read_byte_data(priv->client, STTS751_REG_TEMP_H);
+ if (integer2 < 0) {
+ dev_dbg(&priv->client->dev,
+ "I2C 2nd read failed (temp H). ret: %x\n", integer2);
+ return integer2;
+ }
+
+ if (integer1 != integer2) {
+ frac = i2c_smbus_read_byte_data(priv->client,
+ STTS751_REG_TEMP_L);
+ if (frac < 0) {
+ dev_dbg(&priv->client->dev,
+ "I2C 2nd read failed (temp L). ret: %x\n",
+ frac);
+ return frac;
+ }
+ }
+
+ priv->temp = stts751_to_deg((integer1 << 8) | frac);
+ return 0;
+}
+
+static int stts751_set_temp_reg16(struct stts751_priv *priv, int temp,
+ u8 hreg, u8 lreg)
+{
+ s32 hwval;
+ int ret;
+
+ hwval = stts751_to_hw(temp);
+
+ ret = i2c_smbus_write_byte_data(priv->client, hreg, hwval >> 8);
+ if (ret)
+ return ret;
+
+ return i2c_smbus_write_byte_data(priv->client, lreg, hwval & 0xff);
+}
+
+static int stts751_set_temp_reg8(struct stts751_priv *priv, int temp, u8 reg)
+{
+ s32 hwval;
+
+ hwval = stts751_to_hw(temp);
+ return i2c_smbus_write_byte_data(priv->client, reg, hwval >> 8);
+}
+
+static int stts751_read_reg16(struct stts751_priv *priv, int *temp,
+ u8 hreg, u8 lreg)
+{
+ int integer, frac;
+
+ integer = i2c_smbus_read_byte_data(priv->client, hreg);
+ if (integer < 0)
+ return integer;
+
+ frac = i2c_smbus_read_byte_data(priv->client, lreg);
+ if (frac < 0)
+ return frac;
+
+ *temp = stts751_to_deg((integer << 8) | frac);
+
+ return 0;
+}
+
+static int stts751_read_reg8(struct stts751_priv *priv, int *temp, u8 reg)
+{
+ int integer;
+
+ integer = i2c_smbus_read_byte_data(priv->client, reg);
+ if (integer < 0)
+ return integer;
+
+ *temp = stts751_to_deg(integer << 8);
+
+ return 0;
+}
+
+/*
+ * Update alert flags without waiting for the cache to expire. We detect
+ * alerts immediately for the sake of the alert handler; we still need to
+ * deal with caching to work around the fact that the alarm flags in the
+ * status register, despite what the datasheet claims, are always cleared
+ * on read.
+ */
+static int stts751_update_alert(struct stts751_priv *priv)
+{
+ int ret;
+ bool conv_done;
+ int cache_time = msecs_to_jiffies(stts751_intervals[priv->interval]);
+
+ /*
+ * Add another 10% because if we run faster than the HW conversion
+ * rate we will end up reporting alarms incorrectly.
+ */
+ cache_time += cache_time / 10;
+
+ ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_STATUS);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&priv->client->dev, "status reg %x\n", ret);
+ conv_done = ret & (STTS751_STATUS_TRIPH | STTS751_STATUS_TRIPL);
+ /*
+ * Reset the cache if the cache time expired, or if we are sure
+ * we have valid data from a device conversion, or if we know
+ * our cache has never been written.
+ *
+ * Note that when the cache has never been written, the point is
+ * to correctly initialize the timestamp, rather than clearing
+ * the cache values.
+ *
+ * Note that updating the cache timestamp when we get an alarm flag
+ * is required, otherwise we could incorrectly report alarms to be zero.
+ */
+ if (time_after(jiffies, priv->last_alert_update + cache_time) ||
+ conv_done || !priv->alert_valid) {
+ priv->max_alert = false;
+ priv->min_alert = false;
+ priv->alert_valid = true;
+ priv->last_alert_update = jiffies;
+ dev_dbg(&priv->client->dev, "invalidating alert cache\n");
+ }
+
+ priv->max_alert |= !!(ret & STTS751_STATUS_TRIPH);
+ priv->min_alert |= !!(ret & STTS751_STATUS_TRIPL);
+ priv->therm_trip = !!(ret & STTS751_STATUS_TRIPT);
+
+ dev_dbg(&priv->client->dev, "max_alert: %d, min_alert: %d, therm_trip: %d\n",
+ priv->max_alert, priv->min_alert, priv->therm_trip);
+
+ return 0;
+}
+
+static void stts751_alert(struct i2c_client *client,
+ enum i2c_alert_protocol type, unsigned int data)
+{
+ int ret;
+ struct stts751_priv *priv = i2c_get_clientdata(client);
+
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
+ dev_dbg(&client->dev, "alert!");
+
+ mutex_lock(&priv->access_lock);
+ ret = stts751_update_alert(priv);
+ if (ret < 0) {
+ /* default to worst case */
+ priv->max_alert = true;
+ priv->min_alert = true;
+
+ dev_warn(priv->dev,
+ "Alert received, but can't communicate with the device. Triggering all alarms!");
+ }
+
+ if (priv->max_alert) {
+ if (priv->notify_max)
+ dev_notice(priv->dev, "got alert for HIGH temperature");
+ priv->notify_max = false;
+
+ /* unblock alert poll */
+ sysfs_notify(&priv->dev->kobj, NULL, "temp1_max_alarm");
+ }
+
+ if (priv->min_alert) {
+ if (priv->notify_min)
+ dev_notice(priv->dev, "got alert for LOW temperature");
+ priv->notify_min = false;
+
+ /* unblock alert poll */
+ sysfs_notify(&priv->dev->kobj, NULL, "temp1_min_alarm");
+ }
+
+ if (priv->min_alert || priv->max_alert)
+ kobject_uevent(&priv->dev->kobj, KOBJ_CHANGE);
+
+ mutex_unlock(&priv->access_lock);
+}
+
+static int stts751_update(struct stts751_priv *priv)
+{
+ int ret;
+ int cache_time = msecs_to_jiffies(stts751_intervals[priv->interval]);
+
+ if (time_after(jiffies, priv->last_update + cache_time) ||
+ !priv->data_valid) {
+ ret = stts751_update_temp(priv);
+ if (ret)
+ return ret;
+
+ ret = stts751_update_alert(priv);
+ if (ret)
+ return ret;
+ priv->data_valid = true;
+ priv->last_update = jiffies;
+ }
+
+ return 0;
+}
+
+static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->access_lock);
+ ret = stts751_update(priv);
+ if (!ret)
+ priv->notify_max = true;
+ mutex_unlock(&priv->access_lock);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+}
+
+static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->access_lock);
+ ret = stts751_update(priv);
+ if (!ret)
+ priv->notify_min = true;
+ mutex_unlock(&priv->access_lock);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+}
+
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->access_lock);
+ ret = stts751_update(priv);
+ mutex_unlock(&priv->access_lock);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+}
+
+static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+}
+
+static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long temp;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ if (kstrtol(buf, 10, &temp) < 0)
+ return -EINVAL;
+
+ /* HW works in range -64C to +127.937C */
+ temp = clamp_val(temp, -64000, 127937);
+ mutex_lock(&priv->access_lock);
+ ret = stts751_set_temp_reg8(priv, temp, STTS751_REG_TLIM);
+ if (ret)
+ goto exit;
+
+ dev_dbg(&priv->client->dev, "setting therm %ld", temp);
+
+ /*
+ * hysteresis reg is relative to therm, so the HW does not need to be
+ * adjusted, we need to update our local copy only.
+ */
+ priv->hyst = temp - (priv->therm - priv->hyst);
+ priv->therm = temp;
+
+exit:
+ mutex_unlock(&priv->access_lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+}
+
+static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long temp;
+
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ if (kstrtol(buf, 10, &temp) < 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->access_lock);
+ /* HW works in range -64C to +127.937C */
+ temp = clamp_val(temp, -64000, priv->therm);
+ priv->hyst = temp;
+ dev_dbg(&priv->client->dev, "setting hyst %ld", temp);
+ temp = priv->therm - temp;
+ ret = stts751_set_temp_reg8(priv, temp, STTS751_REG_HYST);
+ mutex_unlock(&priv->access_lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_therm_trip(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->access_lock);
+ ret = stts751_update(priv);
+ mutex_unlock(&priv->access_lock);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+}
+
+static ssize_t show_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long temp;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ if (kstrtol(buf, 10, &temp) < 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->access_lock);
+ /* HW works in range -64C to +127.937C */
+ temp = clamp_val(temp, priv->event_min, 127937);
+ ret = stts751_set_temp_reg16(priv, temp,
+ STTS751_REG_HLIM_H, STTS751_REG_HLIM_L);
+ if (ret)
+ goto exit;
+
+ dev_dbg(&priv->client->dev, "setting event max %ld", temp);
+ priv->event_max = temp;
+ ret = count;
+exit:
+ mutex_unlock(&priv->access_lock);
+ return ret;
+}
+
+static ssize_t show_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+}
+
+static ssize_t set_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long temp;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ if (kstrtol(buf, 10, &temp) < 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->access_lock);
+ /* HW works in range -64C to +127.937C */
+ temp = clamp_val(temp, -64000, priv->event_max);
+ ret = stts751_set_temp_reg16(priv, temp,
+ STTS751_REG_LLIM_H, STTS751_REG_LLIM_L);
+ if (ret)
+ goto exit;
+
+ dev_dbg(&priv->client->dev, "setting event min %ld", temp);
+ priv->event_min = temp;
+ ret = count;
+exit:
+ mutex_unlock(&priv->access_lock);
+ return ret;
+}
+
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ stts751_intervals[priv->interval]);
+}
+
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int idx;
+ int ret = count;
+ struct stts751_priv *priv = dev_get_drvdata(dev);
+
+ if (kstrtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ idx = find_closest_descending(val, stts751_intervals,
+ ARRAY_SIZE(stts751_intervals));
+
+ dev_dbg(&priv->client->dev, "setting interval. req:%lu, idx: %d, val: %d",
+ val, idx, stts751_intervals[idx]);
+
+ mutex_lock(&priv->access_lock);
+ if (priv->interval == idx)
+ goto exit;
+
+ /*
+ * In early development stages I became suspicious that the chip
+ * starts to misbehave if an invalid configuration is ever set, even
+ * briefly. While I'm not sure this is really needed, be
+ * conservative and set rate/resolution in such an order that avoids
+ * passing through an invalid configuration.
+ */
+
+ /* speed up: lower the resolution, then modify convrate */
+ if (priv->interval < idx) {
+ dev_dbg(&priv->client->dev, "lower resolution, then modify convrate");
+ priv->interval = idx;
+ ret = stts751_adjust_resolution(priv);
+ if (ret)
+ goto exit;
+ }
+
+ ret = i2c_smbus_write_byte_data(priv->client, STTS751_REG_RATE, idx);
+ if (ret)
+ goto exit;
+ /* slow down: modify convrate, then raise resolution */
+ if (priv->interval != idx) {
+ dev_dbg(&priv->client->dev, "modify convrate, then raise resolution");
+ priv->interval = idx;
+ ret = stts751_adjust_resolution(priv);
+ if (ret)
+ goto exit;
+ }
+ ret = count;
+exit:
+ mutex_unlock(&priv->access_lock);
+
+ return ret;
+}
+
+static int stts751_detect(struct i2c_client *new_client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+ const char *name;
+ int tmp;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_MAN_ID);
+ if (tmp != ST_MAN_ID)
+ return -ENODEV;
+
+ /* lower temperature registers always have bits 0-3 set to zero */
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_TEMP_L);
+ if (tmp & 0xf)
+ return -ENODEV;
+
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_HLIM_L);
+ if (tmp & 0xf)
+ return -ENODEV;
+
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_LLIM_L);
+ if (tmp & 0xf)
+ return -ENODEV;
+
+ /* smbus timeout register always has bits 0-6 set to zero */
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_SMBUS_TO);
+ if (tmp & 0x7f)
+ return -ENODEV;
+
+ tmp = i2c_smbus_read_byte_data(new_client, STTS751_REG_PROD_ID);
+
+ switch (tmp) {
+ case STTS751_0_PROD_ID:
+ name = "STTS751-0";
+ break;
+ case STTS751_1_PROD_ID:
+ name = "STTS751-1";
+ break;
+ default:
+ return -ENODEV;
+ }
+ dev_dbg(&new_client->dev, "Chip %s detected", name);
+
+ strlcpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
+ return 0;
+}
+
+static int stts751_read_chip_config(struct stts751_priv *priv)
+{
+ int ret;
+ int tmp;
+
+ ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_CONF);
+ if (ret < 0)
+ return ret;
+ priv->config = ret;
+ priv->res = (ret & STTS751_CONF_RES_MASK) >> STTS751_CONF_RES_SHIFT;
+
+ ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_RATE);
+ if (ret < 0)
+ return ret;
+ priv->interval = ret;
+
+ ret = stts751_read_reg16(priv, &priv->event_max,
+ STTS751_REG_HLIM_H, STTS751_REG_HLIM_L);
+ if (ret)
+ return ret;
+
+ ret = stts751_read_reg16(priv, &priv->event_min,
+ STTS751_REG_LLIM_H, STTS751_REG_LLIM_L);
+ if (ret)
+ return ret;
+
+ ret = stts751_read_reg8(priv, &priv->therm, STTS751_REG_TLIM);
+ if (ret)
+ return ret;
+
+ ret = stts751_read_reg8(priv, &tmp, STTS751_REG_HYST);
+ if (ret)
+ return ret;
+ priv->hyst = priv->therm - tmp;
+
+ return 0;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, 0644, show_min, set_min, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, 0644, show_max, set_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, 0444, show_min_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, 0444, show_max_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, 0644, show_therm, set_therm, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, 0644, show_hyst, set_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, 0444, show_therm_trip, NULL, 0);
+static SENSOR_DEVICE_ATTR(update_interval, 0644,
+ show_interval, set_interval, 0);
+
+static struct attribute *stts751_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(stts751);
+
+static int stts751_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct stts751_priv *priv;
+ int ret;
+ bool smbus_nto;
+ int rev_id;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ priv->notify_max = true;
+ priv->notify_min = true;
+ i2c_set_clientdata(client, priv);
+ mutex_init(&priv->access_lock);
+
+ if (device_property_present(&client->dev,
+ "smbus-timeout-disable")) {
+ smbus_nto = device_property_read_bool(&client->dev,
+ "smbus-timeout-disable");
+
+ ret = i2c_smbus_write_byte_data(client, STTS751_REG_SMBUS_TO,
+ smbus_nto ? 0 : 0x80);
+ if (ret)
+ return ret;
+ }
+
+ rev_id = i2c_smbus_read_byte_data(client, STTS751_REG_REV_ID);
+ if (rev_id < 0)
+ return -ENODEV;
+ if (rev_id != 0x1) {
+ dev_dbg(&client->dev, "Chip revision 0x%x is untested\n",
+ rev_id);
+ }
+
+ ret = stts751_read_chip_config(priv);
+ if (ret)
+ return ret;
+
+ priv->config &= ~(STTS751_CONF_STOP | STTS751_CONF_EVENT_DIS);
+ ret = i2c_smbus_write_byte_data(client, STTS751_REG_CONF, priv->config);
+ if (ret)
+ return ret;
+
+ priv->dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, priv,
+ stts751_groups);
+ return PTR_ERR_OR_ZERO(priv->dev);
+}
+
+MODULE_DEVICE_TABLE(i2c, stts751_id);
+
+static struct i2c_driver stts751_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVNAME,
+ },
+ .probe = stts751_probe,
+ .id_table = stts751_id,
+ .detect = stts751_detect,
+ .alert = stts751_alert,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(stts751_driver);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
+MODULE_DESCRIPTION("STTS751 sensor driver");
+MODULE_LICENSE("GPL");
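A note on the conversion helpers in the new driver (illustrative, not part of the patch): the 125/32 factor in stts751_to_deg() is simply 1000/256, i.e. the signed 16-bit register value counts 1/256 degC steps and the result is in millidegrees Celsius; stts751_to_hw() is the rounded inverse used when writing limits. A tiny user-space sketch of the same arithmetic, with a hypothetical to_millideg() helper and hypothetical sample values:

#include <stdio.h>

/* Same math as stts751_to_deg(): raw 1/256 degC steps to millidegrees. */
static int to_millideg(short raw)
{
	return raw * 125 / 32;
}

int main(void)
{
	printf("%d\n", to_millideg(0x1900));		/* 6400 -> 25000 (25.0 degC) */
	printf("%d\n", to_millideg((short)0xE700));	/* -6400 -> -25000 (-25.0 degC) */
	return 0;
}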
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index eeeed2c7d081..1f2d13dc9439 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -82,16 +82,6 @@ static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
{ 0, 0x11 }, /* offset */
};
-static const u8 TMP401_TEMP_LSB[7][2] = {
- { 0x15, 0x10 }, /* temp */
- { 0x17, 0x14 }, /* low limit */
- { 0x16, 0x13 }, /* high limit */
- { 0, 0 }, /* therm (crit) limit (unused) */
- { 0x31, 0x35 }, /* lowest */
- { 0x33, 0x37 }, /* highest */
- { 0, 0x12 }, /* offset */
-};
-
static const u8 TMP432_TEMP_MSB_READ[4][3] = {
{ 0x00, 0x01, 0x23 }, /* temp */
{ 0x06, 0x08, 0x16 }, /* low limit */
@@ -106,12 +96,6 @@ static const u8 TMP432_TEMP_MSB_WRITE[4][3] = {
{ 0x20, 0x19, 0x1A }, /* therm (crit) limit */
};
-static const u8 TMP432_TEMP_LSB[3][3] = {
- { 0x29, 0x10, 0x24 }, /* temp */
- { 0x3E, 0x14, 0x18 }, /* low limit */
- { 0x3D, 0x13, 0x17 }, /* high limit */
-};
-
/* [0] = fault, [1] = low, [2] = high, [3] = therm/crit */
static const u8 TMP432_STATUS_REG[] = {
0x1b, 0x36, 0x35, 0x37 };
@@ -213,25 +197,20 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
for (i = 0; i < num_sensors; i++) { /* local / r1 / r2 */
for (j = 0; j < num_regs; j++) { /* temp / low / ... */
u8 regaddr;
- /*
- * High byte must be read first immediately followed
- * by the low byte
- */
+
regaddr = data->kind == tmp432 ?
TMP432_TEMP_MSB_READ[j][i] :
TMP401_TEMP_MSB_READ[j][i];
- val = i2c_smbus_read_byte_data(client, regaddr);
- if (val < 0)
- return val;
- data->temp[j][i] = val << 8;
- if (j == 3) /* crit is msb only */
- continue;
- regaddr = data->kind == tmp432 ? TMP432_TEMP_LSB[j][i]
- : TMP401_TEMP_LSB[j][i];
- val = i2c_smbus_read_byte_data(client, regaddr);
+ if (j == 3) { /* crit is msb only */
+ val = i2c_smbus_read_byte_data(client, regaddr);
+ } else {
+ val = i2c_smbus_read_word_swapped(client,
+ regaddr);
+ }
if (val < 0)
return val;
- data->temp[j][i] |= val;
+
+ data->temp[j][i] = j == 3 ? val << 8 : val;
}
}
return 0;
@@ -373,11 +352,11 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
regaddr = data->kind == tmp432 ? TMP432_TEMP_MSB_WRITE[nr][index]
: TMP401_TEMP_MSB_WRITE[nr][index];
- i2c_smbus_write_byte_data(client, regaddr, reg >> 8);
- if (nr != 3) {
- regaddr = data->kind == tmp432 ? TMP432_TEMP_LSB[nr][index]
- : TMP401_TEMP_LSB[nr][index];
- i2c_smbus_write_byte_data(client, regaddr, reg & 0xFF);
+ if (nr == 3) { /* crit is msb only */
+ i2c_smbus_write_byte_data(client, regaddr, reg >> 8);
+ } else {
+ /* Hardware expects big endian data --> use _swapped */
+ i2c_smbus_write_word_swapped(client, regaddr, reg);
}
data->temp[nr][index] = reg;
@@ -449,7 +428,7 @@ static ssize_t reset_temp_history(struct device *dev,
return count;
}
-static ssize_t show_update_interval(struct device *dev,
+static ssize_t update_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tmp401_data *data = dev_get_drvdata(dev);
@@ -457,9 +436,9 @@ static ssize_t show_update_interval(struct device *dev,
return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct tmp401_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -521,8 +500,7 @@ static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_status, NULL,
static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_status, NULL,
3, TMP432_STATUS_REMOTE1);
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
- set_update_interval);
+static DEVICE_ATTR_RW(update_interval);
static struct attribute *tmp401_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
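For context on the tmp401 rework above (illustrative, not part of the patch): SMBus word transfers put the low byte on the wire first, while these sensors return the high (MSB) register byte first, so a plain word read must be byte-swapped. That is roughly what i2c_smbus_read_word_swapped() from <linux/i2c.h> does; a simplified kernel-context sketch with a hypothetical my_read_word_swapped() name, so treat the exact definition as an approximation:

#include <linux/i2c.h>
#include <linux/swab.h>

/* Read an SMBus word and swap it so the byte transferred first (the
 * device's high byte) lands in bits 15:8 of the result.
 */
static s32 my_read_word_swapped(const struct i2c_client *client, u8 command)
{
	s32 value = i2c_smbus_read_word_data(client, command);

	return (value < 0) ? value : swab16(value);
}

Reading the MSB/LSB pair in a single transaction also preserves the "high byte first, immediately followed by the low byte" ordering that the removed comment in tmp401_update_device_reg16() described.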
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index d1f209a5feac..07a0cb0a1f28 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -88,8 +88,8 @@ static ssize_t show_temp(struct device *dev,
return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
}
-static ssize_t show_cpu_vid(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct via_cputemp_data *data = dev_get_drvdata(dev);
u32 eax, edx;
@@ -119,7 +119,7 @@ static const struct attribute_group via_cputemp_group = {
};
/* Optional attributes */
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_cpu_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
static int via_cputemp_probe(struct platform_device *pdev)
{
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 40dd93c8f9f4..81f35e3a06b8 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -580,14 +580,14 @@ show_fan_offset(1);
show_fan_offset(2);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct via686a_data *data = via686a_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -607,13 +607,13 @@ static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct via686a_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *via686a_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index cb69a8c2ed5b..367b5eb53fb6 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -263,8 +263,8 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
}
/* Special case for input 5 as this has 3.3V scaling built into the chip */
-static ssize_t show_in5(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in5_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
@@ -272,7 +272,7 @@ static ssize_t show_in5(struct device *dev, struct device_attribute *attr,
(((data->in[5] - 3) * 10000 * 54) / (958 * 34)));
}
-static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr,
+static ssize_t in5_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
@@ -281,7 +281,7 @@ static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr,
(((data->in_min[5] - 3) * 10000 * 54) / (958 * 34)));
}
-static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
+static ssize_t in5_max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
@@ -290,8 +290,9 @@ static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
(((data->in_max[5] - 3) * 10000 * 54) / (958 * 34)));
}
-static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in5_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -309,8 +310,9 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in5_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -342,34 +344,35 @@ define_voltage_sysfs(2);
define_voltage_sysfs(3);
define_voltage_sysfs(4);
-static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL);
-static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min);
-static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max);
+static DEVICE_ATTR_RO(in5_input);
+static DEVICE_ATTR_RW(in5_min);
+static DEVICE_ATTR_RW(in5_max);
/* Temperatures */
-static ssize_t show_temp0(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%d\n", data->temp[0] * 250);
}
-static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr,
+static ssize_t temp1_max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[0] * 1000);
}
-static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp1_max_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%d\n", data->temp_min[0] * 1000);
}
-static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp1_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
long val;
@@ -385,8 +388,9 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp1_max_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
long val;
@@ -481,10 +485,9 @@ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp_min, set_temp_min, offset - 1)
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL);
-static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max);
-static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min,
- set_temp0_min);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RW(temp1_max);
+static DEVICE_ATTR_RW(temp1_max_hyst);
define_temperature_sysfs(2);
define_temperature_sysfs(3);
@@ -603,13 +606,13 @@ define_fan_sysfs(1);
define_fan_sysfs(2);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -633,13 +636,13 @@ static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static ssize_t show_name(struct device *dev, struct device_attribute
+static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct vt8231_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct attribute *vt8231_attributes_temps[6][5] = {
{
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 697007afb99c..ab346ed142de 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1687,14 +1687,14 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
@@ -1754,12 +1754,12 @@ static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
};
static ssize_t
-show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 721295b9a051..8ac89d0781cc 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -575,26 +575,30 @@ static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
return sprintf(buf,"%ld\n", in0);
}
-static ssize_t show_regs_in_0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return show_in_0(data, buf, data->in[0]);
}
-static ssize_t show_regs_in_min0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return show_in_0(data, buf, data->in_min[0]);
}
-static ssize_t show_regs_in_max0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return show_in_0(data, buf, data->in_max[0]);
}
-static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in0_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -622,8 +626,9 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
return count;
}
-static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in0_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -651,11 +656,9 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
return count;
}
-static DEVICE_ATTR(in0_input, S_IRUGO, show_regs_in_0, NULL);
-static DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
- show_regs_in_min0, store_regs_in_min0);
-static DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
- show_regs_in_max0, store_regs_in_max0);
+static DEVICE_ATTR_RO(in0_input);
+static DEVICE_ATTR_RW(in0_min);
+static DEVICE_ATTR_RW(in0_max);
static ssize_t
show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
@@ -796,21 +799,22 @@ sysfs_temp_decl(2);
sysfs_temp_decl(3);
static ssize_t
-show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
static ssize_t
-show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%ld\n", (long) data->vrm);
}
static ssize_t
-store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -826,15 +830,15 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long) data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t
show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -860,7 +864,7 @@ static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
static ssize_t
-show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf)
+beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n",
@@ -868,7 +872,7 @@ show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_beep_mask(struct device *dev, struct device_attribute *attr,
+beep_mask_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
@@ -895,8 +899,7 @@ store_beep_mask(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
- show_beep_mask, store_beep_mask);
+static DEVICE_ATTR_RW(beep_mask);
static ssize_t
show_beep(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1264,13 +1267,13 @@ sysfs_temp_type(2);
sysfs_temp_type(3);
static ssize_t
-show_name(struct device *dev, struct device_attribute *devattr, char *buf)
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
struct w83627hf_sio_data *sio_data)
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 54848fdd181e..246fb2365126 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -416,24 +416,24 @@ sysfs_temp_offsets(2);
sysfs_temp_offsets(3);
static ssize_t
-show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83781d_data *data = w83781d_update_device(dev);
return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
static ssize_t
-show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83781d_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%ld\n", (long) data->vrm);
}
static ssize_t
-store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83781d_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -447,16 +447,16 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83781d_data *data = w83781d_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -491,7 +491,7 @@ static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_temp3_alarm, NULL, 0);
-static ssize_t show_beep_mask(struct device *dev,
+static ssize_t beep_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w83781d_data *data = w83781d_update_device(dev);
@@ -500,7 +500,7 @@ static ssize_t show_beep_mask(struct device *dev,
}
static ssize_t
-store_beep_mask(struct device *dev, struct device_attribute *attr,
+beep_mask_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83781d_data *data = dev_get_drvdata(dev);
@@ -527,8 +527,7 @@ store_beep_mask(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
- show_beep_mask, store_beep_mask);
+static DEVICE_ATTR_RW(beep_mask);
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -708,7 +707,7 @@ show_pwm(struct device *dev, struct device_attribute *da, char *buf)
}
static ssize_t
-show_pwm2_enable(struct device *dev, struct device_attribute *da, char *buf)
+pwm2_enable_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct w83781d_data *data = w83781d_update_device(dev);
return sprintf(buf, "%d\n", (int)data->pwm2_enable);
@@ -736,7 +735,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
}
static ssize_t
-store_pwm2_enable(struct device *dev, struct device_attribute *da,
+pwm2_enable_store(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct w83781d_data *data = dev_get_drvdata(dev);
@@ -778,8 +777,7 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 3);
/* only PWM2 can be enabled/disabled */
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
- show_pwm2_enable, store_pwm2_enable);
+static DEVICE_ATTR_RW(pwm2_enable);
static ssize_t
show_sensor(struct device *dev, struct device_attribute *da, char *buf)
@@ -1616,12 +1614,12 @@ static unsigned short isa_address = 0x290;
* we must create it by ourselves.
*/
static ssize_t
-show_name(struct device *dev, struct device_attribute *devattr, char *buf)
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct w83781d_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static struct w83781d_data *w83781d_data_if_isa(void)
{
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 001df856913f..8af6081b4ab4 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1041,14 +1041,14 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
};
/* get realtime status of all sensors items: voltage, temp, fan */
-static ssize_t show_alarms_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
/* Beep control */
@@ -1147,25 +1147,24 @@ static struct sensor_device_attribute sda_beep_ctrl[] = {
};
/* cpu voltage regulation information */
-static ssize_t show_vid_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cpu0_vid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static DEVICE_ATTR_RO(cpu0_vid);
-static ssize_t show_vrm_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83791d_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
-static ssize_t store_vrm_reg(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct w83791d_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1188,7 +1187,7 @@ static ssize_t store_vrm_reg(struct device *dev,
return count;
}
-static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
+static DEVICE_ATTR_RW(vrm);
#define IN_UNIT_ATTRS(X) \
&sda_in_input[X].dev_attr.attr, \
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 0a8bce726b4b..d764602d70db 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -578,7 +578,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
/* get realtime status of all sensors items: voltage, temp, fan */
static ssize_t
-show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
@@ -735,16 +735,16 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-show_chassis_clear(struct device *dev, struct device_attribute *attr,
- char *buf)
+intrusion0_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->chassis);
}
static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
@@ -1047,7 +1047,7 @@ static SENSOR_DEVICE_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 0, 4);
static SENSOR_DEVICE_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 1, 4);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
+static DEVICE_ATTR_RO(alarms);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 2);
@@ -1067,8 +1067,7 @@ static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20);
static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21);
static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
-static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
- show_chassis_clear, store_chassis_clear);
+static DEVICE_ATTR_RW(intrusion0_alarm);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 816aa6caf5d5..dab5c515d5a3 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -324,7 +324,7 @@ static struct i2c_driver w83793_driver = {
};
static ssize_t
-show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
@@ -342,7 +342,7 @@ show_vid(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_vrm(struct device *dev, struct device_attribute *attr,
+vrm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83793_data *data = dev_get_drvdata(dev);
@@ -1169,7 +1169,7 @@ static struct sensor_device_attribute_2 w83793_vid[] = {
SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
};
-static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
+static DEVICE_ATTR_RW(vrm);
static struct sensor_device_attribute_2 sda_single_files[] = {
SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 686971263bef..45d6771fac8c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
- ret = i2c_add_adapter(&id->adap);
- if (ret < 0)
- goto err_clk_dis;
-
/*
* Cadence I2C controller has a bug wherein it generates
* invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
*/
cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ ret = i2c_add_adapter(&id->adap);
+ if (ret < 0)
+ goto err_clk_dis;
+
dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
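[Editor's sketch] The Cadence change only reorders probe so the timeout-workaround register write lands before i2c_add_adapter(), since adapter registration can immediately start transfers for child devices. A generic sketch of that ordering; struct demo_i2c and demo_i2c_hw_init() are illustrative stand-ins, not part of the patch, and adapter fields (algo, name, parent) are elided:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>

struct demo_i2c {
	struct i2c_adapter adap;
};

static void demo_i2c_hw_init(struct demo_i2c *id)
{
	/* clocks, bus dividers, errata/timeout workarounds go here */
}

static int demo_i2c_probe(struct platform_device *pdev)
{
	struct demo_i2c *id;
	int ret;

	id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	demo_i2c_hw_init(id);

	/* Going live must be the last step: i2c_add_adapter() may probe
	 * clients declared in DT/ACPI and issue transfers right away. */
	ret = i2c_add_adapter(&id->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, id);
	return 0;
}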
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6d81c56184d3..e9db857c6226 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -475,30 +475,28 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_tar = 0;
+ u32 ic_con, ic_tar = 0;
/* Disable the adapter */
__i2c_dw_enable_and_wait(dev, false);
/* if the slave address is ten bit address, enable 10BITADDR */
- if (dev->dynamic_tar_update_enabled) {
+ ic_con = dw_readl(dev, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register,
- * otherwise bit 4 of IC_CON is used.
+ * mode has to be enabled via bit 12 of IC_TAR register.
+ * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
+ * detected from registers.
*/
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- u32 ic_con = dw_readl(dev, DW_IC_CON);
-
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
- else
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- dw_writel(dev, ic_con, DW_IC_CON);
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}
+ dw_writel(dev, ic_con, DW_IC_CON);
+
/*
* Set the slave (target) address and enable 10-bit addressing mode
* if applicable.
@@ -963,7 +961,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int r;
- u32 reg;
init_completion(&dev->cmd_complete);
@@ -971,26 +968,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (r)
return r;
- r = i2c_dw_acquire_lock(dev);
- if (r)
- return r;
-
- /*
- * Test if dynamic TAR update is enabled in this controller by writing
- * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
- * field is read-only so it should not succeed
- */
- reg = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
-
- if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
- (reg & DW_IC_CON_10BITADDR_MASTER)) {
- dev->dynamic_tar_update_enabled = true;
- dev_dbg(dev->dev, "Dynamic TAR update enabled");
- }
-
- i2c_dw_release_lock(dev);
-
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
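[Editor's sketch] The DesignWare rework stops probing for I2C_DYNAMIC_TAR_UPDATE at runtime and instead programs 10-bit addressing in both places, which is safe because the controller ignores whichever bit it does not implement. A condensed sketch of the resulting logic; it reuses the driver's internal dw_readl()/dw_writel() helpers and register defines, so it only makes sense inside i2c-designware-core.c, and the final DW_IC_TAR write is how the driver completes the sequence even though it is outside the hunk shown above:

static void demo_dw_set_addressing(struct dw_i2c_dev *dev, struct i2c_msg *msgs)
{
	u32 ic_con = dw_readl(dev, DW_IC_CON);
	u32 ic_tar = 0;

	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		/* IC_CON bit 4 covers controllers without dynamic TAR
		 * update, IC_TAR bit 12 covers those with it; the unused
		 * bit is simply ignored by the hardware. */
		ic_con |= DW_IC_CON_10BITADDR_MASTER;
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	} else {
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	}

	dw_writel(dev, ic_con, DW_IC_CON);
	dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
}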
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 26250b425e2f..c1db3a5a340f 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -125,7 +125,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
- bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c62b7cd475f8..3310f2e0dbd3 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int lpi2c_imx_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int lpi2c_imx_resume(struct device *dev)
+{
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
+
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
.remove = lpi2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
+ .pm = &imx_lpi2c_pm,
},
};
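[Editor's sketch] The LPI2C hunk wires pinctrl sleep/default states into system suspend/resume through SIMPLE_DEV_PM_OPS. The same pattern applies to any platform driver; everything named "demo" below is illustrative, the pinctrl and PM helpers are real kernel API:

#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	/* Park the pins in their "sleep" pinctrl state to save power. */
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* Restore the "default" pinctrl state before the device is used. */
	pinctrl_pm_select_default_state(dev);
	return 0;
}
#endif

/* Evaluates to empty ops when CONFIG_PM_SLEEP is off, so no #ifdef is
 * needed at the .pm assignment below; the callbacks still need the
 * #ifdef above to avoid "defined but not used" warnings. */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = &demo_pm_ops,
	},
};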
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e34d82e79b98..c21ca7bf2efe 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -58,7 +58,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
-#define SMBIOSIZE 8
+#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 port;
int retval;
+ mutex_lock(&piix4_mutex_sb800);
+
/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);
do {
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
usleep_range(1000, 2000);
} while (--retries);
/* SMBus is still owned by the IMC, we give up */
- if (!retries)
+ if (!retries) {
+ mutex_unlock(&piix4_mutex_sb800);
return -EBUSY;
-
- mutex_lock(&piix4_mutex_sb800);
+ }
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
- mutex_unlock(&piix4_mutex_sb800);
-
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+ mutex_unlock(&piix4_mutex_sb800);
+
return retval;
}
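[Editor's sketch] The piix4 fix widens the mutex so it is taken before the IMC semaphore is requested and dropped only after the semaphore is handed back, keeping the two locks strictly nested. A minimal sketch of that nesting; the three demo_* helpers are hypothetical stand-ins for the SB800 index-port and semaphore I/O in the driver:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_sb800_mutex);

/* Stand-ins for the real hardware accesses. */
static bool demo_acquire_imc_semaphore(void) { return true; }
static void demo_release_imc_semaphore(void) { }
static int demo_do_smbus_transaction(void) { return 0; }

static int demo_sb800_access(void)
{
	int ret;

	/* Software mutex first ... */
	mutex_lock(&demo_sb800_mutex);

	/* ... then the hardware semaphore shared with the IMC. */
	if (!demo_acquire_imc_semaphore()) {
		mutex_unlock(&demo_sb800_mutex);
		return -EBUSY;
	}

	ret = demo_do_smbus_transaction();

	/* Release in strict reverse order: semaphore, then mutex. */
	demo_release_imc_semaphore();
	mutex_unlock(&demo_sb800_mutex);

	return ret;
}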
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 583e95042a21..bfb6ba7cac00 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -221,7 +221,8 @@ static int i2c_acpi_get_info(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
- strlcpy(info->type, dev_name(&adev->dev), sizeof(info->type));
+ acpi_set_modalias(adev, dev_name(&adev->dev), info->type,
+ sizeof(info->type));
return 0;
}
@@ -1335,15 +1336,29 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.fwnode = info->fwnode;
i2c_dev_set_name(adap, client);
+
+ if (info->properties) {
+ status = device_add_properties(&client->dev, info->properties);
+ if (status) {
+ dev_err(&adap->dev,
+ "Failed to add properties to client %s: %d\n",
+ client->name, status);
+ goto out_err;
+ }
+ }
+
status = device_register(&client->dev);
if (status)
- goto out_err;
+ goto out_free_props;
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
return client;
+out_free_props:
+ if (info->properties)
+ device_remove_properties(&client->dev);
out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 39ea67f9b066..c99a25c075bc 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -10,6 +10,7 @@ menuconfig IDE
tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
depends on HAVE_IDE
depends on BLOCK
+ select BLK_SCSI_REQUEST
---help---
If you say Y here, your kernel will be able to manage ATA/(E)IDE and
ATAPI units. The most common cases are IDE hard drives and ATAPI
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index f90ea221f7f2..feb30061123b 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct request *rq;
int error;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc;
if (buf && bufflen) {
@@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
goto put_req;
}
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
- rq->cmd[13] = REQ_IDETAPE_PC1;
+ scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req:
blk_put_request(rq);
@@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
- struct request *sense_rq = &drive->sense_rq;
+ struct request *sense_rq = drive->sense_rq;
+ struct scsi_request *req = scsi_req(sense_rq);
unsigned int cmd_len, sense_len;
int err;
@@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
BUG_ON(sense_len > sizeof(*sense));
- if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed)
+ if (ata_sense_request(rq) || drive->sense_rq_armed)
return;
memset(sense, 0, sizeof(*sense));
blk_rq_init(rq->q, sense_rq);
+ scsi_req_init(sense_rq);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO);
@@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
}
sense_rq->rq_disk = rq->rq_disk;
- sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
- sense_rq->cmd[4] = cmd_len;
- sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
+ sense_rq->cmd_flags = REQ_OP_DRV_IN;
+ ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;
+ req->cmd[0] = GPCMD_REQUEST_SENSE;
+ req->cmd[4] = cmd_len;
if (drive->media == ide_tape)
- sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+ req->cmd[13] = REQ_IDETAPE_PC1;
drive->sense_rq_armed = true;
}
@@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
return -ENOMEM;
}
- drive->sense_rq.special = special;
+ drive->sense_rq->special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
- elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
+ elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
- struct request *sense_rq = &drive->sense_rq;
+ struct request *sense_rq = drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
/* init pc from sense_rq */
ide_init_pc(pc);
- memcpy(pc->c, sense_rq->cmd, 12);
+ memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
@@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive)
* commands/drives support that. Let ide_timer_expiry keep polling us
* for these.
*/
- switch (rq->cmd[0]) {
+ switch (scsi_req(rq)->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
@@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive)
default:
if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
- rq->cmd[0]);
+ scsi_req(rq)->cmd[0]);
wait = 0;
break;
}
@@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
int ide_cd_get_xferlen(struct request *rq)
{
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
return 32768;
- case REQ_TYPE_ATA_SENSE:
- case REQ_TYPE_BLOCK_PC:
- case REQ_TYPE_ATA_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
return blk_rq_bytes(rq);
- default:
- return 0;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_PC:
+ case ATA_PRIV_SENSE:
+ return blk_rq_bytes(rq);
+ default:
+ return 0;
+ }
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
@@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
drive->name, __func__, ireason);
}
- if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
+ if (dev_is_idecd(drive) && ata_pc_request(rq))
rq->rq_flags |= RQF_FAILED;
return 1;
@@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
} else
- rq->resid_len = 0;
+ scsi_req(rq)->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name);
}
@@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
local_irq_enable_in_hardirq();
if (drive->media == ide_tape &&
- (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE)
+ (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR;
if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
@@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (drive->media != ide_tape)
pc->rq->errors++;
- if (rq->cmd[0] == REQUEST_SENSE) {
+ if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name);
return ide_do_reset(drive);
@@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (uptodate == 0)
drive->failed_pc = NULL;
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
+ if (ata_misc_request(rq)) {
rq->errors = 0;
error = 0;
} else {
- if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
+ if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
@@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pio_bytes(drive, cmd, write, done);
/* Update transferred byte count */
- rq->resid_len -= done;
+ scsi_req(rq)->resid_len -= done;
bcount -= done;
@@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- rq->cmd[0], done, bcount, rq->resid_len);
+ rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
@@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */
- cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;
@@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
/* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
- hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
+ hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
/* Begin DMA, if necessary */
if (dev_is_idecd(drive)) {
@@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
bytes, 63 * 1024));
/* We haven't transferred any data yet */
- rq->resid_len = bcount;
+ scsi_req(rq)->resid_len = bcount;
if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR;
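[Editor's sketch] The ide-atapi conversion replaces rq->cmd/cmd_type with the block layer's struct scsi_request PDU: private requests are allocated with a REQ_OP_DRV_* opcode, scsi_req_init() prepares the embedded CDB and sense fields, and ide_req(rq)->type carries what REQ_TYPE_* used to say. A condensed submission path mirroring the hunks above, with error handling trimmed and the function name made up; it assumes the same 4.11-era block API the patch targets:

#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/string.h>
#include <scsi/scsi_request.h>

static int demo_queue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	scsi_req_init(rq);			/* sets up cmd[]/sense in the PDU */
	ide_req(rq)->type = ATA_PRIV_MISC;	/* replaces REQ_TYPE_DRV_PRIV */
	rq->special = pc;

	memcpy(scsi_req(rq)->cmd, pc->c, 12);	/* CDB now lives in scsi_req */

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);
	return error;
}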
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9cbd217bc0c9..aef00511ca86 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
* don't log START_STOP unit with LoEj set, since we cannot
* reliably check if drive can auto-close
*/
- if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
+ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
break;
log = 1;
break;
@@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
* toc has not been recorded yet, it will fail with 05/24/00 (which is a
* confusing error)
*/
- if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
+ if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24)
return;
@@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
if (!sense->valid)
break;
if (failed_command == NULL ||
- failed_command->cmd_type != REQ_TYPE_FS)
+ blk_rq_is_passthrough(failed_command))
break;
sector = (sense->information[0] << 24) |
(sense->information[1] << 16) |
@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
- * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original
+ * For ATA_PRIV_SENSE, "rq->special" points to the original
* failed request. Also, the sense data should be read
* directly from rq which might be different from the original
* sense buffer if it got copied during mapping.
@@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
void *sense = bio_data(rq->bio);
if (failed) {
- if (failed->sense) {
- /*
- * Sense is always read into drive->sense_data.
- * Copy back if the failed request has its
- * sense pointer set.
- */
- memcpy(failed->sense, sense, 18);
- failed->sense_len = rq->sense_len;
- }
+ /*
+ * Sense is always read into drive->sense_data, copy back to the
+ * original request.
+ */
+ memcpy(scsi_req(failed)->sense, sense, 18);
+ scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
cdrom_analyze_sense_data(drive, failed);
if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
@@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
"stat 0x%x",
rq->cmd[0], rq->cmd_type, err, stat);
- if (rq->cmd_type == REQ_TYPE_ATA_SENSE) {
+ if (ata_sense_request(rq)) {
/*
* We got an error trying to get sense info from the drive
* (probably while trying to recover from a former error).
@@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
+ if (blk_rq_is_scsi(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;
if (blk_noretry_request(rq))
@@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
switch (sense_key) {
case NOT_READY:
- if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
+ if (req_op(rq) == REQ_OP_WRITE) {
if (ide_cd_breathe(drive, rq))
return 1;
} else {
cdrom_saw_media_change(drive);
- if (rq->cmd_type == REQ_TYPE_FS &&
+ if (!blk_rq_is_passthrough(rq) &&
!(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
@@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
case UNIT_ATTENTION:
cdrom_saw_media_change(drive);
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return 0;
/*
@@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
*
* cdrom_log_sense() knows this!
*/
- if (rq->cmd[0] == GPCMD_START_STOP_UNIT)
+ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break;
/* fall-through */
case DATA_PROTECT:
@@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
break;
default:
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
break;
if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */
@@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
}
- if (rq->cmd_type != REQ_TYPE_FS) {
+ if (blk_rq_is_passthrough(rq)) {
rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
@@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* Some of the trailing request sense fields are optional,
* and some drives don't send them. Sigh.
*/
- if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
+ if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
cmd->nleft > 0 && cmd->nleft <= 5)
cmd->nleft = 0;
}
@@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
- struct request_sense local_sense;
int retries = 10;
- req_flags_t flags = 0;
-
- if (!sense)
- sense = &local_sense;
+ bool failed;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
"rq_flags: 0x%x",
@@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
do {
struct request *rq;
int error;
+ bool delay = false;
- rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
-
- memcpy(rq->cmd, cmd, BLK_MAX_CDB);
- rq->cmd_type = REQ_TYPE_ATA_PC;
- rq->sense = sense;
+ rq = blk_get_request(drive->queue,
+ write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
+ ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {
@@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
- *bufflen = rq->resid_len;
-
- flags = rq->rq_flags;
- blk_put_request(rq);
+ *bufflen = scsi_req(rq)->resid_len;
+ if (sense)
+ memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
- if (flags & RQF_FAILED) {
+ failed = (rq->rq_flags & RQF_FAILED) != 0;
+ if (failed) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
- struct request_sense *reqbuf = sense;
+ struct request_sense *reqbuf = scsi_req(rq)->sense;
if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
@@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* a disk. Retry, but wait a little to give
* the drive time to complete the load.
*/
- ssleep(2);
+ delay = true;
} else {
/* otherwise, don't retry */
retries = 0;
}
--retries;
}
-
- /* end of retry loop */
- } while ((flags & RQF_FAILED) && retries >= 0);
+ blk_put_request(rq);
+ if (delay)
+ ssleep(2);
+ } while (failed && retries >= 0);
/* return an error if the command failed */
- return (flags & RQF_FAILED) ? -EIO : 0;
+ return failed ? -EIO : 0;
}
/*
@@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
- int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE);
+ int sense = ata_sense_request(rq);
unsigned int timeout;
u16 len;
u8 ireason, stat;
@@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_read_bcount_and_ireason(drive, &len, &ireason);
- thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
+ thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
if (thislen > len)
thislen = len;
@@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) {
- if (rq->cmd_type == REQ_TYPE_FS) {
+ switch (req_op(rq)) {
+ default:
/*
* If we're not done reading/writing, complain.
* Otherwise, complete the command normally.
@@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
- } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+ goto out_end;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
ide_cd_request_sense_fixup(drive, cmd);
uptodate = cmd->nleft ? 0 : 1;
@@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (!uptodate)
rq->rq_flags |= RQF_FAILED;
+ goto out_end;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ goto out_end;
}
- goto out_end;
}
rc = ide_check_ireason(drive, rq, len, ireason, write);
@@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
len -= blen;
if (sense && write == 0)
- rq->sense_len += blen;
+ scsi_req(rq)->sense_len += blen;
}
/* pad, if necessary */
if (len > 0) {
- if (rq->cmd_type != REQ_TYPE_FS || write == 0)
+ if (blk_rq_is_passthrough(rq) || write == 0)
ide_pad_transfer(drive, write, len);
else {
printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ switch (req_op(rq)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
timeout = rq->timeout;
- } else {
+ break;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ expiry = ide_cd_expiry;
+ /*FALLTHRU*/
+ default:
timeout = ATAPI_WAIT_PC;
- if (rq->cmd_type != REQ_TYPE_FS)
- expiry = ide_cd_expiry;
+ break;
}
hwif->expiry = expiry;
@@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_started;
out_end:
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
- rq->resid_len = 0;
+ if (blk_rq_is_scsi(rq) && rc == 0) {
+ scsi_req(rq)->resid_len = 0;
blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
ide_cd_complete_failed_rq(drive, rq);
- if (rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(rq)) {
if (cmd->nleft == 0)
uptodate = 1;
} else {
@@ -684,10 +691,10 @@ out_end:
return ide_stopped;
/* make sure it's fully ended */
- if (rq->cmd_type != REQ_TYPE_FS) {
- rq->resid_len -= cmd->nbytes - cmd->nleft;
+ if (blk_rq_is_passthrough(rq)) {
+ scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- rq->resid_len += cmd->last_xfer_len;
+ scsi_req(rq)->resid_len += cmd->last_xfer_len;
}
ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
@@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
rq->cmd[0], rq->cmd_type);
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (blk_rq_is_scsi(rq))
rq->rq_flags |= RQF_QUIET;
else
rq->rq_flags &= ~RQF_FAILED;
@@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
if (drive->debug_mask & IDE_DBG_RQ)
blk_dump_rq_flags(rq, "ide_cd_do_request");
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (cdrom_start_rw(drive, rq) == ide_stopped)
goto out_end;
break;
- case REQ_TYPE_ATA_SENSE:
- case REQ_TYPE_BLOCK_PC:
- case REQ_TYPE_ATA_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ handle_pc:
if (!rq->timeout)
rq->timeout = ATAPI_WAIT_PC;
-
cdrom_do_block_pc(drive, rq);
break;
- case REQ_TYPE_DRV_PRIV:
- /* right now this can only be a reset... */
- uptodate = 1;
- goto out_end;
- default:
- BUG();
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_MISC:
+ /* right now this can only be a reset... */
+ uptodate = 1;
+ goto out_end;
+ case ATA_PRIV_SENSE:
+ case ATA_PRIV_PC:
+ goto handle_pc;
+ default:
+ BUG();
+ }
}
/* prepare sense request for this command */
@@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -1166,7 +1179,7 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \
CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM)
-static struct cdrom_device_ops ide_cdrom_dops = {
+static const struct cdrom_device_ops ide_cdrom_dops = {
.open = ide_cdrom_open_real,
.release = ide_cdrom_release_real,
.drive_status = ide_cdrom_drive_status,
@@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
int hard_sect = queue_logical_block_size(q);
long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
+ struct scsi_request *req = scsi_req(rq);
- memset(rq->cmd, 0, BLK_MAX_CDB);
+ memset(req->cmd, 0, BLK_MAX_CDB);
if (rq_data_dir(rq) == READ)
- rq->cmd[0] = GPCMD_READ_10;
+ req->cmd[0] = GPCMD_READ_10;
else
- rq->cmd[0] = GPCMD_WRITE_10;
+ req->cmd[0] = GPCMD_WRITE_10;
/*
* fill in lba
*/
- rq->cmd[2] = (block >> 24) & 0xff;
- rq->cmd[3] = (block >> 16) & 0xff;
- rq->cmd[4] = (block >> 8) & 0xff;
- rq->cmd[5] = block & 0xff;
+ req->cmd[2] = (block >> 24) & 0xff;
+ req->cmd[3] = (block >> 16) & 0xff;
+ req->cmd[4] = (block >> 8) & 0xff;
+ req->cmd[5] = block & 0xff;
/*
* and transfer length
*/
- rq->cmd[7] = (blocks >> 8) & 0xff;
- rq->cmd[8] = blocks & 0xff;
- rq->cmd_len = 10;
+ req->cmd[7] = (blocks >> 8) & 0xff;
+ req->cmd[8] = blocks & 0xff;
+ req->cmd_len = 10;
return BLKPREP_OK;
}
@@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
*/
static int ide_cdrom_prep_pc(struct request *rq)
{
- u8 *c = rq->cmd;
+ u8 *c = scsi_req(rq)->cmd;
/* transform 6-byte read/write commands to the 10-byte version */
if (c[0] == READ_6 || c[0] == WRITE_6) {
@@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
c[2] = 0;
c[1] &= 0xe0;
c[0] += (READ_10 - READ_6);
- rq->cmd_len = 10;
+ scsi_req(rq)->cmd_len = 10;
return BLKPREP_OK;
}
@@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(rq))
return ide_cdrom_prep_fs(q, rq);
- else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ else if (blk_rq_is_scsi(rq))
return ide_cdrom_prep_pc(rq);
return 0;
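[Editor's sketch] Across ide-cd the old cmd_type comparisons map onto a small set of helpers. A sketch that summarises the mapping used throughout this series; the classification function itself is made up, the helpers and ATA_PRIV_* values are the ones introduced or used by the patch:

#include <linux/blkdev.h>
#include <linux/ide.h>

static const char *demo_classify(struct request *rq)
{
	if (!blk_rq_is_passthrough(rq))
		return "filesystem I/O";	/* was REQ_TYPE_FS */
	if (blk_rq_is_scsi(rq))
		return "SG_IO / block-pc";	/* was REQ_TYPE_BLOCK_PC */

	/* Remaining passthrough requests are REQ_OP_DRV_IN/OUT and carry
	 * the old subtype in the per-request IDE PDU. */
	switch (ide_req(rq)->type) {
	case ATA_PRIV_SENSE:
		return "request sense";		/* was REQ_TYPE_ATA_SENSE */
	case ATA_PRIV_PC:
		return "packet command";	/* was REQ_TYPE_ATA_PC */
	case ATA_PRIV_TASKFILE:
		return "taskfile";		/* was REQ_TYPE_ATA_TASKFILE */
	default:
		return "misc";			/* was REQ_TYPE_DRV_PRIV */
	}
}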
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index f085e3a2e1d6..9fcefbc8425e 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
struct request *rq;
int ret;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index f079ca2f260b..58a6feb74c02 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
while (hi > lo) {
mid = (lo + hi) / 2;
if (packet_command_texts[mid].packet_command ==
- failed_command->cmd[0]) {
+ scsi_req(failed_command)->cmd[0]) {
s = packet_command_texts[mid].text;
break;
}
if (packet_command_texts[mid].packet_command >
- failed_command->cmd[0])
+ scsi_req(failed_command)->cmd[0])
hi = mid;
else
lo = mid + 1;
@@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
printk(KERN_ERR " The failed \"%s\" packet command "
"was: \n \"", s);
for (i = 0; i < BLK_MAX_CDB; i++)
- printk(KERN_CONT "%02x ", failed_command->cmd[i]);
+ printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
printk(KERN_CONT "\"\n");
}
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 0dd43b4fcec6..a45dda5386e4 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
if (!(setting->flags & DS_SYNC))
return setting->set(drive, arg);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_len = 5;
- rq->cmd[0] = REQ_DEVSET_EXEC;
- *(int *)&rq->cmd[1] = arg;
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd_len = 5;
+ scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
+ *(int *)&scsi_req(rq)->cmd[1] = arg;
rq->special = setting->set;
if (blk_execute_rq(q, NULL, rq, 0))
@@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
int err, (*setfunc)(ide_drive_t *, int) = rq->special;
- err = setfunc(drive, *(int *)&rq->cmd[1]);
+ err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
if (err)
rq->errors = err;
ide_complete_rq(drive, err, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 5ceace542b77..186159715b71 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+ BUG_ON(blk_rq_is_passthrough(rq));
ledtrig_disk_activity();
@@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd->tf_flags = IDE_TFLAG_DYN;
cmd->protocol = ATA_PROT_NODATA;
-
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->cmd_flags &= ~REQ_OP_MASK;
+ rq->cmd_flags |= REQ_OP_DRV_OUT;
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq->special = cmd;
cmd->rq = rq;
@@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index d6da011299f5..cf3af6840368 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->cmd_type != REQ_TYPE_FS) {
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ if (blk_rq_is_passthrough(rq)) {
+ if (ata_taskfile_request(rq)) {
struct ide_cmd *cmd = rq->special;
if (cmd)
@@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
struct request *rq = drive->hwif->rq;
- if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- rq->cmd[0] == REQ_DRIVE_RESET) {
+ if (rq && ata_misc_request(rq) &&
+ scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f079d8d1d856..a69e8013f1df 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
"Aborting request!\n");
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (ata_misc_request(rq))
rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
return uptodate;
@@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
pc->rq = rq;
if (cmd == WRITE)
@@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
struct ide_atapi_pc *pc, struct request *rq)
{
ide_init_pc(pc);
- memcpy(pc->c, rq->cmd, sizeof(pc->c));
+ memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
pc->rq = rq;
if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
} else
printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
+ if (ata_misc_request(rq)) {
rq->errors = 0;
ide_complete_rq(drive, 0, blk_rq_bytes(rq));
return ide_stopped;
@@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
break;
- case REQ_TYPE_DRV_PRIV:
- case REQ_TYPE_ATA_SENSE:
- pc = (struct ide_atapi_pc *)rq->special;
- break;
- case REQ_TYPE_BLOCK_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
break;
- default:
- BUG();
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ switch (ide_req(rq)->type) {
+ case ATA_PRIV_MISC:
+ case ATA_PRIV_SENSE:
+ pc = (struct ide_atapi_pc *)rq->special;
+ break;
+ default:
+ BUG();
+ }
}
ide_prep_sense(drive, rq);
@@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
@@ -296,7 +301,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
drive->failed_pc = NULL;
- if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 201e43fcbc94..043b1fb963cb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
drive->dev_flags |= IDE_DFLAG_PARKED;
}
- if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ if (rq && ata_taskfile_request(rq)) {
struct ide_cmd *orig_cmd = rq->special;
if (cmd->tf_flags & IDE_TFLAG_DYN)
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
- u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk;
+ u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
u8 media = drive->media;
drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
- else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
}
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
- u8 cmd = rq->cmd[0];
+ u8 cmd = scsi_req(rq)->cmd[0];
switch (cmd) {
case REQ_PARK_HEADS:
@@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (drive->current_speed == 0xff)
ide_config_drive_speed(drive, drive->desired_speed);
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ if (ata_taskfile_request(rq))
return execute_drive_cmd(drive, rq);
else if (ata_pm_request(rq)) {
struct ide_pm_state *pm = rq->special;
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
pm->pm_step == IDE_PM_COMPLETED)
ide_complete_pm_rq(drive, rq);
return startstop;
- } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ } else if (!rq->rq_disk && ata_misc_request(rq))
/*
* TODO: Once all ULDs have been modified to
* check for specific op codes rather than
@@ -545,6 +545,7 @@ repeat:
goto plug_device;
}
+ scsi_req(rq)->resid_len = blk_rq_bytes(rq);
hwif->rq = rq;
spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d05db2469209..248a3e0ceb46 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (NULL == (void *) arg) {
struct request *rq;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
err = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
@@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive)
struct request *rq;
int ret = 0;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_len = 1;
- rq->cmd[0] = REQ_DRIVE_RESET;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd_len = 1;
+ scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors;
blk_put_request(rq);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 2d7dca56dd24..101aed9a61ca 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
}
spin_unlock_irq(&hwif->lock);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
- rq->cmd[0] = REQ_PARK_HEADS;
- rq->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
+ scsi_req(rq)->cmd_len = 1;
+ ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = &timeout;
rc = blk_execute_rq(q, NULL, rq, 1);
blk_put_request(rq);
@@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
* Make sure that *some* command is sent to the drive after the
* timeout has expired, so power management will be reenabled.
*/
- rq = blk_get_request(q, READ, GFP_NOWAIT);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
+ scsi_req_init(rq);
if (IS_ERR(rq))
goto out;
- rq->cmd[0] = REQ_UNPARK_HEADS;
- rq->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
+ scsi_req(rq)->cmd_len = 1;
+ ide_req(rq)->type = ATA_PRIV_MISC;
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out:
@@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
struct ide_taskfile *tf = &cmd.tf;
memset(&cmd, 0, sizeof(cmd));
- if (rq->cmd[0] == REQ_PARK_HEADS) {
+ if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
drive->sleep = *(unsigned long *)rq->special;
drive->dev_flags |= IDE_DFLAG_SLEEPING;
tf->command = ATA_CMD_IDLEIMMEDIATE;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index a015acdffb39..ec951be4b0c8 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
@@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
@@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
#ifdef DEBUG_PM
printk("%s: completing PM request, %s\n", drive->name,
- (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume");
+ (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
spin_lock_irqsave(q->queue_lock, flags);
- if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND)
+ if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
blk_stop_queue(q);
else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
struct ide_pm_state *pm = rq->special;
- if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND &&
+ if (blk_rq_is_private(rq) &&
+ ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME &&
+ else if (blk_rq_is_private(rq) &&
+ ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
pm->pm_step == IDE_PM_START_RESUME) {
/*
* The first thing we do on wakeup is to wait for BSY bit to
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 330e319419e6..a74ae8df4bb8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
}
}
+static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+ struct ide_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->sreq.sense = req->sense;
+ return 0;
+}
+
/*
* init request queue
*/
@@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive)
* limits and LBA48 we could raise it but as yet
* do not.
*/
-
- q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
+ q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
if (!q)
return 1;
+ q->request_fn = do_ide_request;
+ q->init_rq_fn = ide_init_rq;
+ q->cmd_size = sizeof(struct ide_request);
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return 1;
+ }
+
q->queuedata = drive;
blk_queue_segment_boundary(q, 0xffff);
@@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
ide_port_for_each_dev(i, drive, hwif) {
u8 j = (hwif->index * MAX_DRIVES) + i;
u16 *saved_id = drive->id;
+ struct request *saved_sense_rq = drive->sense_rq;
memset(drive, 0, sizeof(*drive));
memset(saved_id, 0, SECTOR_SIZE);
drive->id = saved_id;
+ drive->sense_rq = saved_sense_rq;
drive->media = ide_disk;
drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
int i;
ide_port_for_each_dev(i, drive, hwif) {
+ kfree(drive->sense_rq);
kfree(drive->id);
kfree(drive);
}
@@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
{
+ ide_drive_t *drive;
int i;
for (i = 0; i < MAX_DRIVES; i++) {
- ide_drive_t *drive;
-
drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
if (drive == NULL)
goto out_nomem;
@@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
*/
drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
if (drive->id == NULL)
- goto out_nomem;
+ goto out_free_drive;
+
+ drive->sense_rq = kmalloc(sizeof(struct request) +
+ sizeof(struct ide_request), GFP_KERNEL);
+ if (!drive->sense_rq)
+ goto out_free_id;
hwif->devices[i] = drive;
}
return 0;
+out_free_id:
+ kfree(drive->id);
+out_free_drive:
+ kfree(drive);
out_nomem:
ide_port_free_devices(hwif);
return -ENOMEM;
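[Editor's sketch] ide-probe now sizes every request with a driver PDU (struct ide_request) and points the embedded scsi_request's sense buffer at it from an init_rq_fn hook, so no per-command sense allocation is needed. A sketch of that queue setup, mirroring the hunks above for the legacy request_fn path; the demo_* names are illustrative, the queue fields and helpers are the ones the patch relies on:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/ide.h>

static int demo_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	struct ide_request *req = blk_mq_rq_to_pdu(rq);

	/* Point the generic scsi_request at the sense buffer embedded in
	 * the per-request PDU. */
	req->sreq.sense = req->sense;
	return 0;
}

static struct request_queue *demo_init_queue(ide_drive_t *drive, int node)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node);
	if (!q)
		return NULL;

	q->request_fn = do_ide_request;
	q->init_rq_fn = demo_init_rq;
	q->cmd_size = sizeof(struct ide_request);	/* PDU per request */

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	q->queuedata = drive;
	return q;
}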
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 9ecf4e35adcd..3c1b7974d66d 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
/* correct remaining bytes to transfer */
if (pc->flags & PC_FLAG_DMA_ERROR)
- rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
+ scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
/*
* If error was the result of a zero-length read or write command,
@@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
pc->flags |= PC_FLAG_ABORT;
}
if (!(pc->flags & PC_FLAG_ABORT) &&
- (blk_rq_bytes(rq) - rq->resid_len))
+ (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
}
}
@@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
"itself - Aborting request!\n");
} else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
unsigned int blocks =
- (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size;
+ (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
tape->avg_size += blocks * tape->blk_size;
@@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
pc->flags |= PC_FLAG_WRITING;
}
- memcpy(rq->cmd, pc->c, 12);
+ memcpy(scsi_req(rq)->cmd, pc->c, 12);
}
static ide_startstop_t idetape_do_request(ide_drive_t *drive,
@@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc = NULL;
struct ide_cmd cmd;
+ struct scsi_request *req = scsi_req(rq);
u8 stat;
ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
- rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
+ req->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq));
- BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV ||
- rq->cmd_type == REQ_TYPE_ATA_SENSE));
+ BUG_ON(!blk_rq_is_private(rq));
+ BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
+ ide_req(rq)->type != ATA_PRIV_SENSE);
/* Retry a failed packet command */
if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
stat = hwif->tp_ops->read_status(hwif);
if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
- (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
+ (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
@@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
tape->name);
- if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+ if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
} else {
@@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->postponed_rq = false;
}
- if (rq->cmd[13] & REQ_IDETAPE_READ) {
+ if (req->cmd[13] & REQ_IDETAPE_READ) {
pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
+ if (req->cmd[13] & REQ_IDETAPE_WRITE) {
pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_PC1) {
+ if (req->cmd[13] & REQ_IDETAPE_PC1) {
pc = (struct ide_atapi_pc *)rq->special;
- rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
- rq->cmd[13] |= REQ_IDETAPE_PC2;
+ req->cmd[13] &= ~(REQ_IDETAPE_PC1);
+ req->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
}
- if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+ if (req->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
}
@@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
BUG_ON(size < 0 || size % tape->blk_size);
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd[13] = cmd;
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_MISC;
+ scsi_req(rq)->cmd[13] = cmd;
rq->rq_disk = tape->disk;
rq->__sector = tape->first_frame;
@@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
blk_execute_rq(drive->queue, tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
- size -= rq->resid_len;
+ size -= scsi_req(rq)->resid_len;
tape->cur = tape->buf;
if (cmd == REQ_IDETAPE_READ)
tape->valid = size;
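
The ide-tape conversion above stops touching rq->cmd and rq->resid_len directly: requests are allocated as REQ_OP_DRV_IN/OUT, the embedded scsi_request is initialised with scsi_req_init(), the IDE-private class goes into ide_req(rq)->type, and the command bytes plus the residual count live in scsi_req(rq). A condensed sketch of that submission sequence using only the helpers visible in the hunks (a sketch, with error handling trimmed):

/* Sketch: issue one driver-private request the way idetape_queue_rw_tail()
 * does after this conversion; 'op' stands in for a REQ_IDETAPE_* bit. */
static int demo_idetape_misc_rq(ide_drive_t *drive, struct gendisk *disk, u8 op)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	scsi_req_init(rq);			/* reset cmd[], cmd_len, resid_len */
	ide_req(rq)->type = ATA_PRIV_MISC;	/* IDE-private request class */
	scsi_req(rq)->cmd[13] = op;		/* same payload byte as above */
	rq->rq_disk = disk;

	blk_execute_rq(drive->queue, disk, rq, 0);
	error = rq->errors ? -EIO : 0;		/* rq->errors still exists here */
	blk_put_request(rq);
	return error;
}
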
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a716693417a3..247b9faccce1 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
{
struct request *rq;
int error;
- int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
- rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
- rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq = blk_get_request(drive->queue,
+ (cmd->tf_flags & IDE_TFLAG_WRITE) ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+ scsi_req_init(rq);
+ ide_req(rq)->type = ATA_PRIV_TASKFILE;
/*
* (ks) We transfer currently only whole sectors.
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 247853ea1368..c3062b53056f 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -54,7 +54,7 @@
#define DRV_NAME "sis5513"
/* registers layout and init values are chipset family dependent */
-
+#undef ATA_16
#define ATA_16 0x01
#define ATA_33 0x02
#define ATA_66 0x03
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bbf0c521beb..7d61b566e148 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
static int palmas_gpadc_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
static int palmas_gpadc_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 9a081465c42f..6bb23a49e81e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
static int __maybe_unused afe4403_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
static int __maybe_unused afe4403_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 45266404f7e3..964f5231a831 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
static int __maybe_unused afe4404_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
static int __maybe_unused afe4404_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 90ab8a2d2846..183c14329d6e 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
- while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+ while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
ret = max30100_read_measurement(data);
if (ret)
break;
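
The max30100 hunk is a pure operator-precedence fix: in the old expression '>' bound tighter than '=', so cnt received the boolean result of the comparison rather than the FIFO count. A standalone illustration of the two groupings (ordinary user-space C with an illustrative fifo_count() stub):

#include <stdio.h>

static int fifo_count(void) { return 5; }	/* pretend 5 samples are queued */

int main(void)
{
	int cnt;

	/* Old grouping: the comparison runs first, cnt gets 0 or 1. */
	cnt = fifo_count() > 0;
	printf("without parentheses: cnt = %d\n", cnt);	/* prints 1 */

	/* Fixed grouping: assign first, then test the assigned value. */
	if ((cnt = fifo_count()) > 0)
		printf("with parentheses:    cnt = %d\n", cnt);	/* prints 5 */

	return 0;
}
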
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 9c47bc98f3ac..2a22ad920333 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -71,7 +71,8 @@
* a) select an implementation using busy loop polling on those systems
* b) use the checksum to do some probabilistic decoding
*/
-#define DHT11_START_TRANSMISSION 18 /* ms */
+#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
+#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
#define DHT11_MIN_TIMERES 34000 /* ns */
#define DHT11_THRESHOLD 49000 /* ns */
#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = gpio_direction_output(dht11->gpio, 0);
if (ret)
goto err;
- msleep(DHT11_START_TRANSMISSION);
+ usleep_range(DHT11_START_TRANSMISSION_MIN,
+ DHT11_START_TRANSMISSION_MAX);
ret = gpio_direction_input(dht11->gpio);
if (ret)
goto err;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac877ca..4eb5a80e5d81 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2467,14 +2467,12 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
struct net_device *dev;
prio = rt_tos2priority(tos);
- dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
- vlan_dev_real_dev(ndev) : ndev;
-
+ dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(ndev))
return (vlan_dev_get_egress_qos_mask(ndev, prio) &
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
@@ -2811,7 +2809,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
- if (dst_addr->sa_family == AF_INET6) {
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f0cb28..4609b921f899 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
if (access & IB_ACCESS_ON_DEMAND) {
+ put_pid(umem->pid);
ret = ib_umem_odp_get(context, umem);
if (ret) {
kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
+ put_pid(umem->pid);
kfree(umem);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index b9efadfffb4f..e66e75921797 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -55,14 +55,14 @@
#define put_ep(ep) { \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+ ep, kref_read(&((ep)->kref))); \
+ WARN_ON(kref_read(&((ep)->kref)) < 1); \
kref_put(&((ep)->kref), __free_ep); \
}
#define get_ep(ep) { \
PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
+ ep, kref_read(&((ep)->kref))); \
kref_get(&((ep)->kref)); \
}
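
The iwch macros above stop peeking at kref.refcount with atomic_read() and use the kref_read() accessor instead, so the debug prints and sanity checks keep working no matter how the count is represented internally. A minimal sketch of the same pattern outside the macros, built around an illustrative object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative object; only the embedded kref matters for this sketch. */
struct demo_ep {
	struct kref kref;
};

static void demo_ep_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ep, kref));
}

static void demo_ep_put(struct demo_ep *ep)
{
	/* kref_read() is for diagnostics only; never base lifetime decisions on it. */
	pr_debug("put ep %p refcnt %u\n", ep, kref_read(&ep->kref));
	WARN_ON(kref_read(&ep->kref) < 1);
	kref_put(&ep->kref, demo_ep_release);
}
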
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe1853da4..6262dc035f3c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
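
Several providers in this series drop the same open-coded MTU ladder in favour of ib_mtu_int_to_enum(). The helper presumably just centralises that ladder; the sketch below reconstructs the mapping from the removed lines and is an illustration, not necessarily the exact upstream body:

/* Illustrative mapping from a byte MTU to the IB enum used above. */
static inline enum ib_mtu demo_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
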
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index d939980a708f..a9194db7f9b8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -961,7 +961,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
case IWCH_QP_STATE_RTS:
switch (attrs->next_state) {
case IWCH_QP_STATE_CLOSING:
- BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
qhp->attr.state = IWCH_QP_STATE_CLOSING;
if (!internal) {
abort=0;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc76d2d..9398143d7c5e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
switch (ep->com.state) {
case MPA_REQ_SENT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_request(ep, skb);
break;
case FPDU_MODE: {
struct c4iw_qp_attributes attrs;
+
+ update_rx_credits(ep, dlen);
BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..bec82a600d77 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Special cqe for drain WR completions...
+ */
+ if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+ *cqe = *hw_cqe;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
+ case C4IW_DRAIN_OPCODE:
+ wc->opcode = IB_WC_SEND;
+ break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
"in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq) {
- if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
- if (t4_sq_empty(wq))
- complete(&qhp->sq_drained);
- if (t4_rq_empty(wq))
- complete(&qhp->rq_drained);
- }
+ if (wq)
spin_unlock(&qhp->lock);
- }
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae6dc3f..40c0e7b9fc6e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
}
+ rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+ if (!rdev->free_workq) {
+ err = -ENOMEM;
+ goto err_free_status_page;
+ }
+
rdev->status_page->db_off = 0;
return 0;
+err_free_status_page:
+ free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a46fde..d19662f635b1 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
struct list_head qpids;
struct list_head cqids;
struct mutex lock;
+ struct kref kref;
};
enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
atomic_t wr_log_idx;
struct wr_log_entry *wr_log;
int wr_log_size;
+ struct workqueue_struct *free_workq;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
- struct completion rq_drained;
- struct completion sq_drained;
+ struct work_struct free_work;
+ struct c4iw_ucontext *ucontext;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
+ struct kref kref;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_get(&ucontext->kref);
+}
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -654,14 +672,14 @@ enum c4iw_mmid_state {
#define c4iw_put_ep(ep) { \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
- WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+ ep, kref_read(&((ep)->kref))); \
+ WARN_ON(kref_read(&((ep)->kref)) < 1); \
kref_put(&((ep)->kref), _c4iw_free_ep); \
}
#define c4iw_get_ep(ep) { \
PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
- ep, atomic_read(&((ep)->kref.refcount))); \
+ ep, kref_read(&((ep)->kref))); \
kref_get(&((ep)->kref)); \
}
void _c4iw_free_ep(struct kref *kref);
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7e0fd7..3345e1c312f7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
return -ENOSYS;
}
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
{
- struct c4iw_dev *rhp = to_c4iw_dev(context->device);
- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- PDBG("%s context %p\n", __func__, context);
+ ucontext = container_of(kref, struct c4iw_ucontext, kref);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+ PDBG("%s ucontext %p\n", __func__, ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+ PDBG("%s context %p\n", __func__, context);
+ c4iw_put_ucontext(ucontext);
return 0;
}
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
+ kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
- dev->ibdev.drain_sq = c4iw_drain_sq;
- dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542e13a2..d4fd2f5c8326 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0;
}
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
+
+ qhp = container_of(work, struct c4iw_qp, free_work);
+ ucontext = qhp->ucontext;
+ rhp = qhp->rhp;
+
+ PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ if (ucontext)
+ c4iw_put_ucontext(ucontext);
+ kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
{
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
PDBG("%s qhp %p\n", __func__, qhp);
- kfree(qhp);
+ queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+ kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *schp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ cq = &schp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&schp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&schp->lock, flag);
+
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *rchp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ cq = &rchp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&rchp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_sq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_rq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -1503,7 +1580,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
t4_set_wq_in_error(&qhp->wq);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_CLOSING:
- if (!internal) {
+
+ /*
+ * Allow kernel users to move to ERROR for qp draining.
+ */
+ if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+ C4IW_QP_STATE_ERROR)) {
ret = -EINVAL;
goto out;
}
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
struct c4iw_qp_attributes attrs;
- struct c4iw_ucontext *ucontext;
qhp = to_c4iw_qp(ib_qp);
rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
spin_unlock_irq(&rhp->lock);
free_ird(rhp, qhp->attr.max_ird);
- ucontext = ib_qp->uobject ?
- to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
- destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
c4iw_qp_rem_ref(ib_qp);
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
- init_completion(&qhp->sq_drained);
- init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
kref_init(&qhp->kref);
+ INIT_WORK(&qhp->free_work, free_qp_work);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ma_sync_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, ma_sync_key_mm);
}
+
+ c4iw_get_ucontext(ucontext);
+ qhp->ucontext = ucontext;
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
- struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
- (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_sq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_rq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->rq_drained);
-}
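
With the drain completions now generated in software and destroy_qp() moved out of c4iw_destroy_qp(), the heavyweight teardown runs in free_qp_work() on the dedicated free_workq; the last reference drop only schedules that work, since the kref release callback may fire from a context that cannot block while the hardware teardown can. A minimal sketch of that last-put-defers-to-a-workqueue shape, with illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative object whose final teardown must run in process context. */
struct demo_qp {
	struct kref kref;
	struct work_struct free_work;
	struct workqueue_struct *wq;	/* owned by the device, not the QP */
};

static void demo_free_work(struct work_struct *work)
{
	struct demo_qp *qp = container_of(work, struct demo_qp, free_work);

	/* Sleeping teardown (firmware commands, memory release) is safe here. */
	kfree(qp);
}

static void demo_queue_free(struct kref *kref)
{
	struct demo_qp *qp = container_of(kref, struct demo_qp, kref);

	/* Runs wherever the last kref_put() happens, possibly in atomic context. */
	queue_work(qp->wq, &qp->free_work);
}

static void demo_qp_put(struct demo_qp *qp)
{
	kref_put(&qp->kref, demo_queue_free);
}
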
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381aa83c8..640d22148a3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ u64 drain_cookie;
} u;
__be64 reserved;
__be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df9e1a7..4c000d60d5c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b3ef47c3ab73..31803b367104 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+ void __iomem *uar_page = mdev->priv.uar->map;
unsigned long irq_flags;
int ret = 0;
@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
- uar_page,
- MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
- to_mcq(ibcq)->mcq.cons_index);
+ uar_page, to_mcq(ibcq)->mcq.cons_index);
return ret;
}
@@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- *index = to_mucontext(context)->uuari.uars[0].index;
+ *index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
if (unlikely((*cqe_size != 64) ||
@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- *index = dev->mdev->priv.uuari.uars[0].index;
+ *index = dev->mdev->priv.uar->index;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d566f6738833..9d8535385bb8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,6 +53,7 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
@@ -672,17 +673,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
}
- if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw->outlen)) {
- resp.mlx5_ib_support_multi_pkt_send_wqes =
- MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
- resp.response_length +=
- sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
- }
-
- if (field_avail(typeof(resp), reserved, uhw->outlen))
- resp.response_length += sizeof(resp.reserved);
-
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -706,6 +696,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.packet_pacing_caps);
}
+ if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+ uhw->outlen)) {
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+ resp.response_length +=
+ sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ }
+
+ if (field_avail(typeof(resp), reserved, uhw->outlen))
+ resp.response_length += sizeof(resp.reserved);
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -992,6 +993,86 @@ out:
return err;
}
+static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
+{
+ mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
+ caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
+}
+
+static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
+ struct mlx5_ib_alloc_ucontext_req_v2 *req,
+ u32 *num_sys_pages)
+{
+ int uars_per_sys_page;
+ int bfregs_per_sys_page;
+ int ref_bfregs = req->total_num_bfregs;
+
+ if (req->total_num_bfregs == 0)
+ return -EINVAL;
+
+ BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
+ BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
+
+ if (req->total_num_bfregs > MLX5_MAX_BFREGS)
+ return -ENOMEM;
+
+ uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
+ bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
+ *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+
+ if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
+ return -EINVAL;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+ MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
+ lib_uar_4k ? "yes" : "no", ref_bfregs,
+ req->total_num_bfregs, *num_sys_pages);
+
+ return 0;
+}
+
+static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int err;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_sys_pages; i++) {
+ err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
+ if (err)
+ goto error;
+
+ mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
+ }
+ return 0;
+
+error:
+ for (--i; i >= 0; i--)
+ if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
+ mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+
+ return err;
+}
+
+static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int err;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_sys_pages; i++) {
+ err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+ return err;
+ }
+ }
+ return 0;
+}
+
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@@ -999,17 +1080,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
struct mlx5_ib_ucontext *context;
- struct mlx5_uuar_info *uuari;
- struct mlx5_uar *uars;
- int gross_uuars;
- int num_uars;
+ struct mlx5_bfreg_info *bfregi;
int ver;
- int uuarn;
int err;
- int i;
size_t reqlen;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
+ bool lib_uar_4k;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
@@ -1032,27 +1109,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (req.flags)
return ERR_PTR(-EINVAL);
- if (req.total_num_uuars > MLX5_MAX_UUARS)
- return ERR_PTR(-ENOMEM);
-
- if (req.total_num_uuars == 0)
- return ERR_PTR(-EINVAL);
-
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP);
- if (reqlen > sizeof(req) &&
- !ib_is_udata_cleared(udata, sizeof(req),
- reqlen - sizeof(req)))
- return ERR_PTR(-EOPNOTSUPP);
-
- req.total_num_uuars = ALIGN(req.total_num_uuars,
- MLX5_NON_FP_BF_REGS_PER_PAGE);
- if (req.num_low_latency_uuars > req.total_num_uuars - 1)
+ req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+ MLX5_NON_FP_BFREGS_PER_UAR);
+ if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return ERR_PTR(-EINVAL);
- num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
- gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
@@ -1065,6 +1129,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.cqe_version = min_t(__u8,
(__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
req.max_cqe_version);
+ resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
+ resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
resp.response_length = min(offsetof(typeof(resp), response_length) +
sizeof(resp.response_length), udata->outlen);
@@ -1072,58 +1140,58 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (!context)
return ERR_PTR(-ENOMEM);
- uuari = &context->uuari;
- mutex_init(&uuari->lock);
- uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
- if (!uars) {
- err = -ENOMEM;
+ lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+ bfregi = &context->bfregi;
+
+ /* updates req->total_num_bfregs */
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+ if (err)
goto out_ctx;
- }
- uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
- sizeof(*uuari->bitmap),
+ mutex_init(&bfregi->lock);
+ bfregi->lib_uar_4k = lib_uar_4k;
+ bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
- if (!uuari->bitmap) {
+ if (!bfregi->count) {
err = -ENOMEM;
- goto out_uar_ctx;
- }
- /*
- * clear all fast path uuars
- */
- for (i = 0; i < gross_uuars; i++) {
- uuarn = i & 3;
- if (uuarn == 2 || uuarn == 3)
- set_bit(i, uuari->bitmap);
+ goto out_ctx;
}
- uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
- if (!uuari->count) {
+ bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
+ sizeof(*bfregi->sys_pages),
+ GFP_KERNEL);
+ if (!bfregi->sys_pages) {
err = -ENOMEM;
- goto out_bitmap;
+ goto out_count;
}
- for (i = 0; i < num_uars; i++) {
- err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
- if (err)
- goto out_count;
- }
+ err = allocate_uars(dev, context);
+ if (err)
+ goto out_sys_pages;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
+ context->upd_xlt_page = __get_free_page(GFP_KERNEL);
+ if (!context->upd_xlt_page) {
+ err = -ENOMEM;
+ goto out_uars;
+ }
+ mutex_init(&context->upd_xlt_page_mutex);
+
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
err = mlx5_core_alloc_transport_domain(dev->mdev,
&context->tdn);
if (err)
- goto out_uars;
+ goto out_page;
}
INIT_LIST_HEAD(&context->vma_private_list);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- resp.tot_uuars = req.total_num_uuars;
+ resp.tot_bfregs = req.total_num_bfregs;
resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1135,32 +1203,46 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
+ if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+ if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+ mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
+ resp.eth_min_inline++;
+ }
+ resp.response_length += sizeof(resp.eth_min_inline);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
- if (PAGE_SIZE <= 4096 &&
- field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
- resp.comp_mask |=
- MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
- resp.hca_core_clock_offset =
- offsetof(struct mlx5_init_seg, internal_timer_h) %
- PAGE_SIZE;
+ if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ if (PAGE_SIZE <= 4096) {
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp.hca_core_clock_offset =
+ offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+ }
resp.response_length += sizeof(resp.hca_core_clock_offset) +
sizeof(resp.reserved2);
}
+ if (field_avail(typeof(resp), log_uar_size, udata->outlen))
+ resp.response_length += sizeof(resp.log_uar_size);
+
+ if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
+ resp.response_length += sizeof(resp.num_uars_per_page);
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_td;
- uuari->ver = ver;
- uuari->num_low_latency_uuars = req.num_low_latency_uuars;
- uuari->uars = uars;
- uuari->num_uars = num_uars;
+ bfregi->ver = ver;
+ bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
context->cqe_version = resp.cqe_version;
+ context->lib_caps = req.lib_caps;
+ print_lib_caps(dev, context->lib_caps);
return &context->ibucontext;
@@ -1168,20 +1250,21 @@ out_td:
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+out_page:
+ free_page(context->upd_xlt_page);
+
out_uars:
- for (i--; i >= 0; i--)
- mlx5_cmd_free_uar(dev->mdev, uars[i].index);
-out_count:
- kfree(uuari->count);
+ deallocate_uars(dev, context);
-out_bitmap:
- kfree(uuari->bitmap);
+out_sys_pages:
+ kfree(bfregi->sys_pages);
-out_uar_ctx:
- kfree(uars);
+out_count:
+ kfree(bfregi->count);
out_ctx:
kfree(context);
+
return ERR_PTR(err);
}
@@ -1189,28 +1272,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
- struct mlx5_uuar_info *uuari = &context->uuari;
- int i;
+ struct mlx5_bfreg_info *bfregi;
+ bfregi = &context->bfregi;
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
- for (i = 0; i < uuari->num_uars; i++) {
- if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
- mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
- }
-
- kfree(uuari->count);
- kfree(uuari->bitmap);
- kfree(uuari->uars);
+ free_page(context->upd_xlt_page);
+ deallocate_uars(dev, context);
+ kfree(bfregi->sys_pages);
+ kfree(bfregi->count);
kfree(context);
return 0;
}
-static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi,
+ int idx)
{
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
+ int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
+
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
+ bfregi->sys_pages[idx] / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -1365,11 +1451,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- struct mlx5_uuar_info *uuari = &context->uuari;
+ struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
+ int uars_per_page;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ idx = get_index(vma->vm_pgoff);
+ if (idx % uars_per_page ||
+ idx * uars_per_page >= bfregi->num_sys_pages) {
+ mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+ return -EINVAL;
+ }
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
@@ -1392,14 +1490,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
}
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- idx = get_index(vma->vm_pgoff);
- if (idx >= uuari->num_uars)
- return -EINVAL;
-
- pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+ pfn = uar_index2pfn(dev, bfregi, idx);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
@@ -1622,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
@@ -3060,8 +3151,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
-
if (!mlx5_lag_is_active(mdev))
name = "mlx5_%d";
else
@@ -3237,9 +3326,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_odp;
+ dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+ if (!dev->mdev->priv.uar)
+ goto err_q_cnt;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+ if (err)
+ goto err_uar_page;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+ if (err)
+ goto err_bfreg;
+
err = ib_register_device(&dev->ib_dev, NULL);
if (err)
- goto err_q_cnt;
+ goto err_fp_bfreg;
err = create_umr_res(dev);
if (err)
@@ -3262,6 +3363,15 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_fp_bfreg:
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+
+err_bfreg:
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+err_uar_page:
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+
err_q_cnt:
mlx5_ib_dealloc_q_counters(dev);
@@ -3293,6 +3403,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+ mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
@@ -3307,6 +3420,9 @@ static struct mlx5_interface mlx5_ib_interface = {
.add = mlx5_ib_add,
.remove = mlx5_ib_remove,
.event = mlx5_ib_event,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ .pfault = mlx5_ib_pfault,
+#endif
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};
@@ -3317,25 +3433,14 @@ static int __init mlx5_ib_init(void)
if (deprecated_prof_sel != 2)
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
- err = mlx5_ib_odp_init();
- if (err)
- return err;
-
err = mlx5_register_interface(&mlx5_ib_interface);
- if (err)
- goto clean_odp;
-
- return err;
-clean_odp:
- mlx5_ib_odp_cleanup();
return err;
}
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
- mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 6851357c16f4..778d8a18925f 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -159,7 +159,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
unsigned long umem_page_shift = ilog2(umem->page_size);
int shift = page_shift - umem_page_shift;
int mask = (1 << shift) - 1;
- int i, k;
+ int i, k, idx;
u64 cur = 0;
u64 base;
int len;
@@ -185,18 +185,36 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> umem_page_shift;
base = sg_dma_address(sg);
- for (k = 0; k < len; k++) {
+
+ /* Skip elements below offset */
+ if (i + len < offset << shift) {
+ i += len;
+ continue;
+ }
+
+ /* Skip pages below offset */
+ if (i < offset << shift) {
+ k = (offset << shift) - i;
+ i = offset << shift;
+ } else {
+ k = 0;
+ }
+
+ for (; k < len; k++) {
if (!(i & mask)) {
cur = base + (k << umem_page_shift);
cur |= access_flags;
+ idx = (i >> shift) - offset;
- pas[i >> shift] = cpu_to_be64(cur);
+ pas[idx] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
- i >> shift, be64_to_cpu(pas[i >> shift]));
- } else
- mlx5_ib_dbg(dev, "=====> 0x%llx\n",
- base + (k << umem_page_shift));
+ i >> shift, be64_to_cpu(pas[idx]));
+ }
i++;
+
+ /* Stop after num_pages reached */
+ if (i >> shift >= offset + num_pages)
+ return;
}
}
}
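
The reworked __mlx5_ib_populate_pas() walk above fills only a window of the translation array: scatterlist entries entirely below 'offset' are skipped, the pas[] index is rebased by 'offset', and the loop returns once 'num_pages' entries have been written. A small worked example of the index arithmetic, assuming 4 KiB umem pages and 64 KiB target pages (values chosen purely for illustration):

#include <stdio.h>

int main(void)
{
	int shift = 16 - 12;		/* page_shift - umem_page_shift */
	int offset = 2, num_pages = 3;	/* window requested by the caller */

	int first_umem_page = offset << shift;				/* 32 */
	int last_umem_page = ((offset + num_pages) << shift) - 1;	/* 79 */
	int first_idx = (first_umem_page >> shift) - offset;		/* pas[0] */
	int last_idx = (last_umem_page >> shift) - offset;		/* pas[2] */

	printf("umem pages %d..%d map to pas[%d]..pas[%d]\n",
	       first_umem_page, last_umem_page, first_idx, last_idx);
	return 0;
}
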
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6c6057eb60ea..e1a4b93dce6b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class {
MLX5_IB_LATENCY_CLASS_LOW,
MLX5_IB_LATENCY_CLASS_MEDIUM,
MLX5_IB_LATENCY_CLASS_HIGH,
- MLX5_IB_LATENCY_CLASS_FAST_PATH
};
enum mlx5_ib_mad_ifc_flags {
@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags {
};
enum {
- MLX5_CROSS_CHANNEL_UUAR = 0,
+ MLX5_CROSS_CHANNEL_BFREG = 0,
};
enum {
@@ -120,11 +119,16 @@ struct mlx5_ib_ucontext {
/* protect doorbell record alloc/free
*/
struct mutex db_page_mutex;
- struct mlx5_uuar_info uuari;
+ struct mlx5_bfreg_info bfregi;
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
struct list_head vma_private_list;
+
+ unsigned long upd_xlt_page;
+ /* protect ODP/KSM */
+ struct mutex upd_xlt_page_mutex;
+ u64 lib_caps;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -174,13 +178,12 @@ struct mlx5_ib_flow_db {
* enum ib_send_flags and enum ib_qp_type for low-level driver
*/
-#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
-#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
-#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
-
-#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3)
-#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4)
-#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END
+#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
+#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
+#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
@@ -190,6 +193,16 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
+#define MLX5_IB_UMR_OCTOWORD 16
+#define MLX5_IB_UMR_XLT_ALIGNMENT 64
+
+#define MLX5_IB_UPD_XLT_ZAP BIT(0)
+#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
+#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
+#define MLX5_IB_UPD_XLT_ADDR BIT(3)
+#define MLX5_IB_UPD_XLT_PD BIT(4)
+#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
+
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
* These flags are intended for internal use by the mlx5_ib driver, and they
@@ -264,29 +277,6 @@ struct mlx5_ib_rwq_ind_table {
u32 rqtn;
};
-/*
- * Connect-IB can trigger up to four concurrent pagefaults
- * per-QP.
- */
-enum mlx5_ib_pagefault_context {
- MLX5_IB_PAGEFAULT_RESPONDER_READ,
- MLX5_IB_PAGEFAULT_REQUESTOR_READ,
- MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
- MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
- MLX5_IB_PAGEFAULT_CONTEXTS
-};
-
-static inline enum mlx5_ib_pagefault_context
- mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
-{
- return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
-}
-
-struct mlx5_ib_pfault {
- struct work_struct work;
- struct mlx5_pagefault mpfault;
-};
-
struct mlx5_ib_ubuffer {
struct ib_umem *umem;
int buf_size;
@@ -334,6 +324,12 @@ struct mlx5_ib_raw_packet_qp {
struct mlx5_ib_rq rq;
};
+struct mlx5_bf {
+ int buf_size;
+ unsigned long offset;
+ struct mlx5_sq_bfreg *bfreg;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
@@ -359,33 +355,19 @@ struct mlx5_ib_qp {
int wq_sig;
int scat_cqe;
int max_inline_data;
- struct mlx5_bf *bf;
+ struct mlx5_bf bf;
int has_rq;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
- int uuarn;
+ int bfregn;
int create_type;
/* Store signature errors */
bool signature_en;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /*
- * A flag that is true for QP's that are in a state that doesn't
- * allow page faults, and shouldn't schedule any more faults.
- */
- int disable_page_faults;
- /*
- * The disable_page_faults_lock protects a QP's disable_page_faults
- * field, allowing for a thread to atomically check whether the QP
- * allows page faults, and if so schedule a page fault.
- */
- spinlock_t disable_page_faults_lock;
- struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
-#endif
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
@@ -414,13 +396,11 @@ enum mlx5_ib_qp_flags {
struct mlx5_umr_wr {
struct ib_send_wr wr;
- union {
- u64 virt_addr;
- u64 offset;
- } target;
+ u64 virt_addr;
+ u64 offset;
struct ib_pd *pd;
unsigned int page_shift;
- unsigned int npages;
+ unsigned int xlt_size;
u64 length;
int access_flags;
u32 mkey;
@@ -617,7 +597,6 @@ struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
struct mlx5_roce roce;
- MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
int num_ports;
/* serialize update of capability mask
*/
@@ -634,6 +613,7 @@ struct mlx5_ib_dev {
int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
+ u64 odp_max_size;
/*
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
@@ -646,6 +626,8 @@ struct mlx5_ib_dev {
struct list_head qp_list;
/* Array with num_ports elements */
struct mlx5_ib_port *port;
+ struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg fp_bfreg;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -787,8 +769,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
- int npages, int zap);
+int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -857,18 +839,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-extern struct workqueue_struct *mlx5_ib_page_fault_wq;
-
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault);
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
+ struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -877,13 +854,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
return;
}
-static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
-static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -1001,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
+
+static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
+{
+ return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+}
+
+static inline int get_num_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+}
+
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8f608debe141..8cf2a67f9fb0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -46,14 +46,9 @@ enum {
};
#define MLX5_UMR_ALIGN 2048
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static __be64 mlx5_ib_update_mtt_emergency_buffer[
- MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
- __aligned(MLX5_UMR_ALIGN);
-static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
-#endif
static int clean_mr(struct mlx5_ib_mr *mr);
+static int use_umr(struct mlx5_ib_dev *dev, int order);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
@@ -134,6 +129,7 @@ static void reg_mr_callback(int status, void *context)
return;
}
+ mr->mmkey.type = MLX5_MKEY_MR;
spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
key = dev->mdev->priv.mkey_key++;
spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
@@ -629,7 +625,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->dev = dev;
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
- (mlx5_core_is_pf(dev->mdev)))
+ mlx5_core_is_pf(dev->mdev) &&
+ use_umr(dev, ent->order))
limit = dev->mdev->profile->mr_cache[i].limit;
else
limit = 0;
@@ -732,6 +729,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
goto err_in;
kfree(in);
+ mr->mmkey.type = MLX5_MKEY_MR;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
@@ -757,94 +755,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
return (npages + 1) / 2;
}
-static int use_umr(int order)
+static int use_umr(struct mlx5_ib_dev *dev, int order)
{
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ return order < MAX_MR_CACHE_ENTRIES + 2;
return order <= MLX5_MAX_UMR_SHIFT;
}
-static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int npages, int page_shift, int *size,
- __be64 **mr_pas, dma_addr_t *dma)
-{
- __be64 *pas;
- struct device *ddev = dev->ib_dev.dma_device;
-
- /*
- * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the pas array, we allocate
- * a little more.
- */
- *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
- *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!(*mr_pas))
- return -ENOMEM;
-
- pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
- mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
- /* Clear padding after the actual pages. */
- memset(pas + npages, 0, *size - npages * sizeof(u64));
-
- *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, *dma)) {
- kfree(*mr_pas);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
- struct ib_sge *sg, u64 dma, int n, u32 key,
- int page_shift)
-{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- sg->addr = dma;
- sg->length = ALIGN(sizeof(u64) * n, 64);
- sg->lkey = dev->umrc.pd->local_dma_lkey;
-
- wr->next = NULL;
- wr->sg_list = sg;
- if (n)
- wr->num_sge = 1;
- else
- wr->num_sge = 0;
-
- wr->opcode = MLX5_IB_WR_UMR;
-
- umrwr->npages = n;
- umrwr->page_shift = page_shift;
- umrwr->mkey = key;
-}
-
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
- struct ib_sge *sg, u64 dma, int n, u32 key,
- int page_shift, u64 virt_addr, u64 len,
- int access_flags)
-{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
-
- wr->send_flags = 0;
-
- umrwr->target.virt_addr = virt_addr;
- umrwr->length = len;
- umrwr->access_flags = access_flags;
- umrwr->pd = pd;
-}
-
-static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
- struct ib_send_wr *wr, u32 key)
-{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
- wr->opcode = MLX5_IB_WR_UMR;
- umrwr->mkey = key;
-}
-
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
@@ -891,21 +808,39 @@ static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
init_completion(&context->done);
}
+static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
+ struct mlx5_umr_wr *umrwr)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_send_wr *bad;
+ int err;
+ struct mlx5_ib_umr_context umr_context;
+
+ mlx5_ib_init_umr_context(&umr_context);
+ umrwr->wr.wr_cqe = &umr_context.cqe;
+
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+ up(&umrc->sem);
+ return err;
+}
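+
+/*
+ * mlx5_ib_post_send_wait() is the single helper used below by
+ * mlx5_ib_update_xlt(), unreg_umr() and rereg_umr(): it posts one UMR work
+ * request on the driver's dedicated UMR QP, serialized by umrc->sem, and
+ * sleeps until the completion arrives; any completion status other than
+ * IB_WC_SUCCESS is folded into -EFAULT.
+ */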
+
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
u64 virt_addr, u64 len, int npages,
int page_shift, int order, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct device *ddev = dev->ib_dev.dma_device;
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
- struct mlx5_umr_wr umrwr = {};
- struct ib_send_wr *bad;
struct mlx5_ib_mr *mr;
- struct ib_sge sg;
- int size;
- __be64 *mr_pas;
- dma_addr_t dma;
int err = 0;
int i;
@@ -924,173 +859,174 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
- &dma);
- if (err)
- goto free_mr;
-
- mlx5_ib_init_umr_context(&umr_context);
-
- umrwr.wr.wr_cqe = &umr_context.cqe;
- prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
- page_shift, virt_addr, len, access_flags);
-
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
- if (err) {
- mlx5_ib_warn(dev, "post send failed, err %d\n", err);
- goto unmap_dma;
- } else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed\n");
- err = -EFAULT;
- }
- }
-
+ mr->ibmr.pd = pd;
+ mr->umem = umem;
+ mr->access_flags = access_flags;
+ mr->desc_size = sizeof(struct mlx5_mtt);
mr->mmkey.iova = virt_addr;
mr->mmkey.size = len;
mr->mmkey.pd = to_mpd(pd)->pdn;
- mr->live = 1;
+ err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
+ MLX5_IB_UPD_XLT_ENABLE);
-unmap_dma:
- up(&umrc->sem);
- dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-
- kfree(mr_pas);
-
-free_mr:
if (err) {
free_cached_mr(dev, mr);
return ERR_PTR(err);
}
+ mr->live = 1;
+
return mr;
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
- int zap)
+static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
+ void *xlt, int page_shift, size_t size,
+ int flags)
{
struct mlx5_ib_dev *dev = mr->dev;
- struct device *ddev = dev->ib_dev.dma_device;
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
struct ib_umem *umem = mr->umem;
+
+ npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
+
+ if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
+ __mlx5_ib_populate_pas(dev, umem, page_shift,
+ idx, npages, xlt,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages
+ * brought from the umem.
+ */
+ memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
+ size - npages * sizeof(struct mlx5_mtt));
+ }
+
+ return npages;
+}
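+
+/*
+ * populate_xlt() fills one chunk of the translation buffer: unless the caller
+ * asked for a pure invalidation (MLX5_IB_UPD_XLT_ZAP) it copies up to @npages
+ * MTT entries starting at @idx from the umem and zeroes the tail of the
+ * chunk, since UMR copies translations in MLX5_UMR_MTT_ALIGNMENT-sized units.
+ * The return value is the number of entries actually taken from the umem.
+ */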
+
+#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
+ MLX5_UMR_MTT_ALIGNMENT)
+#define MLX5_SPARE_UMR_CHUNK 0x10000
+
+int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags)
+{
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct mlx5_ib_ucontext *uctx = NULL;
int size;
- __be64 *pas;
+ void *xlt;
dma_addr_t dma;
- struct ib_send_wr *bad;
struct mlx5_umr_wr wr;
struct ib_sge sg;
int err = 0;
- const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
- const int page_index_mask = page_index_alignment - 1;
+ int desc_size = sizeof(struct mlx5_mtt);
+ const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+ const int page_mask = page_align - 1;
size_t pages_mapped = 0;
size_t pages_to_map = 0;
size_t pages_iter = 0;
- int use_emergency_buf = 0;
+ gfp_t gfp;
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
- * so we need to align the offset and length accordingly */
- if (start_page_index & page_index_mask) {
- npages += start_page_index & page_index_mask;
- start_page_index &= ~page_index_mask;
+ * so we need to align the offset and length accordingly
+ */
+ if (idx & page_mask) {
+ npages += idx & page_mask;
+ idx &= ~page_mask;
}
- pages_to_map = ALIGN(npages, page_index_alignment);
+ gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
+ gfp |= __GFP_ZERO | __GFP_NOWARN;
- if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
- return -EINVAL;
+ pages_to_map = ALIGN(npages, page_align);
+ size = desc_size * pages_to_map;
+ size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
+
+ xlt = (void *)__get_free_pages(gfp, get_order(size));
+ if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
+ mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n",
+ size, get_order(size), MLX5_SPARE_UMR_CHUNK);
- size = sizeof(u64) * pages_to_map;
- size = min_t(int, PAGE_SIZE, size);
- /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
- * code, when we are called from an invalidation. The pas buffer must
- * be 2k-aligned for Connect-IB. */
- pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
- if (!pas) {
- mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
- pas = mlx5_ib_update_mtt_emergency_buffer;
- size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
- use_emergency_buf = 1;
- mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
- memset(pas, 0, size);
+ size = MLX5_SPARE_UMR_CHUNK;
+ xlt = (void *)__get_free_pages(gfp, get_order(size));
}
- pages_iter = size / sizeof(u64);
- dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+
+ if (!xlt) {
+ uctx = to_mucontext(mr->ibmr.uobject->context);
+ mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
+ size = PAGE_SIZE;
+ xlt = (void *)uctx->upd_xlt_page;
+ mutex_lock(&uctx->upd_xlt_page_mutex);
+ memset(xlt, 0, size);
+ }
+ pages_iter = size / desc_size;
+ dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
if (dma_mapping_error(ddev, dma)) {
- mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
+ mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
err = -ENOMEM;
- goto free_pas;
+ goto free_xlt;
}
+ sg.addr = dma;
+ sg.lkey = dev->umrc.pd->local_dma_lkey;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
+ if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
+ wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ wr.wr.sg_list = &sg;
+ wr.wr.num_sge = 1;
+ wr.wr.opcode = MLX5_IB_WR_UMR;
+
+ wr.pd = mr->ibmr.pd;
+ wr.mkey = mr->mmkey.key;
+ wr.length = mr->mmkey.size;
+ wr.virt_addr = mr->mmkey.iova;
+ wr.access_flags = mr->access_flags;
+ wr.page_shift = page_shift;
+
for (pages_mapped = 0;
pages_mapped < pages_to_map && !err;
- pages_mapped += pages_iter, start_page_index += pages_iter) {
+ pages_mapped += pages_iter, idx += pages_iter) {
dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
-
- npages = min_t(size_t,
- pages_iter,
- ib_umem_num_pages(umem) - start_page_index);
-
- if (!zap) {
- __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
- start_page_index, npages, pas,
- MLX5_IB_MTT_PRESENT);
- /* Clear padding after the pages brought from the
- * umem. */
- memset(pas + npages, 0, size - npages * sizeof(u64));
- }
+ npages = populate_xlt(mr, idx, pages_iter, xlt,
+ page_shift, size, flags);
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
- mlx5_ib_init_umr_context(&umr_context);
-
- memset(&wr, 0, sizeof(wr));
- wr.wr.wr_cqe = &umr_context.cqe;
-
- sg.addr = dma;
- sg.length = ALIGN(npages * sizeof(u64),
- MLX5_UMR_MTT_ALIGNMENT);
- sg.lkey = dev->umrc.pd->local_dma_lkey;
+ sg.length = ALIGN(npages * desc_size,
+ MLX5_UMR_MTT_ALIGNMENT);
+
+ if (pages_mapped + pages_iter >= pages_to_map) {
+ if (flags & MLX5_IB_UPD_XLT_ENABLE)
+ wr.wr.send_flags |=
+ MLX5_IB_SEND_UMR_ENABLE_MR |
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
+ MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ if (flags & MLX5_IB_UPD_XLT_PD ||
+ flags & MLX5_IB_UPD_XLT_ACCESS)
+ wr.wr.send_flags |=
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
+ if (flags & MLX5_IB_UPD_XLT_ADDR)
+ wr.wr.send_flags |=
+ MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ }
- wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
- MLX5_IB_SEND_UMR_UPDATE_MTT;
- wr.wr.sg_list = &sg;
- wr.wr.num_sge = 1;
- wr.wr.opcode = MLX5_IB_WR_UMR;
- wr.npages = sg.length / sizeof(u64);
- wr.page_shift = PAGE_SHIFT;
- wr.mkey = mr->mmkey.key;
- wr.target.offset = start_page_index;
+ wr.offset = idx * desc_size;
+ wr.xlt_size = sg.length;
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &wr.wr, &bad);
- if (err) {
- mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
- } else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_err(dev, "UMR completion failed, code %d\n",
- umr_context.status);
- err = -EFAULT;
- }
- }
- up(&umrc->sem);
+ err = mlx5_ib_post_send_wait(dev, &wr);
}
dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-free_pas:
- if (!use_emergency_buf)
- free_page((unsigned long)pas);
+free_xlt:
+ if (uctx)
+ mutex_unlock(&uctx->upd_xlt_page_mutex);
else
- mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+ free_pages((unsigned long)xlt, get_order(size));
return err;
}
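
/*
 * In short, mlx5_ib_update_xlt() streams the translation entries to the HCA
 * in chunks: it grabs the largest buffer it can (high-order pages, then the
 * MLX5_SPARE_UMR_CHUNK fallback, then the per-context emergency page),
 * DMA-maps it, and posts one UMR WQE per chunk with the matching
 * offset/xlt_size; only the WQE covering the last chunk carries the
 * enable/PD/access/translation flags requested by the caller.
 */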
-#endif
/*
* If ibmr is NULL it will be allocated by reg_create.
@@ -1122,8 +1058,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
goto err_1;
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- mlx5_ib_populate_pas(dev, umem, page_shift, pas,
- pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ if (!(access_flags & IB_ACCESS_ON_DEMAND))
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
/* The pg_access bit allows setting the access flags
* in the page list submitted with the command. */
@@ -1153,6 +1090,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
}
+ mr->mmkey.type = MLX5_MKEY_MR;
mr->umem = umem;
mr->dev = dev;
mr->live = 1;
@@ -1204,14 +1142,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- if (use_umr(order)) {
+ if (use_umr(dev, order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
- } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+ } else if (access_flags & IB_ACCESS_ON_DEMAND &&
+ !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
goto error;
@@ -1248,106 +1187,39 @@ error:
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_core_dev *mdev = dev->mdev;
- struct umr_common *umrc = &dev->umrc;
- struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {};
- struct ib_send_wr *bad;
- int err;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
- mlx5_ib_init_umr_context(&umr_context);
-
- umrwr.wr.wr_cqe = &umr_context.cqe;
- prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+ umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
+ MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.mkey = mr->mmkey.key;
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
- if (err) {
- up(&umrc->sem);
- mlx5_ib_dbg(dev, "err %d\n", err);
- goto error;
- } else {
- wait_for_completion(&umr_context.done);
- up(&umrc->sem);
- }
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "unreg umr failed\n");
- err = -EFAULT;
- goto error;
- }
- return 0;
-
-error:
- return err;
+ return mlx5_ib_post_send_wait(dev, &umrwr);
}
-static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
- u64 length, int npages, int page_shift, int order,
+static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
int access_flags, int flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct device *ddev = dev->ib_dev.dma_device;
- struct mlx5_ib_umr_context umr_context;
- struct ib_send_wr *bad;
struct mlx5_umr_wr umrwr = {};
- struct ib_sge sg;
- struct umr_common *umrc = &dev->umrc;
- dma_addr_t dma = 0;
- __be64 *mr_pas = NULL;
- int size;
int err;
- mlx5_ib_init_umr_context(&umr_context);
-
- umrwr.wr.wr_cqe = &umr_context.cqe;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
- if (flags & IB_MR_REREG_TRANS) {
- err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
- &mr_pas, &dma);
- if (err)
- return err;
+ umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.mkey = mr->mmkey.key;
- umrwr.target.virt_addr = virt_addr;
- umrwr.length = length;
- umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
- }
-
- prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
- page_shift);
-
- if (flags & IB_MR_REREG_PD) {
+ if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
umrwr.pd = pd;
- umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
- }
-
- if (flags & IB_MR_REREG_ACCESS) {
umrwr.access_flags = access_flags;
- umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
}
- /* post send request to UMR QP */
- down(&umrc->sem);
- err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
-
- if (err) {
- mlx5_ib_warn(dev, "post send failed, err %d\n", err);
- } else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed (%u)\n",
- umr_context.status);
- err = -EFAULT;
- }
- }
+ err = mlx5_ib_post_send_wait(dev, &umrwr);
- up(&umrc->sem);
- if (flags & IB_MR_REREG_TRANS) {
- dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
- kfree(mr_pas);
- }
return err;
}
@@ -1364,6 +1236,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
int page_shift = 0;
+ int upd_flags = 0;
int npages = 0;
int ncont = 0;
int order = 0;
@@ -1372,6 +1245,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
+ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+
if (flags != IB_MR_REREG_PD) {
/*
* Replace umem. This needs to be done whether or not UMR is
@@ -1382,7 +1257,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {
- mr->umem = NULL;
+ clean_mr(mr);
return err;
}
}
@@ -1414,32 +1289,37 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
/*
* Send a UMR WQE
*/
- err = rereg_umr(pd, mr, addr, len, npages, page_shift,
- order, access_flags, flags);
+ mr->ibmr.pd = pd;
+ mr->access_flags = access_flags;
+ mr->mmkey.iova = addr;
+ mr->mmkey.size = len;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ upd_flags = MLX5_IB_UPD_XLT_ADDR;
+ if (flags & IB_MR_REREG_PD)
+ upd_flags |= MLX5_IB_UPD_XLT_PD;
+ if (flags & IB_MR_REREG_ACCESS)
+ upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
+ err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
+ upd_flags);
+ } else {
+ err = rereg_umr(pd, mr, access_flags, flags);
+ }
+
if (err) {
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
+ ib_umem_release(mr->umem);
+ clean_mr(mr);
return err;
}
}
- if (flags & IB_MR_REREG_PD) {
- ib_mr->pd = pd;
- mr->mmkey.pd = to_mpd(pd)->pdn;
- }
+ set_mr_fileds(dev, mr, npages, len, access_flags);
- if (flags & IB_MR_REREG_ACCESS)
- mr->access_flags = access_flags;
-
- if (flags & IB_MR_REREG_TRANS) {
- atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
- set_mr_fileds(dev, mr, npages, len, access_flags);
- mr->mmkey.iova = addr;
- mr->mmkey.size = len;
- }
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
update_odp_mr(mr);
#endif
-
return 0;
}
@@ -1603,11 +1483,11 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
err = mlx5_alloc_priv_descs(pd->device, mr,
- ndescs, sizeof(u64));
+ ndescs, sizeof(struct mlx5_mtt));
if (err)
goto err_free_in;
- mr->desc_size = sizeof(u64);
+ mr->desc_size = sizeof(struct mlx5_mtt);
mr->max_descs = ndescs;
} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
@@ -1656,6 +1536,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_destroy_psv;
+ mr->mmkey.type = MLX5_MKEY_MR;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
@@ -1736,6 +1617,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
if (err)
goto free;
+ mw->mmkey.type = MLX5_MKEY_MW;
mw->ibmw.rkey = mw->mmkey.key;
resp.response_length = min(offsetof(typeof(resp), response_length) +
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index cacb631a7b0a..e5bc267aca73 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -41,13 +41,12 @@
* a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000
-struct workqueue_struct *mlx5_ib_page_fault_wq;
-
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end)
{
struct mlx5_ib_mr *mr;
- const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+ const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
+ sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
int in_block = 0;
u64 addr;
@@ -90,16 +89,21 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
u64 umr_offset = idx & umr_block_mask;
if (in_block && umr_offset == 0) {
- mlx5_ib_update_mtt(mr, blk_start_idx,
- idx - blk_start_idx, 1);
+ mlx5_ib_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
in_block = 0;
}
}
}
if (in_block)
- mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
- 1);
-
+ mlx5_ib_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx + 1,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -120,6 +124,11 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
caps->general_caps = IB_ODP_SUPPORT;
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ dev->odp_max_size = U64_MAX;
+ else
+ dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
+
if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
@@ -135,6 +144,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
return;
}
@@ -143,46 +155,51 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
{
u32 base_key = mlx5_base_mkey(key);
struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
- struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ struct mlx5_ib_mr *mr;
- if (!mmkey || mmkey->key != key || !mr->live)
+ if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR)
+ return NULL;
+
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ if (!mr->live)
return NULL;
return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
-static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault,
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
int error)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
- u32 qpn = qp->trans_qp.base.mqp.qpn;
+ int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
+ pfault->wqe.wq_num : pfault->token;
int ret = mlx5_core_page_fault_resume(dev->mdev,
- qpn,
- pfault->mpfault.flags,
+ pfault->token,
+ wq_num,
+ pfault->type,
error);
if (ret)
- pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
+ mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
+ wq_num);
}
/*
- * Handle a single data segment in a page-fault WQE.
+ * Handle a single data segment in a page-fault WQE or RDMA region.
*
- * Returns number of pages retrieved on success. The caller will continue to
+ * Returns number of pages retrieved on success. The caller may continue to
* the next data segment.
* Can return the following error codes:
* -EAGAIN to designate a temporary error. The caller will abort handling the
* page fault and resolve it.
* -EFAULT when there's an error mapping the requested pages. The caller will
- * abort the page fault handling and possibly move the QP to an error state.
- * On other errors the QP should also be closed with an error.
+ * abort the page fault handling.
*/
-static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault,
+static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev,
u32 key, u64 io_virt, size_t bcnt,
+ u32 *bytes_committed,
u32 *bytes_mapped)
{
- struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
int srcu_key;
unsigned int current_seq;
u64 start_idx;
@@ -208,12 +225,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
key);
if (bytes_mapped)
*bytes_mapped +=
- (bcnt - pfault->mpfault.bytes_committed);
- goto srcu_unlock;
- }
- if (mr->ibmr.pd != qp->ibqp.pd) {
- pr_err("Page-fault with different PDs for QP and MR.\n");
- ret = -EFAULT;
+ (bcnt - *bytes_committed);
goto srcu_unlock;
}
@@ -229,8 +241,8 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
* in all iterations (in iteration 2 and above,
* bytes_committed == 0).
*/
- io_virt += pfault->mpfault.bytes_committed;
- bcnt -= pfault->mpfault.bytes_committed;
+ io_virt += *bytes_committed;
+ bcnt -= *bytes_committed;
start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;
@@ -251,7 +263,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
* this MR, since ib_umem_odp_map_dma_pages already
* checks this.
*/
- ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+ ret = mlx5_ib_update_xlt(mr, start_idx, npages,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ATOMIC);
} else {
ret = -EAGAIN;
}
@@ -287,7 +301,7 @@ srcu_unlock:
}
}
srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
- pfault->mpfault.bytes_committed = 0;
+ *bytes_committed = 0;
return ret ? ret : npages;
}
@@ -309,8 +323,9 @@ srcu_unlock:
* Returns the number of pages loaded if positive, zero for an empty WQE, or a
* negative error code.
*/
-static int pagefault_data_segments(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault, void *wqe,
+static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
+ struct mlx5_ib_qp *qp, void *wqe,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, int receive_queue)
{
@@ -354,22 +369,23 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
if (!inline_segment && total_wqe_bytes) {
*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
- pfault->mpfault.bytes_committed);
+ pfault->bytes_committed);
}
/* A zero length data segment designates a length of 2GB. */
if (bcnt == 0)
bcnt = 1U << 31;
- if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
- pfault->mpfault.bytes_committed -=
+ if (inline_segment || bcnt <= pfault->bytes_committed) {
+ pfault->bytes_committed -=
min_t(size_t, bcnt,
- pfault->mpfault.bytes_committed);
+ pfault->bytes_committed);
continue;
}
- ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
- bcnt, bytes_mapped);
+ ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
+ &pfault->bytes_committed,
+ bytes_mapped);
if (ret < 0)
break;
npages += ret;
@@ -378,17 +394,29 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
return ret < 0 ? ret : npages;
}
+static const u32 mlx5_ib_odp_opcode_cap[] = {
+ [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND,
+ [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND,
+ [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND,
+ [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE,
+ [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
+ [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ,
+ [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC,
+ [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC,
+};
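+
+/*
+ * The table above is indexed directly by the MLX5 WQE opcode; the initiator
+ * page-fault handler below bounds-checks the opcode and then requires the
+ * matching IB_ODP_SUPPORT_* bit in the per-transport ODP capabilities before
+ * it walks the data segments.
+ */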
+
/*
* Parse initiator WQE. Advances the wqe pointer to point at the
* scatter-gather list, and set wqe_end to the end of the WQE.
*/
static int mlx5_ib_mr_initiator_pfault_handler(
- struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
- void **wqe, void **wqe_end, int wqe_length)
+ struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+ struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
- u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ u32 transport_caps;
+ struct mlx5_base_av *av;
unsigned ds, opcode;
#if defined(DEBUG)
u32 ctrl_wqe_index, ctrl_qpn;
@@ -434,53 +462,49 @@ static int mlx5_ib_mr_initiator_pfault_handler(
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK;
+
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
- switch (opcode) {
- case MLX5_OPCODE_SEND:
- case MLX5_OPCODE_SEND_IMM:
- case MLX5_OPCODE_SEND_INVAL:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_SEND))
- goto invalid_transport_or_opcode;
- break;
- case MLX5_OPCODE_RDMA_WRITE:
- case MLX5_OPCODE_RDMA_WRITE_IMM:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_WRITE))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_raddr_seg);
- break;
- case MLX5_OPCODE_RDMA_READ:
- if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
- IB_ODP_SUPPORT_READ))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_raddr_seg);
- break;
- default:
- goto invalid_transport_or_opcode;
- }
+ transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
break;
case IB_QPT_UD:
- switch (opcode) {
- case MLX5_OPCODE_SEND:
- case MLX5_OPCODE_SEND_IMM:
- if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
- IB_ODP_SUPPORT_SEND))
- goto invalid_transport_or_opcode;
- *wqe += sizeof(struct mlx5_wqe_datagram_seg);
- break;
- default:
- goto invalid_transport_or_opcode;
- }
+ transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
break;
default:
-invalid_transport_or_opcode:
- mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
- qp->ibqp.qp_type, opcode);
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
+ qp->ibqp.qp_type);
return -EFAULT;
}
+ if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
+ sizeof(mlx5_ib_odp_opcode_cap[0]) ||
+ !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
+ opcode);
+ return -EFAULT;
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC) {
+ av = *wqe;
+ if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+ *wqe += sizeof(struct mlx5_av);
+ else
+ *wqe += sizeof(struct mlx5_base_av);
+ }
+
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ case MLX5_OPCODE_RDMA_READ:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ case MLX5_OPCODE_ATOMIC_CS:
+ case MLX5_OPCODE_ATOMIC_FA:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ *wqe += sizeof(struct mlx5_wqe_atomic_seg);
+ break;
+ }
+
return 0;
}
@@ -489,10 +513,9 @@ invalid_transport_or_opcode:
* scatter-gather list, and set wqe_end to the end of the WQE.
*/
static int mlx5_ib_mr_responder_pfault_handler(
- struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
- void **wqe, void **wqe_end, int wqe_length)
+ struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+ struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
struct mlx5_ib_wq *wq = &qp->rq;
int wqe_size = 1 << wq->wqe_shift;
@@ -529,70 +552,83 @@ invalid_transport_or_opcode:
return 0;
}
-static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
+ u32 wq_num)
+{
+ struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);
+
+ if (!mqp) {
+ mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
+ return NULL;
+ }
+
+ return to_mibqp(mqp);
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
int ret;
void *wqe, *wqe_end;
u32 bytes_mapped, total_wqe_bytes;
char *buffer = NULL;
- int resume_with_error = 0;
- u16 wqe_index = pfault->mpfault.wqe.wqe_index;
- int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
- u32 qpn = qp->trans_qp.base.mqp.qpn;
+ int resume_with_error = 1;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
+ struct mlx5_ib_qp *qp;
buffer = (char *)__get_free_page(GFP_KERNEL);
if (!buffer) {
mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
- resume_with_error = 1;
goto resolve_page_fault;
}
+ qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
+ if (!qp)
+ goto resolve_page_fault;
+
ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
PAGE_SIZE, &qp->trans_qp.base);
if (ret < 0) {
- mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
- -ret, wqe_index, qpn);
- resume_with_error = 1;
+ mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
+ ret, wqe_index, pfault->token);
goto resolve_page_fault;
}
wqe = buffer;
if (requestor)
- ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+ ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
&wqe_end, ret);
else
- ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+ ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
&wqe_end, ret);
- if (ret < 0) {
- resume_with_error = 1;
+ if (ret < 0)
goto resolve_page_fault;
- }
if (wqe >= wqe_end) {
mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
- resume_with_error = 1;
goto resolve_page_fault;
}
- ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
- &total_wqe_bytes, !requestor);
+ ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
+ &bytes_mapped, &total_wqe_bytes,
+ !requestor);
if (ret == -EAGAIN) {
+ resume_with_error = 0;
goto resolve_page_fault;
} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
- mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
- -ret);
- resume_with_error = 1;
+ if (ret != -ENOENT)
+ mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n",
+ ret);
goto resolve_page_fault;
}
+ resume_with_error = 0;
resolve_page_fault:
- mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
- mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
- qpn, resume_with_error,
- pfault->mpfault.flags);
-
+ mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
+ pfault->token, resume_with_error,
+ pfault->type);
free_page((unsigned long)buffer);
}
@@ -602,15 +638,14 @@ static int pages_in_range(u64 address, u32 length)
(address & PAGE_MASK)) >> PAGE_SHIFT;
}
-static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
{
- struct mlx5_pagefault *mpfault = &pfault->mpfault;
u64 address;
u32 length;
- u32 prefetch_len = mpfault->bytes_committed;
+ u32 prefetch_len = pfault->bytes_committed;
int prefetch_activated = 0;
- u32 rkey = mpfault->rdma.r_key;
+ u32 rkey = pfault->rdma.r_key;
int ret;
/* The RDMA responder handler handles the page fault in two parts.
@@ -619,38 +654,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
* prefetches more pages. The second operation cannot use the pfault
* context and therefore uses the dummy_pfault context allocated on
* the stack */
- struct mlx5_ib_pfault dummy_pfault = {};
-
- dummy_pfault.mpfault.bytes_committed = 0;
+ pfault->rdma.rdma_va += pfault->bytes_committed;
+ pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
+ pfault->rdma.rdma_op_len);
+ pfault->bytes_committed = 0;
- mpfault->rdma.rdma_va += mpfault->bytes_committed;
- mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
- mpfault->rdma.rdma_op_len);
- mpfault->bytes_committed = 0;
-
- address = mpfault->rdma.rdma_va;
- length = mpfault->rdma.rdma_op_len;
+ address = pfault->rdma.rdma_va;
+ length = pfault->rdma.rdma_op_len;
/* For some operations, the hardware cannot tell the exact message
* length, and in those cases it reports zero. Use prefetch
* logic. */
if (length == 0) {
prefetch_activated = 1;
- length = mpfault->rdma.packet_size;
+ length = pfault->rdma.packet_size;
prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
}
- ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
- NULL);
+ ret = pagefault_single_data_segment(dev, rkey, address, length,
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
} else if (ret < 0 || pages_in_range(address, length) > ret) {
- mlx5_ib_page_fault_resume(qp, pfault, 1);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ if (ret != -ENOENT)
+ mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
+ ret, pfault->token, pfault->type);
return;
}
- mlx5_ib_page_fault_resume(qp, pfault, 0);
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
+ pfault->token, pfault->type,
+ prefetch_activated);
/* At this point, there might be a new pagefault already arriving in
* the eq, switch to the dummy pagefault for the rest of the
@@ -658,112 +695,39 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
* work-queue is being fenced. */
if (prefetch_activated) {
- ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
- address,
+ u32 bytes_committed = 0;
+
+ ret = pagefault_single_data_segment(dev, rkey, address,
prefetch_len,
- NULL);
+ &bytes_committed, NULL);
if (ret < 0) {
- pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
- ret, prefetch_activated,
- qp->ibqp.qp_num, address, prefetch_len);
+ mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, pfault->token, address,
+ prefetch_len);
}
}
}
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
- struct mlx5_ib_pfault *pfault)
+void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
+ struct mlx5_pagefault *pfault)
{
- u8 event_subtype = pfault->mpfault.event_subtype;
+ struct mlx5_ib_dev *dev = context;
+ u8 event_subtype = pfault->event_subtype;
switch (event_subtype) {
case MLX5_PFAULT_SUBTYPE_WQE:
- mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+ mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
break;
case MLX5_PFAULT_SUBTYPE_RDMA:
- mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+ mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
break;
default:
- pr_warn("Invalid page fault event subtype: 0x%x\n",
- event_subtype);
- mlx5_ib_page_fault_resume(qp, pfault, 1);
- break;
+ mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
}
}
-static void mlx5_ib_qp_pfault_action(struct work_struct *work)
-{
- struct mlx5_ib_pfault *pfault = container_of(work,
- struct mlx5_ib_pfault,
- work);
- enum mlx5_ib_pagefault_context context =
- mlx5_ib_get_pagefault_context(&pfault->mpfault);
- struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
- pagefaults[context]);
- mlx5_ib_mr_pfault_handler(qp, pfault);
-}
-
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
- qp->disable_page_faults = 1;
- spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
-
- /*
- * Note that at this point, we are guarenteed that no more
- * work queue elements will be posted to the work queue with
- * the QP we are closing.
- */
- flush_workqueue(mlx5_ib_page_fault_wq);
-}
-
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
- qp->disable_page_faults = 0;
- spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
-}
-
-static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
- struct mlx5_pagefault *pfault)
-{
- /*
- * Note that we will only get one fault event per QP per context
- * (responder/initiator, read/write), until we resolve the page fault
- * with the mlx5_ib_page_fault_resume command. Since this function is
- * called from within the work element, there is no risk of missing
- * events.
- */
- struct mlx5_ib_qp *mibqp = to_mibqp(qp);
- enum mlx5_ib_pagefault_context context =
- mlx5_ib_get_pagefault_context(pfault);
- struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
-
- qp_pfault->mpfault = *pfault;
-
- /* No need to stop interrupts here since we are in an interrupt */
- spin_lock(&mibqp->disable_page_faults_lock);
- if (!mibqp->disable_page_faults)
- queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
- spin_unlock(&mibqp->disable_page_faults_lock);
-}
-
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
-{
- int i;
-
- qp->disable_page_faults = 1;
- spin_lock_init(&qp->disable_page_faults_lock);
-
- qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;
-
- for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
- INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
-}
-
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
int ret;
@@ -780,17 +744,3 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
cleanup_srcu_struct(&ibdev->mr_srcu);
}
-int __init mlx5_ib_odp_init(void)
-{
- mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
- WQ_MEM_RECLAIM);
- if (!mlx5_ib_page_fault_wq)
- return -ENOMEM;
-
- return 0;
-}
-
-void mlx5_ib_odp_cleanup(void)
-{
- destroy_workqueue(mlx5_ib_page_fault_wq);
-}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a1b3125f0a6e..e31bf11ae64f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
-static int first_med_uuar(void)
+static int first_med_bfreg(void)
{
return 1;
}
-static int next_uuar(int n)
-{
- n++;
-
- while (((n % 4) & 2))
- n++;
+enum {
+	/* this is the first blue flame register in the array of bfregs assigned
+	 * to a process. Since we do not use it for blue flame but rather
+	 * regular 64 bit doorbells, we do not need a lock for maintaining
+	 * "odd/even" order
+ */
+ NUM_NON_BLUE_FLAME_BFREGS = 1,
+};
- return n;
+static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
+{
+ return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
-static int num_med_uuar(struct mlx5_uuar_info *uuari)
+static int num_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int n;
- n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
- uuari->num_low_latency_uuars - 1;
+ n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
+ NUM_NON_BLUE_FLAME_BFREGS;
return n >= 0 ? n : 0;
}
-static int max_uuari(struct mlx5_uuar_info *uuari)
-{
- return uuari->num_uars * 4;
-}
-
-static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+static int first_hi_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int med;
- int i;
- int t;
-
- med = num_med_uuar(uuari);
- for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
- t++;
- if (t == med)
- return next_uuar(i);
- }
- return 0;
+ med = num_med_bfreg(dev, bfregi);
+ return ++med;
}
-static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
int i;
- for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
- if (!test_bit(i, uuari->bitmap)) {
- set_bit(i, uuari->bitmap);
- uuari->count[i]++;
+ for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
+ if (!bfregi->count[i]) {
+ bfregi->count[i]++;
return i;
}
}
@@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
return -ENOMEM;
}
-static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- int minidx = first_med_uuar();
+ int minidx = first_med_bfreg();
int i;
- for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
- if (uuari->count[i] < uuari->count[minidx])
+ for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+ if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
+ if (!bfregi->count[minidx])
+ break;
}
- uuari->count[minidx]++;
+ bfregi->count[minidx]++;
return minidx;
}
-static int alloc_uuar(struct mlx5_uuar_info *uuari,
- enum mlx5_ib_latency_class lat)
+static int alloc_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi,
+ enum mlx5_ib_latency_class lat)
{
- int uuarn = -EINVAL;
+ int bfregn = -EINVAL;
- mutex_lock(&uuari->lock);
+ mutex_lock(&bfregi->lock);
switch (lat) {
case MLX5_IB_LATENCY_CLASS_LOW:
- uuarn = 0;
- uuari->count[uuarn]++;
+ BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
+ bfregn = 0;
+ bfregi->count[bfregn]++;
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- if (uuari->ver < 2)
- uuarn = -ENOMEM;
+ if (bfregi->ver < 2)
+ bfregn = -ENOMEM;
else
- uuarn = alloc_med_class_uuar(uuari);
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- if (uuari->ver < 2)
- uuarn = -ENOMEM;
+ if (bfregi->ver < 2)
+ bfregn = -ENOMEM;
else
- uuarn = alloc_high_class_uuar(uuari);
- break;
-
- case MLX5_IB_LATENCY_CLASS_FAST_PATH:
- uuarn = 2;
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
break;
}
- mutex_unlock(&uuari->lock);
-
- return uuarn;
-}
+ mutex_unlock(&bfregi->lock);
-static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
- clear_bit(uuarn, uuari->bitmap);
- --uuari->count[uuarn];
-}
-
-static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
- clear_bit(uuarn, uuari->bitmap);
- --uuari->count[uuarn];
+ return bfregn;
}
-static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int high_uuar = nuuars - uuari->num_low_latency_uuars;
-
- mutex_lock(&uuari->lock);
- if (uuarn == 0) {
- --uuari->count[uuarn];
- goto out;
- }
-
- if (uuarn < high_uuar) {
- free_med_class_uuar(uuari, uuarn);
- goto out;
- }
-
- free_high_class_uuar(uuari, uuarn);
-
-out:
- mutex_unlock(&uuari->lock);
+ mutex_lock(&bfregi->lock);
+ bfregi->count[bfregn]--;
+ mutex_unlock(&bfregi->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
@@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
-static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
+static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, int bfregn)
{
- return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
+ int bfregs_per_sys_page;
+ int index_of_sys_page;
+ int offset;
+
+ bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ MLX5_NON_FP_BFREGS_PER_UAR;
+ index_of_sys_page = bfregn / bfregs_per_sys_page;
+
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+
+ return bfregi->sys_pages[index_of_sys_page] + offset;
}
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
@@ -762,6 +740,13 @@ err_umem:
return err;
}
+static int adjust_bfregn(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, int bfregn)
+{
+ return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
+ bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
+}
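+
+/*
+ * adjust_bfregn() translates a driver-side bfreg index, which counts only the
+ * non-fast-path bfregs of each UAR, into the index space exposed to user
+ * space, which counts all bfregs per UAR.  For instance, assuming four bfregs
+ * per UAR of which two are non-fast-path, driver index 3 becomes
+ * 3 / 2 * 4 + 3 % 2 = 5.
+ */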
+
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
@@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int uar_index;
int npages;
u32 offset = 0;
- int uuarn;
+ int bfregn;
int ncont = 0;
__be64 *pas;
void *qpc;
@@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
*/
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
- uuarn = MLX5_CROSS_CHANNEL_UUAR;
+ bfregn = MLX5_CROSS_CHANNEL_BFREG;
else {
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
+ bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
+ if (bfregn < 0) {
+ mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to medium latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+ bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
+ if (bfregn < 0) {
+ mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to high latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
- if (uuarn < 0) {
- mlx5_ib_warn(dev, "uuar allocation failed\n");
- return uuarn;
+ bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
+ if (bfregn < 0) {
+ mlx5_ib_warn(dev, "bfreg allocation failed\n");
+ return bfregn;
}
}
}
}
- uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
- mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
+ mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = set_user_buf_size(dev, qp, &ucmd, base, attr);
if (err)
- goto err_uuar;
+ goto err_bfreg;
if (ucmd.buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd.buf_addr;
@@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
&ubuffer->umem, &npages, &page_shift,
&ncont, &offset);
if (err)
- goto err_uuar;
+ goto err_bfreg;
} else {
ubuffer->umem = NULL;
}
@@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
- resp->uuar_index = uuarn;
- qp->uuarn = uuarn;
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
if (err) {
@@ -882,13 +867,13 @@ err_umem:
if (ubuffer->umem)
ib_umem_release(ubuffer->umem);
-err_uuar:
- free_uuar(&context->uuari, uuarn);
+err_bfreg:
+ free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
-static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
- struct mlx5_ib_qp_base *base)
+static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{
struct mlx5_ib_ucontext *context;
@@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
- free_uuar(&context->uuari, qp->uuarn);
+ free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
u32 **in, int *inlen,
struct mlx5_ib_qp_base *base)
{
- enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
- struct mlx5_uuar_info *uuari;
int uar_index;
void *qpc;
- int uuarn;
int err;
- uuari = &dev->mdev->priv.uuari;
if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
@@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
- lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
-
- uuarn = alloc_uuar(uuari, lc);
- if (uuarn < 0) {
- mlx5_ib_dbg(dev, "\n");
- return -ENOMEM;
- }
+ qp->bf.bfreg = &dev->fp_bfreg;
+ else
+ qp->bf.bfreg = &dev->bfreg;
- qp->bf = &uuari->bfs[uuarn];
- uar_index = qp->bf->uar->index;
+ qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ uar_index = qp->bf.bfreg->index;
err = calc_sq_size(dev, init_attr, qp);
if (err < 0) {
mlx5_ib_dbg(dev, "err %d\n", err);
- goto err_uuar;
+ return err;
}
qp->rq.offset = 0;
@@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
- goto err_uuar;
+ return err;
}
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
@@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return 0;
err_wrid:
- mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
kfree(qp->sq.wr_data);
kfree(qp->rq.wrid);
+ mlx5_db_free(dev->mdev, &qp->db);
err_free:
kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &qp->buf);
-
-err_uuar:
- free_uuar(&dev->mdev->priv.uuari, uuarn);
return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
kfree(qp->sq.wr_data);
kfree(qp->rq.wrid);
+ mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf);
- free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -1353,7 +1326,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (init_attr->create_flags || init_attr->send_cq)
return -EINVAL;
- min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+ min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
if (udata->outlen < min_resp_len)
return -EINVAL;
@@ -1526,9 +1499,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
- if (init_attr->qp_type != IB_QPT_RAW_PACKET)
- mlx5_ib_odp_create_qp(qp);
-
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -1795,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err_create:
if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(pd, qp, base);
+ destroy_qp_user(dev, pd, qp, base);
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
@@ -1923,7 +1893,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_qp_disable_pagefaults(qp);
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
@@ -1974,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
+ destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
@@ -2823,16 +2792,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (mlx5_st < 0)
goto out;
- /* If moving to a reset or error state, we must disable page faults on
- * this QP and flush all current page faults. Otherwise a stale page
- * fault may attempt to work on this QP after it is reset and moved
- * again to RTS, and may cause the driver and the device to get out of
- * sync. */
- if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
- (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) &&
- (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
- mlx5_ib_qp_disable_pagefaults(qp);
-
if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
!optab[mlx5_cur][mlx5_new])
goto out;
@@ -2864,10 +2823,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (err)
goto out;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT &&
- (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
- mlx5_ib_qp_enable_pagefaults(qp);
-
qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
@@ -3029,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
if (wr->opcode == IB_WR_LSO) {
struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
- int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+ int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
u64 left, leftlen, copysz;
void *pdata = ud_wr->header;
left = ud_wr->hlen;
eseg->mss = cpu_to_be16(ud_wr->mss);
- eseg->inline_hdr_sz = cpu_to_be16(left);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
/*
* check if there is space till the end of queue, if yes,
* copy all in one shot, otherwise copy till the end of queue,
* rollback and than the copy the left
*/
- leftlen = qend - (void *)eseg->inline_hdr_start;
+ leftlen = qend - (void *)eseg->inline_hdr.start;
copysz = min_t(u64, leftlen, left);
memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
@@ -3080,9 +3035,10 @@ static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}
-static __be16 get_klm_octo(int npages)
+static u64 get_xlt_octo(u64 bytes)
{
- return cpu_to_be16(ALIGN(npages, 8) / 2);
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
}
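
/*
 * get_xlt_octo() expresses a byte count as the number of octowords the UMR
 * control segment expects, rounded up to the XLT alignment.  Assuming a
 * 16-byte octoword and 64-byte alignment, 48 bytes worth of MTT entries round
 * up to 64 bytes, i.e. 4 octowords.
 */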
static __be64 frwr_mkey_mask(void)
@@ -3127,18 +3083,14 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr)
+ struct mlx5_ib_mr *mr)
{
- int ndescs = mr->ndescs;
+ int size = mr->ndescs * mr->desc_size;
memset(umr, 0, sizeof(*umr));
- if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- /* KLMs take twice the size of MTTs */
- ndescs *= 2;
-
umr->flags = MLX5_UMR_CHECK_NOT_FREE;
- umr->klm_octowords = get_klm_octo(ndescs);
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->mkey_mask = frwr_mkey_mask();
}
@@ -3149,37 +3101,17 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
umr->flags = MLX5_UMR_INLINE;
}
-static __be64 get_umr_reg_mr_mask(int atomic)
+static __be64 get_umr_enable_mr_mask(void)
{
u64 result;
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
+ result = MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_FREE;
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
return cpu_to_be64(result);
}
-static __be64 get_umr_unreg_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_mtt_mask(void)
+static __be64 get_umr_disable_mr_mask(void)
{
u64 result;
@@ -3194,23 +3126,22 @@ static __be64 get_umr_update_translation_mask(void)
result = MLX5_MKEY_MASK_LEN |
MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
+ MLX5_MKEY_MASK_START_ADDR;
return cpu_to_be64(result);
}
-static __be64 get_umr_update_access_mask(void)
+static __be64 get_umr_update_access_mask(int atomic)
{
u64 result;
- result = MLX5_MKEY_MASK_LW |
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
+ MLX5_MKEY_MASK_RW;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
return cpu_to_be64(result);
}
@@ -3219,9 +3150,7 @@ static __be64 get_umr_update_pd_mask(void)
{
u64 result;
- result = MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
+ result = MLX5_MKEY_MASK_PD;
return cpu_to_be64(result);
}
@@ -3238,24 +3167,24 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
else
umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
- if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
- umr->klm_octowords = get_klm_octo(umrwr->npages);
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
- umr->mkey_mask = get_umr_update_mtt_mask();
- umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
- umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
- umr->mkey_mask |= get_umr_update_translation_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
- umr->mkey_mask |= get_umr_update_access_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
- umr->mkey_mask |= get_umr_update_pd_mask();
- if (!umr->mkey_mask)
- umr->mkey_mask = get_umr_reg_mr_mask(atomic);
- } else {
- umr->mkey_mask = get_umr_unreg_mr_mask();
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+ u64 offset = get_xlt_octo(umrwr->offset);
+
+ umr->xlt_offset = cpu_to_be16(offset & 0xffff);
+ umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
+ umr->mkey_mask |= get_umr_update_access_mask(atomic);
+ umr->mkey_mask |= get_umr_update_pd_mask();
}
+ if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
+ umr->mkey_mask |= get_umr_enable_mr_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ umr->mkey_mask |= get_umr_disable_mr_mask();
if (!wr->num_sge)
umr->flags |= MLX5_UMR_INLINE;
@@ -3303,17 +3232,17 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg));
- if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
seg->status = MLX5_MKEY_STATUS_FREE;
- return;
- }
seg->flags = convert_access(umrwr->access_flags);
- if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
- if (umrwr->pd)
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
- seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
- }
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
+ !umrwr->length)
+ seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+
+ seg->start_addr = cpu_to_be64(umrwr->virt_addr);
seg->len = cpu_to_be64(umrwr->length);
seg->log2_page_size = umrwr->page_shift;
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
@@ -3611,7 +3540,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_sig_handover_wr *wr, u32 nelements,
+ struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;
@@ -3626,17 +3555,17 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
MLX5_MKEY_BSF_EN | pdn);
seg->len = cpu_to_be64(length);
- seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
+ seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}
static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- u32 nelements)
+ u32 size)
{
memset(umr, 0, sizeof(*umr));
umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
- umr->klm_octowords = get_klm_octo(nelements);
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
umr->mkey_mask = sig_mkey_mask();
}
@@ -3648,7 +3577,7 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
- u32 klm_oct_size;
+ u32 xlt_size;
int region_len, ret;
if (unlikely(wr->wr.num_sge != 1) ||
@@ -3670,15 +3599,15 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
* then we use strided block format (3 octowords),
* else we use single KLM (1 octoword)
**/
- klm_oct_size = wr->prot ? 3 : 1;
+ xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);
- set_sig_umr_segment(*seg, klm_oct_size);
+ set_sig_umr_segment(*seg, xlt_size);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
- set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
+ set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
*seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
@@ -3784,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}
-static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
- unsigned bytecnt, struct mlx5_ib_qp *qp)
-{
- while (bytecnt > 0) {
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- __iowrite64_copy(dst++, src++, 8);
- bytecnt -= 64;
- if (unlikely(src == qp->sq.qend))
- src = mlx5_get_send_wqe(qp, 0);
- }
-}
-
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
@@ -3897,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
qp = to_mqp(ibqp);
- bf = qp->bf;
+ bf = &qp->bf;
qend = qp->sq.qend;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -4170,28 +4081,13 @@ out:
* we hit doorbell */
wmb();
- if (bf->need_lock)
- spin_lock(&bf->lock);
- else
- __acquire(&bf->lock);
-
- /* TBD enable WC */
- if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
- mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
- /* wc_wmb(); */
- } else {
- mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
- MLX5_GET_DOORBELL_LOCK(&bf->lock32));
- /* Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
- }
+ /* currently we support only regular doorbells */
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ mmiowb();
bf->offset ^= bf->buf_size;
- if (bf->need_lock)
- spin_unlock(&bf->lock);
- else
- __release(&bf->lock);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -4559,14 +4455,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /*
- * Wait for any outstanding page faults, in case the user frees memory
- * based upon this query's result.
- */
- flush_workqueue(mlx5_ib_page_fault_wq);
-#endif
-
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
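The mlx5 hunks above replace the descriptor-count helper get_klm_octo(npages) with the byte-based get_xlt_octo(bytes), so UMR sizes are expressed uniformly in 16-byte octowords whether the translation table holds MTTs or KLMs (the old code had to double ndescs for KLMs). A minimal standalone sketch of the conversion, assuming MLX5_IB_UMR_OCTOWORD is 16 bytes and MLX5_IB_UMR_XLT_ALIGNMENT is 64 bytes (both are assumptions about mlx5_ib.h, which is not part of this diff):

/* Standalone sketch (not from the patch): byte count -> 16-byte octowords. */
#include <stdint.h>
#include <stdio.h>

#define MLX5_IB_UMR_OCTOWORD      16ULL   /* assumed: one octoword = 16 bytes */
#define MLX5_IB_UMR_XLT_ALIGNMENT 64ULL   /* assumed XLT alignment */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static uint64_t get_xlt_octo(uint64_t bytes)
{
	/* round the translation-table size up to the alignment, then
	 * express it in 16-byte octowords */
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) / MLX5_IB_UMR_OCTOWORD;
}

int main(void)
{
	/* e.g. 4 KLM descriptors of 16 bytes each -> 64 bytes -> 4 octowords */
	printf("%llu\n", (unsigned long long)get_xlt_octo(4 * 16));
	return 0;
}

With a 16-byte struct mlx5_klm, the "wr->prot ? 0x30 : sizeof(struct mlx5_klm)" choice in set_sig_umr_wr() maps back to the 3-octoword strided block format versus the single-KLM octoword described in the comment above it.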
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb14768b..5a31f3c6a421 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
-
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
props->lmc = 0;
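The nes hunk collapses the open-coded MTU ladder into ib_mtu_int_to_enum(), which the qedr hunk further down uses as well. A standalone sketch of what that helper presumably does — the threshold ladder mirrors the code removed above, and the enum values mirror include/rdma/ib_verbs.h:

/* Sketch (not taken from the tree): presumed ib_mtu_int_to_enum() behaviour. */
#include <stdio.h>

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5,
};

static enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

int main(void)
{
	printf("%d\n", ib_mtu_int_to_enum(1500));	/* -> IB_MTU_1024 (3) */
	return 0;
}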
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
return 0;
}
-void qedr_unaffiliated_event(void *context,
- u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
{
pr_err("unaffiliated event not implemented yet\n");
}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
goto sysfs_err;
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
ib_dealloc_device(&dev->ibdev);
}
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
{
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
- return 0;
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
qedr_remove(dev);
}
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
static void qedr_mac_address_change(struct qedr_dev *dev)
{
union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
if (rc)
DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
switch (event) {
case QEDE_UP:
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ qedr_open(dev);
break;
case QEDE_DOWN:
qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
struct qed_rdma_events events;
};
+#define QEDR_ENET_STATE_BIT (0)
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+
+ unsigned long enet_state;
};
#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
@@ -251,9 +256,6 @@ struct qedr_cq {
u16 icid;
- /* Lock to protect completion handler */
- spinlock_t comp_handler_lock;
-
/* Lock to protect multiplem CQ's */
spinlock_t cq_lock;
u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
qedr_inc_sw_gsi_cons(&qp->sq);
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
- else
packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_ucontext *uctx = NULL;
- struct qedr_alloc_pd_uresp uresp;
struct qedr_pd *pd;
u16 pd_id;
int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!pd)
return ERR_PTR(-ENOMEM);
- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ goto err;
- uresp.pd_id = pd_id;
pd->pd_id = pd_id;
if (udata && context) {
+ struct qedr_alloc_pd_uresp uresp;
+
+ uresp.pd_id = pd_id;
+
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (rc)
+ if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
- uctx = get_qedr_ucontext(context);
- uctx->pd = pd;
- pd->uctx = uctx;
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ goto err;
+ }
+
+ pd->uctx = get_qedr_ucontext(context);
+ pd->uctx->pd = pd;
}
return &pd->ibpd;
+
+err:
+ kfree(pd);
+ return ERR_PTR(rc);
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
return ERR_PTR(-EFAULT);
}
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
switch (qp_state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
return IB_QPS_ERR;
}
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+ enum ib_qp_state qp_state)
{
switch (qp_state) {
case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
int status = 0;
if (new_state == qp->state)
- return 1;
+ return 0;
switch (qp->state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* ERR->XXX */
switch (new_state) {
case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
break;
default:
status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
qp_params.remote_mac_addr);
-;
qp_params.mtu = qp->mtu;
qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->qp_state = qedr_get_ibqp_state(params.state);
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
- qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
qp_attr->rq_psn = params.rq_psn;
qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
return rc;
}
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+ int max_page_list_len)
{
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
return 0;
}
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
return true;
}
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
IB_WC_SUCCESS, 0);
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 80ef3f8998c8..04443242e258 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -80,7 +80,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
left = PAGE_SIZE;
mutex_lock(&us_ibdev->usdev_lock);
- if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
+ if (kref_read(&us_ibdev->vf_cnt) > 0) {
char *busname;
/*
@@ -99,7 +99,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
PCI_FUNC(us_ibdev->pdev->devfn),
netdev_name(us_ibdev->netdev),
us_ibdev->ufdev->mac,
- atomic_read(&us_ibdev->vf_cnt.refcount));
+ kref_read(&us_ibdev->vf_cnt));
UPDATE_PTR_LEFT(n, ptr, left);
for (res_type = USNIC_VNIC_RES_TYPE_EOL;
@@ -147,7 +147,7 @@ usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
return scnprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&us_ibdev->vf_cnt.refcount));
+ kref_read(&us_ibdev->vf_cnt));
}
static ssize_t
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 74819a7951e2..69df8e353123 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -291,11 +291,11 @@ int usnic_ib_query_device(struct ib_device *ibdev,
qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
props->max_qp = qp_per_vf *
- atomic_read(&us_ibdev->vf_cnt.refcount);
+ kref_read(&us_ibdev->vf_cnt);
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
- atomic_read(&us_ibdev->vf_cnt.refcount);
+ kref_read(&us_ibdev->vf_cnt);
props->max_pd = USNIC_UIOM_MAX_PD_CNT;
props->max_mr = USNIC_UIOM_MAX_MR_CNT;
props->local_ca_ack_delay = 0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce1f4be..bd8fbd3d2032 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "failed to allocate interrupts\n");
ret = -ENOMEM;
- goto err_netdevice;
+ goto err_free_cq_ring;
}
/* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
err_free_intrs:
pvrdma_free_irq(dev);
pvrdma_disable_msi_all(dev);
-err_netdevice:
- unregister_netdevice_notifier(&dev->nb_netdev);
err_free_cq_ring:
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 54891370d18a..c2aa52638dcb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
- struct pvrdma_alloc_ucontext_resp uresp;
+ struct pvrdma_alloc_ucontext_resp uresp = {0};
int ret;
void *ptr;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d0faca294006..86a6585b847d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
case RXE_MEM_TYPE_MR:
case RXE_MEM_TYPE_FMR:
- return ((iova < mem->iova) ||
- ((iova + length) > (mem->iova + mem->length))) ?
- -EFAULT : 0;
+ if (iova < mem->iova ||
+ length > mem->length ||
+ iova > mem->iova + mem->length - length)
+ return -EFAULT;
+ return 0;
default:
return -EFAULT;
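The rxe_mr.c hunk rewrites mem_check_range() so the bounds test cannot be defeated by unsigned wraparound: the old form computed iova + length, which can wrap for hostile values and slip under the upper bound, while the new form first checks length against mem->length and only then compares iova against the non-wrapping end of the region. A standalone sketch with made-up values showing the difference:

/* Standalone demonstration (values are illustrative, not from the patch). */
#include <stdint.h>
#include <stdio.h>

struct mem { uint64_t iova; uint64_t length; };

static int old_check(const struct mem *m, uint64_t iova, uint64_t length)
{
	return (iova < m->iova ||
		(iova + length) > (m->iova + m->length)) ? -1 : 0;
}

static int new_check(const struct mem *m, uint64_t iova, uint64_t length)
{
	if (iova < m->iova ||
	    length > m->length ||
	    iova > m->iova + m->length - length)
		return -1;
	return 0;
}

int main(void)
{
	struct mem m = { .iova = 0x1000, .length = 0x1000 };
	uint64_t iova = 0xfffffffffffff000ULL, length = 0x2000;

	/* iova + length wraps to 0x1000, so the old check accepts an access
	 * far outside the registered region; the new check rejects it */
	printf("old: %d, new: %d\n",
	       old_check(&m, iova, length), new_check(&m, iova, length));
	return 0;
}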
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e78163613..d9d15561eb5d 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -118,7 +118,7 @@ static struct device *dma_device(struct rxe_dev *rxe)
ndev = rxe->ndev;
- if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
return ndev->dev.parent;
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
}
spin_lock_bh(&dev_list_lock);
- list_add_tail(&rxe_dev_list, &rxe->list);
+ list_add_tail(&rxe->list, &rxe_dev_list);
spin_unlock_bh(&dev_list_lock);
return rxe;
}
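The rxe_net.c hunk fixes swapped list_add_tail() arguments: the helper takes the new entry first and the list head second, so the original call spliced the global rxe_dev_list head into each new device's ring, silently dropping previously registered devices. A standalone sketch with a simplified clone of the list.h helper, showing the corrected order:

/* Simplified clone of <linux/list.h> for illustration only. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	/* insert @new just before @head, i.e. at the tail of the list */
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct rxe_dev { struct list_head list; };

static struct list_head rxe_dev_list = LIST_HEAD_INIT(rxe_dev_list);

int main(void)
{
	struct rxe_dev rxe = { .list = LIST_HEAD_INIT(rxe.list) };

	list_add_tail(&rxe.list, &rxe_dev_list);	/* corrected order */
	printf("rxe_dev_list %s empty\n",
	       rxe_dev_list.next == &rxe_dev_list ? "is" : "is not");
	return 0;
}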
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576e55bc..44b2108253bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
del_timer_sync(&qp->rnr_nak_timer);
rxe_cleanup_task(&qp->req.task);
- if (qp_type(qp) == IB_QPT_RC)
- rxe_cleanup_task(&qp->comp.task);
+ rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
__rxe_do_task(&qp->req.task);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3435efff8799..5bcf07328972 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
goto err2;
}
- resid = mtu;
+ qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b8cac9..30a6985909e0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
SHOST_DIX_GUARD_CRC);
}
- /*
- * Limit the sg_tablesize and max_sectors based on the device
- * max fastreg page list length.
- */
- shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
- ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, shost->sg_tablesize,
+ shost->max_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
@@ -997,6 +994,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.change_queue_depth = scsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
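The iser hunk drops the sg_tablesize clamping from session creation (it moves into iser_calc_scsi_params(), see the iser_verbs.c hunk below) and adds a debug print; max_sectors is still derived from the final sg_tablesize via ((sg_tablesize - 1) * PAGE_SIZE) >> 9. A worked example of that calculation, assuming a 4 KiB PAGE_SIZE and an sg_tablesize of 128 (both values are assumptions for illustration):

/* Worked example of the max_fr_sectors arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned int sg_tablesize = 128;	/* assumed host value */
	unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned long max_fr_sectors =
		((sg_tablesize - 1) * page_size) >> 9;	/* 512-byte sectors */

	printf("%lu\n", max_fr_sectors);	/* 127 * 4096 / 512 = 1016 */
	return 0;
}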
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c5ddb5..9d0b22ad58c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
- unsigned int scsi_max_sectors;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3beddb7..6a9d1cb548ee 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
device->ib_device->attrs.max_fast_reg_page_list_len);
- if (sg_tablesize > sup_sg_tablesize) {
- sg_tablesize = sup_sg_tablesize;
- iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
- } else {
- iser_conn->scsi_max_sectors = max_sectors;
- }
-
- iser_conn->scsi_sg_tablesize = sg_tablesize;
-
- iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
- iser_conn, iser_conn->scsi_sg_tablesize,
- iser_conn->scsi_max_sectors);
+ iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc07123193..36529e390e48 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
struct srp_fr_desc *d;
struct ib_mr *mr;
int i, ret = -EINVAL;
+ enum ib_mr_type mr_type;
if (pool_size <= 0)
goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list);
+ if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
- mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- max_page_list_len);
+ mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
if (ret == -ENOMEM)
@@ -2864,6 +2869,7 @@ static struct scsi_host_template srp_template = {
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
+ .eh_timed_out = srp_timed_out,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
@@ -3694,6 +3700,12 @@ static int __init srp_init_module(void)
indirect_sg_entries = cmd_sg_entries;
}
+ if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+ pr_warn("Clamping indirect_sg_entries to %u\n",
+ SG_MAX_SEGMENTS);
+ indirect_sg_entries = SG_MAX_SEGMENTS;
+ }
+
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 92595b98e7ed..022be0e22eba 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
return -EINVAL;
}
- if (test_bit(ABS_MT_SLOT, dev->absbit)) {
- nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
- error = input_mt_init_slots(dev, nslot, 0);
- if (error)
+ if (test_bit(EV_ABS, dev->evbit)) {
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo) {
+ error = -EINVAL;
goto fail1;
- } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
- input_set_events_per_packet(dev, 60);
+ }
+
+ if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+ nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+ error = input_mt_init_slots(dev, nslot, 0);
+ if (error)
+ goto fail1;
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ input_set_events_per_packet(dev, 60);
+ }
}
if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa598f7f4372..1e1d0ad406f2 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1231,6 +1231,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0605", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 8993983e3fe4..bb7762bf2879 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -42,13 +42,19 @@ config RMI4_SMB
config RMI4_F03
bool "RMI4 Function 03 (PS2 Guest)"
depends on RMI4_CORE
- depends on SERIO=y || RMI4_CORE=SERIO
help
Say Y here if you want to add support for RMI4 function 03.
Function 03 provides PS2 guest support for RMI4 devices. This
includes support for TrackPoints on TouchPads.
+config RMI4_F03_SERIO
+ tristate
+ depends on RMI4_CORE
+ depends on RMI4_F03
+ default RMI4_CORE
+ select SERIO
+
config RMI4_2D_SENSOR
bool
depends on RMI4_CORE
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 11447ab1055c..bf5c36e229ba 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
data->enabled = true;
if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = disable_irq_wake(irq);
- if (!retval)
+ if (retval)
dev_warn(&rmi_dev->dev,
"Failed to disable irq for wake: %d\n",
retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
disable_irq(irq);
if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = enable_irq_wake(irq);
- if (!retval)
+ if (retval)
dev_warn(&rmi_dev->dev,
"Failed to enable irq for wake: %d\n",
retval);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 83cf11312fd9..c9d1c91e1887 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+ wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ee54d71c7eb..37e204f3d9be 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
select IOMMU_API
select MEMORY
select MTK_SMI
- select COMMON_CLK_MT2701_MMSYS
- select COMMON_CLK_MT2701_IMGSYS
- select COMMON_CLK_MT2701_VDECSYS
help
Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
Multimedia Memory Managememt Unit. This option enables remapping of
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3ef0f42984f2..1b5b8c5361c5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
-static const struct iommu_ops amd_iommu_ops;
+const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
int devid;
if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
if (devid < 0)
return devid;
+ iommu = amd_iommu_rlookup_table[devid];
+
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
dev->archdata.iommu = dev_data;
- iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
- dev);
+ iommu_device_link(&iommu->iommu, dev);
return 0;
}
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
- int devid;
struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
+ iommu = amd_iommu_rlookup_table[devid];
+
dev_data = search_dev_data(devid);
if (!dev_data)
return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
- dev);
+ iommu_device_unlink(&iommu->iommu, dev);
iommu_group_remove_device(dev);
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
-static void amd_iommu_get_dm_regions(struct device *dev,
- struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
+ struct iommu_resv_region *region;
struct unity_map_entry *entry;
int devid;
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
- struct iommu_dm_region *region;
+ size_t length;
+ int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ prot |= IOMMU_WRITE;
+
+ region = iommu_alloc_resv_region(entry->address_start,
+ length, prot,
+ IOMMU_RESV_DIRECT);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));
return;
}
-
- region->start = entry->address_start;
- region->length = entry->address_end - entry->address_start;
- if (entry->prot & IOMMU_PROT_IR)
- region->prot |= IOMMU_READ;
- if (entry->prot & IOMMU_PROT_IW)
- region->prot |= IOMMU_WRITE;
-
list_add_tail(&region->list, head);
}
+
+ region = iommu_alloc_resv_region(MSI_RANGE_START,
+ MSI_RANGE_END - MSI_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
+
+ region = iommu_alloc_resv_region(HT_RANGE_START,
+ HT_RANGE_END - HT_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
}
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
- struct iommu_dm_region *entry, *next;
+ struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
struct iommu_domain *domain,
- struct iommu_dm_region *region)
+ struct iommu_resv_region *region)
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
-static const struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
- .get_dm_regions = amd_iommu_get_dm_regions,
- .put_dm_regions = amd_iommu_put_dm_regions,
- .apply_dm_region = amd_iommu_apply_dm_region,
+ .get_resv_regions = amd_iommu_get_resv_regions,
+ .put_resv_regions = amd_iommu_put_resv_regions,
+ .apply_resv_region = amd_iommu_apply_resv_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6799cf9713f7..04cdac7ab3e3 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -94,6 +94,8 @@
* out of it.
*/
+extern const struct iommu_ops amd_iommu_ops;
+
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
- amd_iommu_groups, "ivhd%d",
- iommu->index);
+ iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ amd_iommu_groups, "ivhd%d", iommu->index);
+ iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+ iommu_device_register(&iommu->iommu);
return pci_enable_device(iommu->dev);
}
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
*/
ret = check_ivrs_checksum(ivrs_base);
if (ret)
- return ret;
+ goto out;
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 0d91785ebdc3..af00f381a7b1 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -535,8 +535,8 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
- /* IOMMU sysfs device */
- struct device *iommu_dev;
+ /* Handle for IOMMU core code */
+ struct iommu_device iommu;
/*
* We can't rely on the BIOS to restore all values on reinit, so we
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d6ec444a9d6..5806a6acc94e 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -269,9 +269,6 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
-#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
-#define STRTAB_STE_1_PRIVCFG_SHIFT 48
-
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -412,6 +409,9 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -616,6 +616,9 @@ struct arm_smmu_device {
unsigned int sid_bits;
struct arm_smmu_strtab_cfg strtab_cfg;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
};
/* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
- STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
- STRTAB_STE_1_PRIVCFG_UNPRIV <<
- STRTAB_STE_1_PRIVCFG_SHIFT);
+ STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}
if (ste->s2_cfg) {
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!IS_ERR(group))
+ if (!IS_ERR(group)) {
iommu_group_put(group);
+ iommu_device_link(&smmu->iommu, dev);
+ }
return PTR_ERR_OR_ZERO(group);
}
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_data *master;
+ struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = fwspec->iommu_priv;
+ smmu = master->smmu;
if (master && master->ste.valid)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
+ iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
u32 size, l1size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- /*
- * If we can resolve everything with a single L2 table, then we
- * just need a single L1 descriptor. Otherwise, calculate the L1
- * size, capped to the SIDSIZE.
- */
- if (smmu->sid_bits < STRTAB_SPLIT) {
- size = 0;
- } else {
- size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
- size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- }
+ /* Calculate the L1 size, capped to the SIDSIZE. */
+ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+ size = min(size, smmu->sid_bits - STRTAB_SPLIT);
cfg->num_l1_ents = 1 << size;
size += STRTAB_SPLIT;
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
+ /*
+ * If the SMMU supports fewer bits than would fill a single L2 stream
+ * table, use a linear table instead.
+ */
+ if (smmu->sid_bits <= STRTAB_SPLIT)
+ smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
struct resource *res;
+ resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass;
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
+ ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+ "smmu3.%pa", &ioaddr);
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+ ret = iommu_device_register(&smmu->iommu);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a60cded8a6ed..abf6496843a6 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,6 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - Context fault reporting
+ * - Extended Stream ID (16 bit)
*/
#define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
+#define sCR0_EXIDENABLE (1 << 3)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
@@ -126,6 +128,7 @@
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
+#define ID0_EXIDS (1 << 8)
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff
@@ -169,6 +172,7 @@
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
+#define S2CR_EXIDVALID (1 << 10)
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS (1 << 4)
#define TTBRn_ASID_SHIFT 48
@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
#define FSYNR0_WNR (1 << 4)
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
@@ -351,6 +359,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS (1 << 12)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,6 +389,9 @@ struct arm_smmu_device {
unsigned int *irqs;
u32 cavium_id_base; /* Specific to Cavium */
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
};
enum arm_smmu_context_fmt {
@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
reg2 |= TTBCR2_SEP_UPSTREAM;
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ reg2 |= TTBCR2_AS;
}
if (smmu->version > ARM_SMMU_V1)
writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
struct arm_smmu_smr *smr = smmu->smrs + idx;
u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
- if (smr->valid)
+ if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
reg |= SMR_VALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
(s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
(s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+ smmu->smrs[idx].valid)
+ reg |= S2CR_EXIDVALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
arm_smmu_write_smr(smmu, idx);
}
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ u32 smr;
+
+ if (!smmu->smrs)
+ return;
+
+ /*
+ * SMR.ID bits may not be preserved if the corresponding MASK
+ * bits are set, so check each one separately. We can reject
+ * masters later if they try to claim IDs outside these masks.
+ */
+ smr = smmu->streamid_mask << SMR_ID_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+ smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
continue;
s2cr[idx].type = type;
- s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+ s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
+ iommu_device_link(&smmu->iommu, dev);
+
return 0;
out_free:
@@ -1456,10 +1501,17 @@ out_free:
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_device *smmu;
+
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
+ cfg = fwspec->iommu_priv;
+ smmu = cfg->smmu;
+
+ iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(fwspec);
iommu_group_remove_device(dev);
kfree(fwspec->iommu_priv);
@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_VMID16)
reg |= sCR0_VMID16EN;
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+ reg |= sCR0_EXIDENABLE;
+
/* Push the button */
__arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
"\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
- size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+ smmu->features |= ARM_SMMU_FEAT_EXIDS;
+ size = 1 << 16;
+ } else {
+ size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ }
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
- u32 smr;
-
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
if (size == 0) {
@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- /*
- * SMR.ID bits may not be preserved if the corresponding MASK
- * bits are set, so check each one separately. We can reject
- * masters later if they try to claim IDs outside these masks.
- */
- smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
- smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
/* Zero-initialised to mark as invalid */
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
GFP_KERNEL);
@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENOMEM;
dev_notice(smmu->dev,
- "\tstream matching with %lu register groups, mask 0x%x",
- size, smmu->smr_mask_mask);
+ "\tstream matching with %lu register groups", size);
}
/* s2cr->type == 0 means translation, so initialise explicitly */
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
+ resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
}
- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+ err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+ "smmu.%pa", &ioaddr);
+ if (err) {
+ dev_err(dev, "Failed to register iommu in sysfs\n");
+ return err;
+ }
+
+ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+ err = iommu_device_register(&smmu->iommu);
+ if (err) {
+ dev_err(dev, "Failed to register iommu\n");
+ return err;
+ }
+
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
/* Oh, for a proper bus abstraction */
if (!iommu_present(&platform_bus_type))
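
The SMR probing deleted from arm_smmu_device_cfg_probe() above reappears as the arm_smmu_test_smr_masks() call after arm_smmu_device_reset(): once ARM_SMMU_FEAT_EXIDS is in play, the writable ID/MASK bits can presumably only be probed after sCR0.EXIDENABLE has been configured. The helper's body is outside this hunk; a minimal sketch, assuming it simply re-runs the removed read-back probe against SMR(0):

static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);	/* GR0 page, assumed helper as used elsewhere in the driver */
	u32 smr;

	if (!smmu->smrs)
		return;

	/* Write all-ones to the ID field and keep whatever sticks */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	/* Likewise for the MASK field */
	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}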
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d641cf45..48d36ce59efb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
phys_addr_t phys;
};
+enum iommu_dma_cookie_type {
+ IOMMU_DMA_IOVA_COOKIE,
+ IOMMU_DMA_MSI_COOKIE,
+};
+
struct iommu_dma_cookie {
- struct iova_domain iovad;
- struct list_head msi_page_list;
- spinlock_t msi_lock;
+ enum iommu_dma_cookie_type type;
+ union {
+ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+ struct iova_domain iovad;
+ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+ dma_addr_t msi_iova;
+ };
+ struct list_head msi_page_list;
+ spinlock_t msi_lock;
};
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return cookie->iovad.granule;
+ return PAGE_SIZE;
+}
+
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
- return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return &cookie->iovad;
+ return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+ struct iommu_dma_cookie *cookie;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (cookie) {
+ spin_lock_init(&cookie->msi_lock);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->type = type;
+ }
+ return cookie;
}
int iommu_dma_init(void)
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
+ if (domain->iova_cookie)
+ return -EEXIST;
+
+ domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+ if (!domain->iova_cookie)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
struct iommu_dma_cookie *cookie;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
if (domain->iova_cookie)
return -EEXIST;
- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
- spin_lock_init(&cookie->msi_lock);
- INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->msi_iova = base;
domain->iova_cookie = cookie;
return 0;
}
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
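
A hedged usage sketch of the new MSI-only cookie (names and the base address below are illustrative, not from this patch): a driver that manages its own IOMMU_DOMAIN_UNMANAGED domain reserves a free IOVA window itself and asks the DMA layer to handle only MSI doorbell mappings inside it.

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

/* Illustrative base; the caller must keep this window free of other mappings */
#define EXAMPLE_MSI_IOVA_BASE	0x08000000UL

static int example_enable_msi_remap(struct iommu_domain *domain)
{
	/*
	 * One PAGE_SIZE slot is handed out per distinct doorbell address,
	 * allocated linearly upwards from the base (see cookie->msi_iova).
	 */
	return iommu_get_msi_cookie(domain, EXAMPLE_MSI_IOVA_BASE);
}

Teardown goes through the same iommu_put_dma_cookie() path as a full DMA cookie, as the updated kerneldoc below notes.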
/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ * iommu_get_msi_cookie()
*
* IOMMU drivers should normally call this from their domain_free callback.
*/
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
- if (cookie->iovad.granule)
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
put_iova_domain(&cookie->iovad);
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ bool pci = dev && dev_is_pci(dev);
- if (!iovad)
- return -ENODEV;
+ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+ return -EINVAL;
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
+ /*
+ * PCI devices may have larger DMA masks, but still prefer allocating
+ * within a 32-bit mask to avoid DAC addressing. Such limitations don't
+ * apply to the typical platform device, so for those we may as well
+ * leave the cache limit at the top of their range to save an rb_last()
+ * traversal on every allocation.
+ */
+ if (pci)
+ end_pfn &= DMA_BIT_MASK(32) >> order;
- /* All we can safely do with an existing domain is enlarge it */
+ /* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
- base_pfn != iovad->start_pfn ||
- end_pfn < iovad->dma_32bit_pfn) {
+ base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
- iovad->dma_32bit_pfn = end_pfn;
+ /*
+ * If we have devices with different DMA masks, move the free
+ * area cache limit down for the benefit of the smaller one.
+ */
+ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
} else {
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
- if (dev && dev_is_pci(dev))
+ if (pci)
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
}
return 0;
@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
EXPORT_SYMBOL(iommu_dma_init_domain);
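
A worked example of the new cache-limit clamping, assuming base == 0 and a 4KB IOVA granule (order == 12):

/*
 * Device with a 48-bit DMA mask:
 *   end_pfn = (base + size - 1) >> order = (2^48 - 1) >> 12 = 0xfffffffff
 * PCI device: prefer 32-bit (SAC) addresses for the allocation cache:
 *   end_pfn &= DMA_BIT_MASK(32) >> 12           ->          0x000fffff
 * If a device with a smaller mask attaches to the same domain later,
 * iovad->dma_32bit_pfn is min()'d down again for its benefit.
 */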
/**
- * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
+ * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
+ * page flags.
* @dir: Direction of DMA transfer
* @coherent: Is the DMA master cache-coherent?
+ * @attrs: DMA attributes for the mapping
*
* Return: corresponding IOMMU API page protection flags
*/
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+ unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
+ if (attrs & DMA_ATTR_PRIVILEGED)
+ prot |= IOMMU_PRIV;
+
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
}
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
- dma_addr_t dma_limit)
+ dma_addr_t dma_limit, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
+ struct iova *iova = NULL;
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
+
+ /* Try to get PCI devices a SAC address */
+ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+ iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
+ true);
/*
* Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain...
*/
- return alloc_iova(iovad, length, dma_limit >> shift, true);
+ if (!iova)
+ iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+
+ return iova;
}
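
Spelled out, the allocation order __alloc_iova() now implements (shift value illustrative):

/*
 * PCI device, 64-bit DMA mask, 4KB granule (shift == 12):
 *   1st attempt: alloc_iova(iovad, length, DMA_BIT_MASK(32) >> 12, true)
 *                -> keeps the address in single-address-cycle (SAC) range
 *   fallback:    alloc_iova(iovad, length, dma_limit >> 12, true)
 *                -> full (DAC) range, used only if the 32-bit window is full
 * Non-PCI devices go straight to the single full-range attempt.
 */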
/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages)
return NULL;
- iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
+ iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
struct iova_domain *iovad = cookie_iovad(domain);
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
+ struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;
@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
+ iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * 'Special' IOMMUs which don't have the same addressing capability
- * as the CPU will have to wait until we have some way to query that
- * before they'll be able to use this framework.
- */
- return 1;
-}
-
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == DMA_ERROR_CODE;
@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
- struct iova_domain *iovad = &cookie->iovad;
+ struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+ size_t size = cookie_msi_granule(cookie);
- msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+ msi_addr &= ~(phys_addr_t)(size - 1);
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
- if (!iova)
- goto out_free_page;
-
msi_page->phys = msi_addr;
- msi_page->iova = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+ if (iovad) {
+ iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
+ goto out_free_page;
+ msi_page->iova = iova_dma_addr(iovad, iova);
+ } else {
+ msi_page->iova = cookie->msi_iova;
+ cookie->msi_iova += size;
+ }
+
+ if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
- __free_iova(iovad, iova);
+ if (iovad)
+ __free_iova(iovad, iova);
+ else
+ cookie->msi_iova -= size;
out_free_page:
kfree(msi_page);
return NULL;
@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msg->data = ~0U;
} else {
msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= iova_mask(&cookie->iovad);
+ msg->address_lo &= cookie_msi_granule(cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8ccbd7023194..d9c0decfc91a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
+extern const struct iommu_ops intel_iommu_ops;
+
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
if (intel_iommu_enabled) {
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
+ err = iommu_device_sysfs_add(&iommu->iommu, NULL,
+ intel_iommu_groups,
+ "%s", iommu->name);
+ if (err)
+ goto err_unmap;
- if (IS_ERR(iommu->iommu_dev)) {
- err = PTR_ERR(iommu->iommu_dev);
+ iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+
+ err = iommu_device_register(&iommu->iommu);
+ if (err)
goto err_unmap;
- }
}
drhd->iommu = iommu;
@@ -1103,7 +1108,8 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
- iommu_device_destroy(iommu->iommu_dev);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ iommu_device_unregister(&iommu->iommu);
if (iommu->irq) {
if (iommu->pr_irq) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 57ba0d3091ea..a7e0821c9967 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
struct list_head owner_node; /* node for owner controllers list */
phys_addr_t pgtable; /* assigned page table structure */
unsigned int version; /* our version */
+
+ struct iommu_device iommu; /* IOMMU core handle */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
{
sysmmu_pte_t *ent;
- dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
- finfo->name, fault_addr, &data->pgtable);
+ dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
+ dev_name(data->master), finfo->name, fault_addr);
+ dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
- dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
+ dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
+ dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
}
}
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(data->sysmmu));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
+ iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, data);
__sysmmu_get_version(data);
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
-
return 0;
}
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
DMA_TO_DEVICE);
/* For mapping page table entries we rely on dma == phys */
BUG_ON(handle != virt_to_phys(domain->pgtable));
+ if (dma_mapping_error(dma_dev, handle))
+ goto err_lv2ent;
spin_lock_init(&domain->lock);
spin_lock_init(&domain->pgtablelock);
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
return &domain->domain;
+err_lv2ent:
+ free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
}
if (lv1ent_fault(sent)) {
+ dma_addr_t handle;
sysmmu_pte_t *pent;
bool need_flush_flpd_cache = lv1ent_zero(sent);
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
- dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+ handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, handle)) {
+ kmem_cache_free(lv2table_kmem_cache, pent);
+ return ERR_PTR(-EADDRINUSE);
+ }
/*
* If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
if (!has_sysmmu(dev))
return;
+ if (owner->domain) {
+ struct iommu_group *group = iommu_group_get(dev);
+
+ if (group) {
+ WARN_ON(owner->domain !=
+ iommu_group_default_domain(group));
+ exynos_iommu_detach_device(owner->domain, dev);
+ iommu_group_put(group);
+ }
+ }
iommu_group_remove_device(dev);
}
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
- struct sysmmu_drvdata *data;
+ struct sysmmu_drvdata *data, *entry;
if (!sysmmu)
return -ENODEV;
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
dev->archdata.iommu = owner;
}
+ list_for_each_entry(entry, &owner->controllers, owner_node)
+ if (entry == data)
+ return 0;
+
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8a185250ae5a..f5e02f8e7371 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
+ struct iommu_resv_region *resv; /* reserved region handle */
};
struct dmar_atsr_unit {
@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
- level_pfn = pfn & level_mask(level - 1);
+ level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)
@@ -3325,13 +3326,14 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_GFX;
#endif
+ check_tylersburg_isoch();
+
if (iommu_identity_mapping) {
ret = si_domain_init(hw_pass_through);
if (ret)
goto free_iommu;
}
- check_tylersburg_isoch();
/*
* If we copied translations from a previous kernel in the kdump
@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
+ int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
+ size_t length;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
- return -ENOMEM;
+ goto out;
rmrru->hdr = header;
rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
+
+ length = rmrr->end_address - rmrr->base_address + 1;
+ rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+ IOMMU_RESV_DIRECT);
+ if (!rmrru->resv)
+ goto free_rmrru;
+
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
- if (rmrru->devices_cnt && rmrru->devices == NULL) {
- kfree(rmrru);
- return -ENOMEM;
- }
+ if (rmrru->devices_cnt && rmrru->devices == NULL)
+ goto free_all;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
+free_all:
+ kfree(rmrru->resv);
+free_rmrru:
+ kfree(rmrru);
+out:
+ return -ENOMEM;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru->resv);
kfree(rmrru);
}
@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops();
- for_each_active_iommu(iommu, drhd)
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
+ for_each_active_iommu(iommu, drhd) {
+ iommu_device_sysfs_add(&iommu->iommu, NULL,
+ intel_iommu_groups,
+ "%s", iommu->name);
+ iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+ iommu_device_register(&iommu->iommu);
+ }
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb);
@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
if (!iommu)
return -ENODEV;
- iommu_device_link(iommu->iommu_dev, dev);
+ iommu_device_link(&iommu->iommu, dev);
group = iommu_group_get_for_dev(dev);
@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
- iommu_device_unlink(iommu->iommu_dev, dev);
+ iommu_device_unlink(&iommu->iommu, dev);
+}
+
+static void intel_iommu_get_resv_regions(struct device *device,
+ struct list_head *head)
+{
+ struct iommu_resv_region *reg;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *i_dev;
+ int i;
+
+ rcu_read_lock();
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, i_dev) {
+ if (i_dev != device)
+ continue;
+
+ list_add_tail(&rmrr->resv->list, head);
+ }
+ }
+ rcu_read_unlock();
+
+ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!reg)
+ return;
+ list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list) {
+ if (entry->type == IOMMU_RESV_RESERVED)
+ kfree(entry);
+ }
}
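
A note on ownership, which explains the type check just above: the RMRR entries added by intel_iommu_get_resv_regions() are the long-lived rmrru->resv objects freed in intel_iommu_free_dmars(), so intel_iommu_put_resv_regions() must only free the per-call IOAPIC placeholder, i.e. the entries it allocated itself with type IOMMU_RESV_RESERVED.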
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
-static const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+const struct iommu_ops intel_iommu_ops = {
+ .capable = intel_iommu_capable,
+ .domain_alloc = intel_iommu_domain_alloc,
+ .domain_free = intel_iommu_domain_free,
+ .attach_dev = intel_iommu_attach_device,
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
+ .get_resv_regions = intel_iommu_get_resv_regions,
+ .put_resv_regions = intel_iommu_put_resv_regions,
+ .device_group = pci_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 0769276c0537..1c049e2e12bf 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if (!(prot & IOMMU_MMIO))
pte |= ARM_V7S_ATTR_TEX(1);
if (ap) {
- pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+ pte |= ARM_V7S_PTE_AF;
+ if (!(prot & IOMMU_PRIV))
+ pte |= ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE))
pte |= ARM_V7S_PTE_AP_RDONLY;
}
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
+ if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
+ prot |= IOMMU_PRIV;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a40ce3406fef..feacc54bec68 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
- pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+ pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
+ if (!(prot & IOMMU_PRIV))
+ pte |= ARM_LPAE_PTE_AP_UNPRIV;
+
if (prot & IOMMU_MMIO)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 39b2d9127dbf..c58351ed61c1 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
postcore_initcall(iommu_dev_init);
/*
- * Create an IOMMU device and return a pointer to it. IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
*/
-struct device *iommu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+ struct device *parent,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
{
- struct device *dev;
va_list vargs;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
+ device_initialize(&iommu->dev);
- device_initialize(dev);
-
- dev->class = &iommu_class;
- dev->parent = parent;
- dev->groups = groups;
- dev_set_drvdata(dev, drvdata);
+ iommu->dev.class = &iommu_class;
+ iommu->dev.parent = parent;
+ iommu->dev.groups = groups;
va_start(vargs, fmt);
- ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+ ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
va_end(vargs);
if (ret)
goto error;
- ret = device_add(dev);
+ ret = device_add(&iommu->dev);
if (ret)
goto error;
- return dev;
+ return 0;
error:
- put_device(dev);
- return ERR_PTR(ret);
+ put_device(&iommu->dev);
+ return ret;
}
-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
- if (!dev || IS_ERR(dev))
- return;
-
- device_unregister(dev);
+ device_unregister(&iommu->dev);
}
-
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
* this interface. A link to the device will be created in the "devices"
* directory of the IOMMU device in sysfs and an "iommu" link will be
* created under the linked device, pointing back at the IOMMU device.
*/
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
int ret;
- if (!dev || IS_ERR(dev))
+ if (!iommu || IS_ERR(iommu))
return -ENODEV;
- ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+ ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
return ret;
- ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
if (ret)
- sysfs_remove_link_from_group(&dev->kobj, "devices",
+ sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
dev_name(link));
return ret;
}
-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
- if (!dev || IS_ERR(dev))
+ if (!iommu || IS_ERR(iommu))
return;
sysfs_remove_link(&link->kobj, "iommu");
- sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+ sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dbe7f653bb7c..8ea14f41a979 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -55,7 +55,7 @@ struct iommu_group {
struct iommu_domain *domain;
};
-struct iommu_device {
+struct group_device {
struct list_head list;
struct device *dev;
char *name;
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
const char *buf, size_t count);
};
+static const char * const iommu_group_resv_type_string[] = {
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+};
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
#define to_iommu_group(_kobj) \
container_of(_kobj, struct iommu_group, kobj)
+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+ spin_lock(&iommu_device_lock);
+ list_add_tail(&iommu->list, &iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+
+ return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+ spin_lock(&iommu_device_lock);
+ list_del(&iommu->list);
+ spin_unlock(&iommu_device_lock);
+}
+
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
return sprintf(buf, "%s\n", group->name);
}
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is inserted in address order relative to the other
+ * regions of the same type. If it overlaps a region of the same type,
+ * the two are merged. If it overlaps a region of a different type,
+ * both regions are kept separate.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
+{
+ struct iommu_resv_region *region;
+ phys_addr_t start = new->start;
+ phys_addr_t end = new->start + new->length - 1;
+ struct list_head *pos = regions->next;
+
+ while (pos != regions) {
+ struct iommu_resv_region *entry =
+ list_entry(pos, struct iommu_resv_region, list);
+ phys_addr_t a = entry->start;
+ phys_addr_t b = entry->start + entry->length - 1;
+ int type = entry->type;
+
+ if (end < a) {
+ goto insert;
+ } else if (start > b) {
+ pos = pos->next;
+ } else if ((start >= a) && (end <= b)) {
+ if (new->type == type)
+ goto done;
+ else
+ pos = pos->next;
+ } else {
+ if (new->type == type) {
+ phys_addr_t new_start = min(a, start);
+ phys_addr_t new_end = max(b, end);
+
+ list_del(&entry->list);
+ entry->start = new_start;
+ entry->length = new_end - new_start + 1;
+ iommu_insert_resv_region(entry, regions);
+ } else {
+ pos = pos->next;
+ }
+ }
+ }
+insert:
+ region = iommu_alloc_resv_region(new->start, new->length,
+ new->prot, new->type);
+ if (!region)
+ return -ENOMEM;
+
+ list_add_tail(&region->list, pos);
+done:
+ return 0;
+}
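
A worked example of the insertion rules, with made-up addresses:

/*
 * Same type, overlapping -> merged:
 *   existing: [0x2000, 0x3fff] IOMMU_RESV_DIRECT
 *   insert:   [0x1000, 0x2fff] IOMMU_RESV_DIRECT
 *   result:   [0x1000, 0x3fff] IOMMU_RESV_DIRECT
 *
 * Different types, overlapping -> kept separate, sorted by start address:
 *   existing: [0x2000, 0x3fff] IOMMU_RESV_DIRECT
 *   insert:   [0x3000, 0x4fff] IOMMU_RESV_MSI
 *   result:   [0x2000, 0x3fff] direct, then [0x3000, 0x4fff] msi
 */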
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+ struct list_head *group_resv_regions)
+{
+ struct iommu_resv_region *entry;
+ int ret = 0;
+
+ list_for_each_entry(entry, dev_resv_regions, list) {
+ ret = iommu_insert_resv_region(entry, group_resv_regions);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ struct group_device *device;
+ int ret = 0;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ struct list_head dev_resv_regions;
+
+ INIT_LIST_HEAD(&dev_resv_regions);
+ iommu_get_resv_regions(device->dev, &dev_resv_regions);
+ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+ iommu_put_resv_regions(device->dev, &dev_resv_regions);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+ char *buf)
+{
+ struct iommu_resv_region *region, *next;
+ struct list_head group_resv_regions;
+ char *str = buf;
+
+ INIT_LIST_HEAD(&group_resv_regions);
+ iommu_get_group_resv_regions(group, &group_resv_regions);
+
+ list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+ str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+ (long long int)region->start,
+ (long long int)(region->start +
+ region->length - 1),
+ iommu_group_resv_type_string[region->type]);
+ kfree(region);
+ }
+
+ return (str - buf);
+}
+
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+ iommu_group_show_resv_regions, NULL);
+
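
For illustration, the new attribute appears as /sys/kernel/iommu_groups/<id>/reserved_regions; given the sprintf() format above, each line is "<start> <end> <type>" with an inclusive end address. Made-up sample contents (on x86, such entries would come from intel_iommu_get_resv_regions() earlier in this series):

0x000000009d000000 0x000000009effffff direct
0x00000000fee00000 0x00000000feefffff reserved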
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
*/
kobject_put(&group->kobj);
+ ret = iommu_group_create_file(group,
+ &iommu_group_attr_reserved_regions);
+ if (ret)
+ return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);
return group;
@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
- struct iommu_dm_region *entry;
+ struct iommu_resv_region *entry;
struct list_head mappings;
unsigned long pg_size;
int ret = 0;
@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
- iommu_get_dm_regions(dev, &mappings);
+ iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
- if (domain->ops->apply_dm_region)
- domain->ops->apply_dm_region(dev, domain, entry);
+ if (domain->ops->apply_resv_region)
+ domain->ops->apply_resv_region(dev, domain, entry);
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
+ if (entry->type != IOMMU_RESV_DIRECT)
+ continue;
+
for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;
@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
}
out:
- iommu_put_dm_regions(dev, &mappings);
+ iommu_put_resv_regions(dev, &mappings);
return ret;
}
@@ -374,7 +530,7 @@ out:
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
int ret, i = 0;
- struct iommu_device *device;
+ struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -424,8 +574,10 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -436,6 +588,21 @@ rename:
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
- struct iommu_device *tmp_device, *device = NULL;
+ struct group_device *tmp_device, *device = NULL;
pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static int iommu_group_device_count(struct iommu_group *group)
{
- struct iommu_device *entry;
+ struct group_device *entry;
int ret = 0;
list_for_each_entry(entry, &group->devices, list)
@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
- struct iommu_device *device;
+ struct group_device *device;
int ret = 0;
list_for_each_entry(device, &group->devices, list) {
@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->get_dm_regions)
- ops->get_dm_regions(dev, list);
+ if (ops && ops->get_resv_regions)
+ ops->get_resv_regions(dev, list);
}
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->put_dm_regions)
- ops->put_dm_regions(dev, list);
+ if (ops && ops->put_resv_regions)
+ ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+ size_t length,
+ int prot, int type)
+{
+ struct iommu_resv_region *region;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ INIT_LIST_HEAD(&region->list);
+ region->start = start;
+ region->length = length;
+ region->prot = prot;
+ region->type = type;
+ return region;
}
/* Request that a device is direct mapped by the IOMMU */
@@ -1628,43 +1813,18 @@ out:
return ret;
}
-struct iommu_instance {
- struct list_head list;
- struct fwnode_handle *fwnode;
- const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
- struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
- if (WARN_ON(!iommu))
- return;
-
- of_node_get(to_of_node(fwnode));
- INIT_LIST_HEAD(&iommu->list);
- iommu->fwnode = fwnode;
- iommu->ops = ops;
- spin_lock(&iommu_instance_lock);
- list_add_tail(&iommu->list, &iommu_instance_list);
- spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
-{
- struct iommu_instance *instance;
const struct iommu_ops *ops = NULL;
+ struct iommu_device *iommu;
- spin_lock(&iommu_instance_lock);
- list_for_each_entry(instance, &iommu_instance_list, list)
- if (instance->fwnode == fwnode) {
- ops = instance->ops;
+ spin_lock(&iommu_device_lock);
+ list_for_each_entry(iommu, &iommu_device_list, list)
+ if (iommu->fwnode == fwnode) {
+ ops = iommu->ops;
break;
}
- spin_unlock(&iommu_instance_lock);
+ spin_unlock(&iommu_device_lock);
return ops;
}
@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
+
+ dev->iommu_fwspec = fwspec;
}
for (i = 0; i < num_ids; i++)
fwspec->ids[fwspec->num_ids + i] = ids[i];
fwspec->num_ids += num_ids;
- dev->iommu_fwspec = fwspec;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 080beca0197d..b7268a14184f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
else {
struct rb_node *prev_node = rb_prev(iovad->cached32_node);
struct iova *curr_iova =
- container_of(iovad->cached32_node, struct iova, node);
+ rb_entry(iovad->cached32_node, struct iova, node);
*limit_pfn = curr_iova->pfn_lo - 1;
return prev_node;
}
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
if (!iovad->cached32_node)
return;
curr = iovad->cached32_node;
- cached_iova = container_of(curr, struct iova, node);
+ cached_iova = rb_entry(curr, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo) {
struct rb_node *node = rb_next(&free->node);
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
/* only cache if it's below 32bit pfn */
if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr = __get_cached_rbnode(iovad, &limit_pfn);
prev = curr;
while (curr) {
- struct iova *curr_iova = container_of(curr, struct iova, node);
+ struct iova *curr_iova = rb_entry(curr, struct iova, node);
if (limit_pfn < curr_iova->pfn_lo)
goto move_left;
@@ -171,8 +171,7 @@ move_left:
/* Figure out where to put new node */
while (*entry) {
- struct iova *this = container_of(*entry,
- struct iova, node);
+ struct iova *this = rb_entry(*entry, struct iova, node);
parent = *entry;
if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
struct rb_node **new = &(root->rb_node), *parent = NULL;
/* Figure out where to put new node */
while (*new) {
- struct iova *this = container_of(*new, struct iova, node);
+ struct iova *this = rb_entry(*new, struct iova, node);
parent = *new;
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
assert_spin_locked(&iovad->iova_rbtree_lock);
while (node) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
/* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot);
while (node) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
rb_erase(node, &iovad->rbroot);
free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
__is_range_overlap(struct rb_node *node,
unsigned long pfn_lo, unsigned long pfn_hi)
{
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
- iova = container_of(node, struct iova, node);
+ iova = rb_entry(node, struct iova, node);
__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
if ((pfn_lo >= iova->pfn_lo) &&
(pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova;
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace331da6459..b7e14ee863f9 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.ias = 32;
domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops;
+ domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->io_domain.geometry.force_aperture = true;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling. For now, delegate it to the io-pgtable code.
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b09692bb5b0a..d0448353d501 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
return 0;
}
+/* Must be called under msm_iommu_lock */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+ struct msm_iommu_dev *iommu, *ret = NULL;
+ struct msm_iommu_ctx_dev *master;
+
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = iommu;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int msm_iommu_add_device(struct device *dev)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ iommu = find_iommu_for_dev(dev);
+ if (iommu)
+ iommu_device_link(&iommu->iommu, dev);
+ else
+ ret = -ENODEV;
+
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return ret;
+}
+
+static void msm_iommu_remove_device(struct device *dev)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ iommu = find_iommu_for_dev(dev);
+ if (iommu)
+ iommu_device_unlink(&iommu->iommu, dev);
+
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
.unmap = msm_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
+ .add_device = msm_iommu_add_device,
+ .remove_device = msm_iommu_remove_device,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
};
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
static int msm_iommu_probe(struct platform_device *pdev)
{
struct resource *r;
+ resource_size_t ioaddr;
struct msm_iommu_dev *iommu;
int ret, par, val;
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
ret = PTR_ERR(iommu->base);
goto fail;
}
+ ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
- of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+ "msm-smmu.%pa", &ioaddr);
+ if (ret) {
+ pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+ goto fail;
+ }
+
+ iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&iommu->iommu);
+ if (ret) {
+ pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+ goto fail;
+ }
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 4ca25d50d679..ae92d2779c42 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -19,6 +19,7 @@
#define MSM_IOMMU_H
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/clk.h>
/* Sharability attributes of MSM IOMMU mappings */
@@ -68,6 +69,8 @@ struct msm_iommu_dev {
struct list_head dom_node;
struct list_head ctx_list;
DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+ struct iommu_device iommu;
};
/**
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 1479c76ece9e..5d14cd15198d 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_add_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
struct iommu_group *group;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not a iommu client device */
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_link(&data->iommu, dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
+
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_unlink(&data->iommu, dev);
+
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
+ resource_size_t ioaddr;
struct component_match *match = NULL;
void *protect;
int i, larb_nr, ret;
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
+ ioaddr = res->start;
data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0)
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+ "mtk-iommu.%pa", &ioaddr);
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+ iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ iommu_device_sysfs_remove(&data->iommu);
+ iommu_device_unregister(&data->iommu);
+
if (iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, NULL);
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
return ret;
}
- of_iommu_set_ops(np, &mtk_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 50177f738e4e..2a28eadeea0e 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,8 @@ struct mtk_iommu_data {
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB;
+
+ struct iommu_device iommu;
};
static inline int compare_of(struct device *dev, void *data)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 0f57ddc4ecc2..2683e9fc0dcf 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -127,7 +127,7 @@ static const struct iommu_ops
"iommu-map-mask", &iommu_spec.np, iommu_spec.args))
return NULL;
- ops = of_iommu_get_ops(iommu_spec.np);
+ ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
ops->of_xlate(&pdev->dev, &iommu_spec))
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
"#iommu-cells", idx,
&iommu_spec)) {
np = iommu_spec.np;
- ops = of_iommu_get_ops(np);
+ ops = iommu_ops_from_fwnode(&np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(dev, &np->fwnode, ops) ||
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ae96731cd2fb..125528f39e92 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -283,3 +283,12 @@ config EZNPS_GIC
config STM32_EXTI
bool
select IRQ_DOMAIN
+
+config QCOM_IRQ_COMBINER
+ bool "QCOM IRQ combiner support"
+ depends on ARCH_QCOM && ACPI
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Qualcomm Technologies chips.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 0e55d94065bf..152bc40b6762 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_ARCH_GEMINI) += irq-gemini.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
@@ -75,3 +76,4 @@ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
+obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
diff --git a/drivers/irqchip/irq-gemini.c b/drivers/irqchip/irq-gemini.c
new file mode 100644
index 000000000000..495224c743ee
--- /dev/null
+++ b/drivers/irqchip/irq-gemini.c
@@ -0,0 +1,185 @@
+/*
+ * irqchip for the Cortina Systems Gemini
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-gemini/irq.c
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/versatile-fpga.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define GEMINI_NUM_IRQS 32
+
+#define GEMINI_IRQ_SOURCE(base_addr) (base_addr + 0x00)
+#define GEMINI_IRQ_MASK(base_addr) (base_addr + 0x04)
+#define GEMINI_IRQ_CLEAR(base_addr) (base_addr + 0x08)
+#define GEMINI_IRQ_MODE(base_addr) (base_addr + 0x0C)
+#define GEMINI_IRQ_POLARITY(base_addr) (base_addr + 0x10)
+#define GEMINI_IRQ_STATUS(base_addr) (base_addr + 0x14)
+#define GEMINI_FIQ_SOURCE(base_addr) (base_addr + 0x20)
+#define GEMINI_FIQ_MASK(base_addr) (base_addr + 0x24)
+#define GEMINI_FIQ_CLEAR(base_addr) (base_addr + 0x28)
+#define GEMINI_FIQ_MODE(base_addr) (base_addr + 0x2C)
+#define GEMINI_FIQ_POLARITY(base_addr) (base_addr + 0x30)
+#define GEMINI_FIQ_STATUS(base_addr) (base_addr + 0x34)
+
+/**
+ * struct gemini_irq_data - irq data container for the Gemini IRQ controller
+ * @base: memory offset in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct gemini_irq_data {
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+};
+
+static void gemini_irq_mask(struct irq_data *d)
+{
+ struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+ unsigned int mask;
+
+ mask = readl(GEMINI_IRQ_MASK(g->base));
+ mask &= ~BIT(irqd_to_hwirq(d));
+ writel(mask, GEMINI_IRQ_MASK(g->base));
+}
+
+static void gemini_irq_unmask(struct irq_data *d)
+{
+ struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+ unsigned int mask;
+
+ mask = readl(GEMINI_IRQ_MASK(g->base));
+ mask |= BIT(irqd_to_hwirq(d));
+ writel(mask, GEMINI_IRQ_MASK(g->base));
+}
+
+static void gemini_irq_ack(struct irq_data *d)
+{
+ struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), GEMINI_IRQ_CLEAR(g->base));
+}
+
+static int gemini_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct gemini_irq_data *g = irq_data_get_irq_chip_data(d);
+ int offset = irqd_to_hwirq(d);
+ u32 mode, polarity;
+
+ mode = readl(GEMINI_IRQ_MODE(g->base));
+ polarity = readl(GEMINI_IRQ_POLARITY(g->base));
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH)) {
+ irq_set_handler_locked(d, handle_level_irq);
+ /* Disable edge detection */
+ mode &= ~BIT(offset);
+ polarity &= ~BIT(offset);
+ } else if (trigger & IRQ_TYPE_EDGE_RISING) {
+ irq_set_handler_locked(d, handle_edge_irq);
+ mode |= BIT(offset);
+ polarity |= BIT(offset);
+ } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+ irq_set_handler_locked(d, handle_edge_irq);
+ mode |= BIT(offset);
+ polarity &= ~BIT(offset);
+ } else {
+ irq_set_handler_locked(d, handle_bad_irq);
+ pr_warn("GEMINI IRQ: no supported trigger selected for line %d\n",
+ offset);
+ }
+
+ writel(mode, GEMINI_IRQ_MODE(g->base));
+ writel(polarity, GEMINI_IRQ_POLARITY(g->base));
+
+ return 0;
+}
+
+static struct irq_chip gemini_irq_chip = {
+ .name = "GEMINI",
+ .irq_ack = gemini_irq_ack,
+ .irq_mask = gemini_irq_mask,
+ .irq_unmask = gemini_irq_unmask,
+ .irq_set_type = gemini_irq_set_type,
+};
+
+/* Local static for the IRQ entry call */
+static struct gemini_irq_data girq;
+
+asmlinkage void __exception_irq_entry gemini_irqchip_handle_irq(struct pt_regs *regs)
+{
+ struct gemini_irq_data *g = &girq;
+ int irq;
+ u32 status;
+
+ while ((status = readl(GEMINI_IRQ_STATUS(g->base)))) {
+ irq = ffs(status) - 1;
+ handle_domain_irq(g->domain, irq, regs);
+ }
+}
+
+static int gemini_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct gemini_irq_data *g = d->host_data;
+
+ irq_set_chip_data(irq, g);
+ /* All IRQs should set up their type, flags as bad by default */
+ irq_set_chip_and_handler(irq, &gemini_irq_chip, handle_bad_irq);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static void gemini_irqdomain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops gemini_irqdomain_ops = {
+ .map = gemini_irqdomain_map,
+ .unmap = gemini_irqdomain_unmap,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init gemini_of_init_irq(struct device_node *node,
+ struct device_node *parent)
+{
+ struct gemini_irq_data *g = &girq;
+
+ /*
+ * Disable the idle handler by default since it is buggy
+ * For more info see arch/arm/mach-gemini/idle.c
+ */
+ cpu_idle_poll_ctrl(true);
+
+ g->base = of_iomap(node, 0);
+ WARN(!g->base, "unable to map gemini irq registers\n");
+
+ /* Disable all interrupts */
+ writel(0, GEMINI_IRQ_MASK(g->base));
+ writel(0, GEMINI_FIQ_MASK(g->base));
+
+ g->domain = irq_domain_add_simple(node, GEMINI_NUM_IRQS, 0,
+ &gemini_irqdomain_ops, g);
+ set_handle_irq(gemini_irqchip_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(gemini, "cortina,gemini-interrupt-controller",
+ gemini_of_init_irq);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 69b040f47d56..23201004fd7a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -161,7 +161,7 @@ struct its_cmd_desc {
struct its_device *dev;
u32 phys_id;
u32 event_id;
- } its_mapvi_cmd;
+ } its_mapti_cmd;
struct {
struct its_device *dev;
@@ -193,58 +193,56 @@ struct its_cmd_block {
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
struct its_cmd_desc *);
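+/*
+ * Clear bits [h:l] of the raw command word and insert @val there, so the
+ * its_encode_*() helpers below can each set their own field without
+ * touching the rest of the command.
+ */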
+static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
+{
+ u64 mask = GENMASK_ULL(h, l);
+ *raw_cmd &= ~mask;
+ *raw_cmd |= (val << l) & mask;
+}
+
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
- cmd->raw_cmd[0] &= ~0xffULL;
- cmd->raw_cmd[0] |= cmd_nr;
+ its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
- cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
- cmd->raw_cmd[0] |= ((u64)devid) << 32;
+ its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
- cmd->raw_cmd[1] &= ~0xffffffffULL;
- cmd->raw_cmd[1] |= id;
+ its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
- cmd->raw_cmd[1] &= 0xffffffffULL;
- cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
+ its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
- cmd->raw_cmd[1] &= ~0x1fULL;
- cmd->raw_cmd[1] |= size & 0x1f;
+ its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- cmd->raw_cmd[2] &= ~0xffffffffffffULL;
- cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
+ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
- cmd->raw_cmd[2] &= ~(1ULL << 63);
- cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
+ its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
- cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
+ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
- cmd->raw_cmd[2] &= ~0xffffULL;
- cmd->raw_cmd[2] |= col;
+ its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
@@ -289,18 +287,18 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
return desc->its_mapc_cmd.col;
}
-static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
- col = dev_event_to_col(desc->its_mapvi_cmd.dev,
- desc->its_mapvi_cmd.event_id);
+ col = dev_event_to_col(desc->its_mapti_cmd.dev,
+ desc->its_mapti_cmd.event_id);
- its_encode_cmd(cmd, GITS_CMD_MAPVI);
- its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
- its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
- its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
+ its_encode_cmd(cmd, GITS_CMD_MAPTI);
+ its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
+ its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
its_encode_collection(cmd, col->col_id);
its_fixup_cmd(cmd);
@@ -413,6 +411,12 @@ static struct its_cmd_block *its_allocate_entry(struct its_node *its)
if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
its->cmd_write = its->cmd_base;
+ /* Clear command */
+ cmd->raw_cmd[0] = 0;
+ cmd->raw_cmd[1] = 0;
+ cmd->raw_cmd[2] = 0;
+ cmd->raw_cmd[3] = 0;
+
return cmd;
}
@@ -531,15 +535,15 @@ static void its_send_mapc(struct its_node *its, struct its_collection *col,
its_send_single_command(its, its_build_mapc_cmd, &desc);
}
-static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
+static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
struct its_cmd_desc desc;
- desc.its_mapvi_cmd.dev = dev;
- desc.its_mapvi_cmd.phys_id = irq_id;
- desc.its_mapvi_cmd.event_id = id;
+ desc.its_mapti_cmd.dev = dev;
+ desc.its_mapti_cmd.phys_id = irq_id;
+ desc.its_mapti_cmd.event_id = id;
- its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
+ its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}
static void its_send_movi(struct its_device *dev,
@@ -824,7 +828,7 @@ static int __init its_alloc_lpi_tables(void)
static const char *its_base_type_string[] = {
[GITS_BASER_TYPE_DEVICE] = "Devices",
[GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
- [GITS_BASER_TYPE_CPU] = "Physical CPUs",
+ [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
[GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
[GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
[GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
@@ -960,7 +964,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
u32 psz, u32 *order)
{
u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
- u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
u32 ids = its->device_ids;
u32 new_order = *order;
bool indirect = false;
@@ -1025,7 +1029,7 @@ static int its_alloc_tables(struct its_node *its)
u64 typer = gic_read_typer(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
- u64 cache = GITS_BASER_WaWb;
+ u64 cache = GITS_BASER_RaWaWb;
u32 psz = SZ_64K;
int err, i;
@@ -1122,7 +1126,7 @@ static void its_cpu_init_lpis(void)
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
GICR_PROPBASER_InnerShareable |
- GICR_PROPBASER_WaWb |
+ GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
@@ -1147,7 +1151,7 @@ static void its_cpu_init_lpis(void)
/* set PENDBASE */
val = (page_to_phys(pend_page) |
GICR_PENDBASER_InnerShareable |
- GICR_PENDBASER_WaWb);
+ GICR_PENDBASER_RaWaWb);
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
@@ -1498,7 +1502,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
- its_send_mapvi(its_dev, d->hwirq, event);
+ its_send_mapti(its_dev, d->hwirq, event);
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -1642,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent;
inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
@@ -1693,7 +1698,8 @@ static int __init its_probe_one(struct resource *res,
its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->numa_node = numa_node;
- its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
+ its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
if (!its->cmd_base) {
err = -ENOMEM;
goto out_free_its;
@@ -1711,7 +1717,7 @@ static int __init its_probe_one(struct resource *res,
goto out_free_tables;
baser = (virt_to_phys(its->cmd_base) |
- GITS_CBASER_WaWb |
+ GITS_CBASER_RaWaWb |
GITS_CBASER_InnerShareable |
(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
@@ -1751,7 +1757,7 @@ static int __init its_probe_one(struct resource *res,
out_free_tables:
its_free_tables(its);
out_free_cmd:
- kfree(its->cmd_base);
+ free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
kfree(its);
out_unmap:
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 54a5e870a8f5..efbcf8435185 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -19,9 +19,9 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@ struct keystone_irq_device {
struct irq_domain *irqd;
struct regmap *devctrl_regs;
u32 devctrl_offset;
+ raw_spinlock_t wa_lock;
};
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
/* nothing to do here */
}
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
{
- unsigned int irq = irq_desc_get_irq(desc);
- struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+ struct keystone_irq_device *kirq = keystone_irq;
+ unsigned long wa_lock_flags;
unsigned long pending;
int src, virq;
dev_dbg(kirq->dev, "start irq %d\n", irq);
- chained_irq_enter(irq_desc_get_chip(desc), desc);
-
pending = keystone_irq_readl(kirq);
keystone_irq_writel(kirq, pending);
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
if (!virq)
dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
src, virq);
+ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
generic_handle_irq(virq);
+ raw_spin_unlock_irqrestore(&kirq->wa_lock,
+ wa_lock_flags);
}
}
- chained_irq_exit(irq_desc_get_chip(desc), desc);
-
dev_dbg(kirq->dev, "end irq %d\n", irq);
+ return IRQ_HANDLED;
}
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
return -ENODEV;
}
+ raw_spin_lock_init(&kirq->wa_lock);
+
platform_set_drvdata(pdev, kirq);
- irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+ ret = request_irq(kirq->irq, keystone_irq_handler,
+ 0, dev_name(dev), kirq);
+ if (ret) {
+ irq_domain_remove(kirq->irqd);
+ return ret;
+ }
/* clear all source bits */
keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
+ free_irq(kirq->irq, kirq);
+
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c01c09e9916d..11d12bccc4e7 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -968,6 +968,34 @@ static struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match,
};
+static void __init gic_map_single_int(struct device_node *node,
+ unsigned int irq)
+{
+ unsigned int linux_irq;
+ struct irq_fwspec local_int_fwspec = {
+ .fwnode = &node->fwnode,
+ .param_count = 3,
+ .param = {
+ [0] = GIC_LOCAL,
+ [1] = irq,
+ [2] = IRQ_TYPE_NONE,
+ },
+ };
+
+ if (!gic_local_irq_is_routable(irq))
+ return;
+
+ linux_irq = irq_create_fwspec_mapping(&local_int_fwspec);
+ WARN_ON(!linux_irq);
+}
+
+static void __init gic_map_interrupts(struct device_node *node)
+{
+ gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
+ gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
+ gic_map_single_int(node, GIC_LOCAL_INT_FDC);
+}
+
static void __init __gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size,
unsigned int cpu_vec, unsigned int irqbase,
@@ -1067,6 +1095,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
}
gic_basic_init();
+ gic_map_interrupts(node);
}
void __init gic_init(unsigned long gic_base_addr,
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 17304705f2cf..05fa9f7af53c 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = icoll_mask_irq,
.irq_unmask = icoll_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
static struct irq_chip asm9260_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = asm9260_mask_irq,
.irq_unmask = asm9260_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
new file mode 100644
index 000000000000..226558698344
--- /dev/null
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -0,0 +1,296 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Driver for interrupt combiners in the Top-level Control and Status
+ * Registers (TCSR) hardware block in Qualcomm Technologies chips.
+ * An interrupt combiner in this block combines a set of interrupts by
+ * OR'ing the individual interrupt signals into a summary interrupt
+ * signal routed to a parent interrupt controller, and provides read-
+ * only, 32-bit registers to query the status of individual interrupts.
+ * The status bit for IRQ n is bit (n % 32) within register (n / 32)
+ * of the given combiner. Thus, each combiner can be described as a set
+ * of register offsets and the number of IRQs managed.
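+ *
+ * For example, IRQ 37 of a combiner is reported in bit 5 (37 % 32) of its
+ * second status register (37 / 32 == 1).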
+ */
+
+#define pr_fmt(fmt) "QCOM80B1:" fmt
+
+#include <linux/acpi.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#define REG_SIZE 32
+
+struct combiner_reg {
+ void __iomem *addr;
+ unsigned long enabled;
+};
+
+struct combiner {
+ struct irq_domain *domain;
+ int parent_irq;
+ u32 nirqs;
+ u32 nregs;
+ struct combiner_reg regs[0];
+};
+
+static inline int irq_nr(u32 reg, u32 bit)
+{
+ return reg * REG_SIZE + bit;
+}
+
+/*
+ * Handler for the cascaded IRQ.
+ */
+static void combiner_handle_irq(struct irq_desc *desc)
+{
+ struct combiner *combiner = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 reg;
+
+ chained_irq_enter(chip, desc);
+
+ for (reg = 0; reg < combiner->nregs; reg++) {
+ int virq;
+ int hwirq;
+ u32 bit;
+ u32 status;
+
+ bit = readl_relaxed(combiner->regs[reg].addr);
+ status = bit & combiner->regs[reg].enabled;
+ if (!status)
+ pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
+ smp_processor_id(), bit,
+ combiner->regs[reg].enabled,
+ combiner->regs[reg].addr);
+
+ while (status) {
+ bit = __ffs(status);
+ status &= ~(1 << bit);
+ hwirq = irq_nr(reg, bit);
+ virq = irq_find_mapping(combiner->domain, hwirq);
+ if (virq > 0)
+ generic_handle_irq(virq);
+
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void combiner_irq_chip_mask_irq(struct irq_data *data)
+{
+ struct combiner *combiner = irq_data_get_irq_chip_data(data);
+ struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+ clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static void combiner_irq_chip_unmask_irq(struct irq_data *data)
+{
+ struct combiner *combiner = irq_data_get_irq_chip_data(data);
+ struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+ set_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static struct irq_chip irq_chip = {
+ .irq_mask = combiner_irq_chip_mask_irq,
+ .irq_unmask = combiner_irq_chip_unmask_irq,
+ .name = "qcom-irq-combiner"
+};
+
+static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_noprobe(irq);
+ return 0;
+}
+
+static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
+{
+ irq_domain_reset_irq_data(irq_get_irq_data(irq));
+}
+
+static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct combiner *combiner = d->host_data;
+
+ if (is_acpi_node(fws->fwnode)) {
+ if (WARN_ON((fws->param_count != 2) ||
+ (fws->param[0] >= combiner->nirqs) ||
+ (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
+ (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
+ return -EINVAL;
+
+ *hwirq = fws->param[0];
+ *type = fws->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct irq_domain_ops domain_ops = {
+ .map = combiner_irq_map,
+ .unmap = combiner_irq_unmap,
+ .translate = combiner_irq_translate
+};
+
+static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
+{
+ int *count = context;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ ++(*count);
+ return AE_OK;
+}
+
+static int count_registers(struct platform_device *pdev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ int count = 0;
+
+ if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+ return -EINVAL;
+
+ status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+ count_registers_cb, &count);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ return count;
+}
+
+struct get_registers_context {
+ struct device *dev;
+ struct combiner *combiner;
+ int err;
+};
+
+static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
+{
+ struct get_registers_context *ctx = context;
+ struct acpi_resource_generic_register *reg;
+ phys_addr_t paddr;
+ void __iomem *vaddr;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &ares->data.generic_reg;
+ paddr = reg->address;
+ if ((reg->space_id != ACPI_SPACE_MEM) ||
+ (reg->bit_offset != 0) ||
+ (reg->bit_width > REG_SIZE)) {
+ dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
+ ctx->err = -EINVAL;
+ return AE_ERROR;
+ }
+
+ vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
+ if (!vaddr) {
+ dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
+ ctx->err = -ENOMEM;
+ return AE_ERROR;
+ }
+
+ ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
+ ctx->combiner->nirqs += reg->bit_width;
+ ctx->combiner->nregs++;
+ return AE_OK;
+}
+
+static int get_registers(struct platform_device *pdev, struct combiner *comb)
+{
+ acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ struct get_registers_context ctx;
+
+ if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+ return -EINVAL;
+
+ ctx.dev = &pdev->dev;
+ ctx.combiner = comb;
+ ctx.err = 0;
+
+ status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+ get_registers_cb, &ctx);
+ if (ACPI_FAILURE(status))
+ return ctx.err;
+ return 0;
+}
+
+static int __init combiner_probe(struct platform_device *pdev)
+{
+ struct combiner *combiner;
+ size_t alloc_sz;
+	int nregs;
+ int err;
+
+ nregs = count_registers(pdev);
+ if (nregs <= 0) {
+ dev_err(&pdev->dev, "Error reading register resources\n");
+ return -EINVAL;
+ }
+
+ alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
+ combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
+ if (!combiner)
+ return -ENOMEM;
+
+ err = get_registers(pdev, combiner);
+ if (err < 0)
+ return err;
+
+ combiner->parent_irq = platform_get_irq(pdev, 0);
+ if (combiner->parent_irq <= 0) {
+ dev_err(&pdev->dev, "Error getting IRQ resource\n");
+ return -EPROBE_DEFER;
+ }
+
+ combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
+ &domain_ops, combiner);
+ if (!combiner->domain)
+ /* Errors printed by irq_domain_create_linear */
+ return -ENODEV;
+
+ irq_set_chained_handler_and_data(combiner->parent_irq,
+ combiner_handle_irq, combiner);
+
+ dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
+ combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
+ return 0;
+}
+
+static const struct acpi_device_id qcom_irq_combiner_ids[] = {
+ { "QCOM80B1", },
+ { }
+};
+
+static struct platform_driver qcom_irq_combiner_probe = {
+ .driver = {
+ .name = "qcom-irq-combiner",
+ .acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
+ },
+ .probe = combiner_probe,
+};
+
+static int __init register_qcom_irq_combiner(void)
+{
+ return platform_driver_register(&qcom_irq_combiner_probe);
+}
+device_initcall(register_qcom_irq_combiner);
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1a1d99704fe6..3b11422b1cce 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, word *, byte **, byte);
+static void IndParse(PLCI *, const word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
@@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci)
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
- word parms_id[] =
+ static const word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
@@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci)
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
- word multi_fac_id[] = {1, FTY};
- word multi_pi_id[] = {1, PI};
- word multi_CiPN_id[] = {1, OAD};
- word multi_ssext_id[] = {1, ESC_SSEXT};
+ static const word multi_fac_id[] = {1, FTY};
+ static const word multi_pi_id[] = {1, PI};
+ static const word multi_CiPN_id[] = {1, OAD};
+ static const word multi_ssext_id[] = {1, ESC_SSEXT};
- word multi_vswitch_id[] = {1, ESC_VSWITCH};
+ static const word multi_vswitch_id[] = {1, ESC_VSWITCH};
byte *cau;
word ncci;
@@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/
-static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
+static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
{
word ploc; /* points to current location within packet */
byte w;
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
((CAPI_MSG *) msg)->header.ncci = 0;
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
if (w != _QUEUE_FULL)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b324474c0c12 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -203,7 +203,7 @@ mISDNStackd(void *data)
{
struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
- cputime_t utime, stime;
+ u64 utime, stime;
#endif
int err = 0;
@@ -308,7 +308,7 @@ mISDNStackd(void *data)
st->stopped_cnt);
task_cputime(st->thread, &utime, &stime);
printk(KERN_DEBUG
- "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
+ "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
dev_name(&st->dev->dev), utime, stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c621cbbb5768..275f467956ee 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -29,6 +29,15 @@ config LEDS_CLASS_FLASH
for the flash related features of a LED device. It can be built
as a module.
+config LEDS_BRIGHTNESS_HW_CHANGED
+ bool "LED Class brightness_hw_changed attribute support"
+ depends on LEDS_CLASS
+ help
+ This option enables support for the brightness_hw_changed attribute
+	  for LED sysfs class devices under /sys/class/leds.
+
+ See Documentation/ABI/testing/sysfs-class-led for details.
+
comment "LED drivers"
config LEDS_88PM860X
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 326ee6e925a2..f2b0a80a62b4 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -103,6 +103,68 @@ static const struct attribute_group *led_groups[] = {
NULL,
};
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+static ssize_t brightness_hw_changed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (led_cdev->brightness_hw_changed == -1)
+ return -ENODATA;
+
+ return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
+}
+
+static DEVICE_ATTR_RO(brightness_hw_changed);
+
+static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev;
+ int ret;
+
+ ret = device_create_file(dev, &dev_attr_brightness_hw_changed);
+ if (ret) {
+ dev_err(dev, "Error creating brightness_hw_changed\n");
+ return ret;
+ }
+
+ led_cdev->brightness_hw_changed_kn =
+ sysfs_get_dirent(dev->kobj.sd, "brightness_hw_changed");
+ if (!led_cdev->brightness_hw_changed_kn) {
+ dev_err(dev, "Error getting brightness_hw_changed kn\n");
+ device_remove_file(dev, &dev_attr_brightness_hw_changed);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+ sysfs_put(led_cdev->brightness_hw_changed_kn);
+ device_remove_file(led_cdev->dev, &dev_attr_brightness_hw_changed);
+}
+
+void led_classdev_notify_brightness_hw_changed(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (WARN_ON(!led_cdev->brightness_hw_changed_kn))
+ return;
+
+ led_cdev->brightness_hw_changed = brightness;
+ sysfs_notify_dirent(led_cdev->brightness_hw_changed_kn);
+}
+EXPORT_SYMBOL_GPL(led_classdev_notify_brightness_hw_changed);
+#else
+static int led_add_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+ return 0;
+}
+static void led_remove_brightness_hw_changed(struct led_classdev *led_cdev)
+{
+}
+#endif
+
/**
* led_classdev_suspend - suspend an led_classdev.
* @led_cdev: the led_classdev to suspend.
@@ -204,10 +266,21 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
dev_warn(parent, "Led %s renamed to %s due to name collision",
led_cdev->name, dev_name(led_cdev->dev));
+ if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
+ ret = led_add_brightness_hw_changed(led_cdev);
+ if (ret) {
+ device_unregister(led_cdev->dev);
+ return ret;
+ }
+ }
+
led_cdev->work_flags = 0;
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
+#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
+ led_cdev->brightness_hw_changed = -1;
+#endif
mutex_init(&led_cdev->led_access);
/* add to the list of leds */
down_write(&leds_list_lock);
@@ -256,6 +329,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
flush_work(&led_cdev->set_brightness_work);
+ if (led_cdev->flags & LED_BRIGHT_HW_CHANGED)
+ led_remove_brightness_hw_changed(led_cdev);
+
device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
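A minimal usage sketch of the new interface (driver, LED and helper names here are hypothetical): a driver opts in by setting LED_BRIGHT_HW_CHANGED in its led_classdev flags before calling led_classdev_register(), and then reports autonomous hardware brightness changes via led_classdev_notify_brightness_hw_changed(), e.g. from a hotkey interrupt handler:

	#include <linux/interrupt.h>
	#include <linux/leds.h>

	static struct led_classdev foo_led = {
		.name		= "foo::kbd_backlight",
		.max_brightness	= 1,
		.brightness_set	= foo_led_set,		/* hypothetical setter */
		.flags		= LED_BRIGHT_HW_CHANGED,
	};

	static irqreturn_t foo_hotkey_irq(int irq, void *data)
	{
		/* foo_read_hw_brightness() stands in for however the driver
		 * learns what brightness the hardware applied on its own.
		 */
		enum led_brightness b = foo_read_hw_brightness();

		led_classdev_notify_brightness_hw_changed(&foo_led, b);
		return IRQ_HANDLED;
	}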
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index bf23ba191ad0..45296aaca9da 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -270,15 +270,15 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
- if (IS_ERR(led->ctrl_gpio)) {
- ret = PTR_ERR(led->ctrl_gpio);
+ ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+ if (ret) {
dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
return ret;
}
led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
- if (IS_ERR(led->aux_gpio)) {
- ret = PTR_ERR(led->aux_gpio);
+ ret = PTR_ERR_OR_ZERO(led->aux_gpio);
+ if (ret) {
dev_err(dev, "cannot get aux-gpios %d\n", ret);
return ret;
}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index c9f386213e9e..e6f2f8b9f09a 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -43,6 +43,9 @@ static void led_heartbeat_function(unsigned long data)
return;
}
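+	/* Latch a brightness change requested since the previous beat */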
+ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags))
+ led_cdev->blink_brightness = led_cdev->new_blink_brightness;
+
/* acts like an actual heart beat -- ie thump-thump-pause... */
switch (heartbeat_data->phase) {
case 0:
@@ -59,26 +62,26 @@ static void led_heartbeat_function(unsigned long data)
delay = msecs_to_jiffies(70);
heartbeat_data->phase++;
if (!heartbeat_data->invert)
- brightness = led_cdev->max_brightness;
+ brightness = led_cdev->blink_brightness;
break;
case 1:
delay = heartbeat_data->period / 4 - msecs_to_jiffies(70);
heartbeat_data->phase++;
if (heartbeat_data->invert)
- brightness = led_cdev->max_brightness;
+ brightness = led_cdev->blink_brightness;
break;
case 2:
delay = msecs_to_jiffies(70);
heartbeat_data->phase++;
if (!heartbeat_data->invert)
- brightness = led_cdev->max_brightness;
+ brightness = led_cdev->blink_brightness;
break;
default:
delay = heartbeat_data->period - heartbeat_data->period / 4 -
msecs_to_jiffies(70);
heartbeat_data->phase = 0;
if (heartbeat_data->invert)
- brightness = led_cdev->max_brightness;
+ brightness = led_cdev->blink_brightness;
break;
}
@@ -133,7 +136,10 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
setup_timer(&heartbeat_data->timer,
led_heartbeat_function, (unsigned long) led_cdev);
heartbeat_data->phase = 0;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
led_heartbeat_function(heartbeat_data->timer.data);
+ set_bit(LED_BLINK_SW, &led_cdev->work_flags);
led_cdev->activated = true;
}
@@ -145,6 +151,7 @@ static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
del_timer_sync(&heartbeat_data->timer);
device_remove_file(led_cdev->dev, &dev_attr_invert);
kfree(heartbeat_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
led_cdev->activated = false;
}
}
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2f5d5f4a4c75..052714106b7b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -26,15 +26,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_GENNVM
- tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
- ---help---
- Non-volatile memory media manager for Open-Channel SSDs that implements
- physical media metadata management and block provisioning API.
-
- This is the standard media manager for using Open-Channel SSDs, and
- required for targets to be instantiated.
-
config NVM_RRPC
tristate "Round-robin Hybrid Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22cf1a5..b2a39e2d2895 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,5 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o
-obj-$(CONFIG_NVM_GENNVM) += gennvm.o
+obj-$(CONFIG_NVM) := core.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 02240a0b39c9..5262ba66a7a7 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -29,10 +29,483 @@
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
-static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
+/* Map between virtual and physical channel and lun */
+struct nvm_ch_map {
+ int ch_off;
+ int nr_luns;
+ int *lun_offs;
+};
+
+struct nvm_dev_map {
+ struct nvm_ch_map *chnls;
+ int nr_chnls;
+};
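+
+/*
+ * A target keeps such a map (tgt_dev->map) from its zero-based channel and
+ * lun numbers to the parent device, while the device keeps a single reverse
+ * map (dev->rmap) for translating device addresses back to the target.
+ */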
+
+struct nvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
+
+static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
+{
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &dev->targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
+static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++) {
+ if (test_and_set_bit(i, dev->lun_map)) {
+ pr_err("nvm: lun %d already allocated\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+	while (--i >= lun_begin)
+ clear_bit(i, dev->lun_map);
+
+ return -EBUSY;
+}
+
+static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
+ int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++)
+ WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_dev_map *dev_map = tgt_dev->map;
+ int i, j;
+
+ for (i = 0; i < dev_map->nr_chnls; i++) {
+ struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs = ch_map->lun_offs;
+ int ch = i + ch_map->ch_off;
+
+ for (j = 0; j < ch_map->nr_luns; j++) {
+ int lun = j + lun_offs[j];
+ int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ }
+
+ kfree(ch_map->lun_offs);
+ }
+
+ kfree(dev_map->chnls);
+ kfree(dev_map);
+
+ kfree(tgt_dev->luns);
+ kfree(tgt_dev);
+}
+
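+/*
+ * Carve luns lun_begin..lun_end out of the parent device and build the
+ * target's view of them: the target sees zero-based, contiguous channel and
+ * lun numbers, and the recorded offsets let nvm_map_to_dev() translate them
+ * back to physical addresses at I/O time.
+ */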
+static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
+ int lun_begin, int lun_end)
+{
+ struct nvm_tgt_dev *tgt_dev = NULL;
+ struct nvm_dev_map *dev_rmap = dev->rmap;
+ struct nvm_dev_map *dev_map;
+ struct ppa_addr *luns;
+ int nr_luns = lun_end - lun_begin + 1;
+ int luns_left = nr_luns;
+ int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+ int bch = lun_begin / dev->geo.luns_per_chnl;
+ int blun = lun_begin % dev->geo.luns_per_chnl;
+ int lunid = 0;
+ int lun_balanced = 1;
+ int prev_nr_luns;
+ int i, j;
+
+ nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+ dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+ if (!dev_map)
+ goto err_dev;
+
+ dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
+ GFP_KERNEL);
+ if (!dev_map->chnls)
+ goto err_chnls;
+
+ luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!luns)
+ goto err_luns;
+
+ prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+ for (i = 0; i < nr_chnls; i++) {
+ struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+ int *lun_roffs = ch_rmap->lun_offs;
+ struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs;
+ int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+
+ if (lun_balanced && prev_nr_luns != luns_in_chnl)
+ lun_balanced = 0;
+
+ ch_map->ch_off = ch_rmap->ch_off = bch;
+ ch_map->nr_luns = luns_in_chnl;
+
+ lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_offs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++) {
+ luns[lunid].ppa = 0;
+ luns[lunid].g.ch = i;
+ luns[lunid++].g.lun = j;
+
+ lun_offs[j] = blun;
+ lun_roffs[j + blun] = blun;
+ }
+
+ ch_map->lun_offs = lun_offs;
+
+ /* when starting a new channel, lun offset is reset */
+ blun = 0;
+ luns_left -= luns_in_chnl;
+ }
+
+ dev_map->nr_chnls = nr_chnls;
+
+ tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+ if (!tgt_dev)
+ goto err_ch;
+
+ memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+ /* Target device only owns a portion of the physical device */
+ tgt_dev->geo.nr_chnls = nr_chnls;
+ tgt_dev->geo.nr_luns = nr_luns;
+ tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+ tgt_dev->q = dev->q;
+ tgt_dev->map = dev_map;
+ tgt_dev->luns = luns;
+ memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+ tgt_dev->parent = dev;
+
+ return tgt_dev;
+err_ch:
+	while (--i >= 0)
+ kfree(dev_map->chnls[i].lun_offs);
+ kfree(luns);
+err_luns:
+ kfree(dev_map->chnls);
+err_chnls:
+ kfree(dev_map);
+err_dev:
+ return tgt_dev;
+}
+
+static const struct block_device_operations nvm_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
+{
+ struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct request_queue *tqueue;
+ struct gendisk *tdisk;
+ struct nvm_tgt_type *tt;
+ struct nvm_target *t;
+ struct nvm_tgt_dev *tgt_dev;
+ void *targetdata;
+
+ tt = nvm_find_target_type(create->tgttype, 1);
+ if (!tt) {
+ pr_err("nvm: target type %s not found\n", create->tgttype);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mlock);
+ t = nvm_find_target(dev, create->tgtname);
+ if (t) {
+ pr_err("nvm: target name already exists.\n");
+ mutex_unlock(&dev->mlock);
+ return -EINVAL;
+ }
+ mutex_unlock(&dev->mlock);
+
+ if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
+ return -ENOMEM;
+
+ t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+ if (!t)
+ goto err_reserve;
+
+ tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ if (!tgt_dev) {
+ pr_err("nvm: could not create target device\n");
+ goto err_t;
+ }
+
+ tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+ if (!tqueue)
+ goto err_dev;
+ blk_queue_make_request(tqueue, tt->make_rq);
+
+ tdisk = alloc_disk(0);
+ if (!tdisk)
+ goto err_queue;
+
+ sprintf(tdisk->disk_name, "%s", create->tgtname);
+ tdisk->flags = GENHD_FL_EXT_DEVT;
+ tdisk->major = 0;
+ tdisk->first_minor = 0;
+ tdisk->fops = &nvm_fops;
+ tdisk->queue = tqueue;
+
+ targetdata = tt->init(tgt_dev, tdisk);
+ if (IS_ERR(targetdata))
+ goto err_init;
+
+ tdisk->private_data = targetdata;
+ tqueue->queuedata = targetdata;
+
+ blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+ set_capacity(tdisk, tt->capacity(targetdata));
+ add_disk(tdisk);
+
+ if (tt->sysfs_init && tt->sysfs_init(tdisk))
+ goto err_sysfs;
+
+ t->type = tt;
+ t->disk = tdisk;
+ t->dev = tgt_dev;
+
+ mutex_lock(&dev->mlock);
+ list_add_tail(&t->list, &dev->targets);
+ mutex_unlock(&dev->mlock);
+
+ return 0;
+err_sysfs:
+ if (tt->exit)
+ tt->exit(targetdata);
+err_init:
+ put_disk(tdisk);
+err_queue:
+ blk_cleanup_queue(tqueue);
+err_dev:
+ nvm_remove_tgt_dev(tgt_dev);
+err_t:
+ kfree(t);
+err_reserve:
+ nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ return -ENOMEM;
+}
+
+static void __nvm_remove_target(struct nvm_target *t)
+{
+ struct nvm_tgt_type *tt = t->type;
+ struct gendisk *tdisk = t->disk;
+ struct request_queue *q = tdisk->queue;
+
+ del_gendisk(tdisk);
+ blk_cleanup_queue(q);
+
+ if (tt->sysfs_exit)
+ tt->sysfs_exit(tdisk);
+
+ if (tt->exit)
+ tt->exit(tdisk->private_data);
+
+ nvm_remove_tgt_dev(t->dev);
+ put_disk(tdisk);
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+/**
+ * nvm_remove_tgt - Removes a target from the media manager
+ * @dev: device
+ * @remove: ioctl structure with target name to remove.
+ *
+ * Returns:
+ * 0: on success
+ * 1: on not found
+ * <0: on error
+ */
+static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+{
+ struct nvm_target *t;
+
+ mutex_lock(&dev->mlock);
+ t = nvm_find_target(dev, remove->tgtname);
+ if (!t) {
+ mutex_unlock(&dev->mlock);
+ return 1;
+ }
+ __nvm_remove_target(t);
+ mutex_unlock(&dev->mlock);
+
+ return 0;
+}
+
+static int nvm_register_map(struct nvm_dev *dev)
+{
+ struct nvm_dev_map *rmap;
+ int i, j;
+
+ rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+ if (!rmap)
+ goto err_rmap;
+
+ rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
+ GFP_KERNEL);
+ if (!rmap->chnls)
+ goto err_chnls;
+
+ for (i = 0; i < dev->geo.nr_chnls; i++) {
+ struct nvm_ch_map *ch_rmap;
+ int *lun_roffs;
+ int luns_in_chnl = dev->geo.luns_per_chnl;
+
+ ch_rmap = &rmap->chnls[i];
+
+ ch_rmap->ch_off = -1;
+ ch_rmap->nr_luns = luns_in_chnl;
+
+ lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_roffs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++)
+ lun_roffs[j] = -1;
+
+ ch_rmap->lun_offs = lun_roffs;
+ }
+
+ dev->rmap = rmap;
+
+ return 0;
+err_ch:
+ while (--i >= 0)
+ kfree(rmap->chnls[i].lun_offs);
+err_chnls:
+ kfree(rmap);
+err_rmap:
+ return -ENOMEM;
+}
+
+static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+ struct nvm_dev_map *dev_map = tgt_dev->map;
+ struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+ int lun_off = ch_map->lun_offs[p->g.lun];
+
+ p->g.ch += ch_map->ch_off;
+ p->g.lun += lun_off;
+}
+
+static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_dev_map *dev_rmap = dev->rmap;
+ struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+ int lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+ p->g.ch -= ch_rmap->ch_off;
+ p->g.lun -= lun_roff;
+}
+
+static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr *ppa_list, int nr_ppas)
+{
+ int i;
+
+ for (i = 0; i < nr_ppas; i++) {
+ nvm_map_to_dev(tgt_dev, &ppa_list[i]);
+ ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
+ }
+}
+
+static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr *ppa_list, int nr_ppas)
+{
+ int i;
+
+ for (i = 0; i < nr_ppas; i++) {
+ ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
+ nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
+ }
+}
+
+static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+ if (rqd->nr_ppas == 1) {
+ nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
+ return;
+ }
+
+ nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+}
+
+static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+ if (rqd->nr_ppas == 1) {
+ nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
+ return;
+ }
+
+ nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+}
+
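+/*
+ * Rewrite a table of device-linear sector addresses in place so each entry
+ * becomes relative to the target's first lun; zero entries are skipped.
+ */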
+void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+ int len)
+{
+ struct nvm_geo *geo = &dev->geo;
+ struct nvm_dev_map *dev_rmap = dev->rmap;
+ u64 i;
+
+ for (i = 0; i < len; i++) {
+ struct nvm_ch_map *ch_rmap;
+ int *lun_roffs;
+ struct ppa_addr gaddr;
+ u64 pba = le64_to_cpu(entries[i]);
+ int off;
+ u64 diff;
+
+ if (!pba)
+ continue;
+
+ gaddr = linear_to_generic_addr(geo, pba);
+ ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+ lun_roffs = ch_rmap->lun_offs;
+
+ off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
+
+ diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+ (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
+
+ entries[i] -= cpu_to_le64(diff);
+ }
+}
+EXPORT_SYMBOL(nvm_part_to_tgt);
+
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
struct nvm_tgt_type *tmp, *tt = NULL;
@@ -92,78 +565,6 @@ void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
}
EXPORT_SYMBOL(nvm_dev_dma_free);
-static struct nvmm_type *nvm_find_mgr_type(const char *name)
-{
- struct nvmm_type *mt;
-
- list_for_each_entry(mt, &nvm_mgrs, list)
- if (!strcmp(name, mt->name))
- return mt;
-
- return NULL;
-}
-
-static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
-{
- struct nvmm_type *mt;
- int ret;
-
- lockdep_assert_held(&nvm_lock);
-
- list_for_each_entry(mt, &nvm_mgrs, list) {
- if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
- continue;
-
- ret = mt->register_mgr(dev);
- if (ret < 0) {
- pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
- ret, dev->name);
- return NULL; /* initialization failed */
- } else if (ret > 0)
- return mt;
- }
-
- return NULL;
-}
-
-int nvm_register_mgr(struct nvmm_type *mt)
-{
- struct nvm_dev *dev;
- int ret = 0;
-
- down_write(&nvm_lock);
- if (nvm_find_mgr_type(mt->name)) {
- ret = -EEXIST;
- goto finish;
- } else {
- list_add(&mt->list, &nvm_mgrs);
- }
-
- /* try to register media mgr if any device have none configured */
- list_for_each_entry(dev, &nvm_devices, devices) {
- if (dev->mt)
- continue;
-
- dev->mt = nvm_init_mgr(dev);
- }
-finish:
- up_write(&nvm_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_register_mgr);
-
-void nvm_unregister_mgr(struct nvmm_type *mt)
-{
- if (!mt)
- return;
-
- down_write(&nvm_lock);
- list_del(&mt->list);
- up_write(&nvm_lock);
-}
-EXPORT_SYMBOL(nvm_unregister_mgr);
-
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
struct nvm_dev *dev;
@@ -175,53 +576,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
- struct nvm_rq *rqd)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- int i;
-
- if (rqd->nr_ppas > 1) {
- for (i = 0; i < rqd->nr_ppas; i++) {
- rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
- rqd->ppa_list[i], TRANS_TGT_TO_DEV);
- rqd->ppa_list[i] = generic_to_dev_addr(dev,
- rqd->ppa_list[i]);
- }
- } else {
- rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
- TRANS_TGT_TO_DEV);
- rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
- }
-}
-
-int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
- int type)
-{
- struct nvm_rq rqd;
- int ret;
-
- if (nr_ppas > dev->ops->max_phys_sect) {
- pr_err("nvm: unable to update all sysblocks atomically\n");
- return -EINVAL;
- }
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(dev, &rqd);
- if (ret) {
- pr_err("nvm: sysblk failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_set_bb_tbl);
-
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
int nr_ppas, int type)
{
@@ -237,12 +591,12 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
memset(&rqd, 0, sizeof(struct nvm_rq));
nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
- nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+ nvm_rq_tgt_to_dev(tgt_dev, &rqd);
ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
nvm_free_rqd_ppalist(dev, &rqd);
if (ret) {
- pr_err("nvm: sysblk failed bb mark\n");
+ pr_err("nvm: failed bb mark\n");
return -EINVAL;
}
@@ -262,15 +616,42 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
struct nvm_dev *dev = tgt_dev->parent;
- return dev->mt->submit_io(tgt_dev, rqd);
+ if (!dev->ops->submit_io)
+ return -ENODEV;
+
+ nvm_rq_tgt_to_dev(tgt_dev, rqd);
+
+ rqd->dev = tgt_dev;
+ return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
{
struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (!dev->ops->erase_block)
+ return 0;
+
+ nvm_map_to_dev(tgt_dev, ppas);
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
+ if (ret)
+ return ret;
+
+ nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+ rqd.flags = flags;
+
+ ret = dev->ops->erase_block(dev, &rqd);
- return dev->mt->erase_blk(tgt_dev, p, flags);
+ nvm_free_rqd_ppalist(dev, &rqd);
+
+ return ret;
}
EXPORT_SYMBOL(nvm_erase_blk);
@@ -289,46 +670,67 @@ EXPORT_SYMBOL(nvm_get_l2p_tbl);
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_geo *geo = &dev->geo;
+ struct nvm_area *area, *prev, *next;
+ sector_t begin = 0;
+ sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
- return dev->mt->get_area(dev, lba, len);
-}
-EXPORT_SYMBOL(nvm_get_area);
+ if (len > max_sectors)
+ return -EINVAL;
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
-{
- struct nvm_dev *dev = tgt_dev->parent;
+ area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
- dev->mt->put_area(dev, lba);
-}
-EXPORT_SYMBOL(nvm_put_area);
+ prev = NULL;
-void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- int i;
+ spin_lock(&dev->lock);
+ list_for_each_entry(next, &dev->area_list, list) {
+ if (begin + len > next->begin) {
+ begin = next->end;
+ prev = next;
+ continue;
+ }
+ break;
+ }
- if (rqd->nr_ppas > 1) {
- for (i = 0; i < rqd->nr_ppas; i++)
- rqd->ppa_list[i] = dev_to_generic_addr(dev,
- rqd->ppa_list[i]);
- } else {
- rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+ if ((begin + len) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
}
+
+ area->begin = *lba = begin;
+ area->end = begin + len;
+
+ if (prev) /* insert into sorted order */
+ list_add(&area->list, &prev->list);
+ else
+ list_add(&area->list, &dev->area_list);
+ spin_unlock(&dev->lock);
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+EXPORT_SYMBOL(nvm_get_area);
-void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
- int i;
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_area *area;
- if (rqd->nr_ppas > 1) {
- for (i = 0; i < rqd->nr_ppas; i++)
- rqd->ppa_list[i] = generic_to_dev_addr(dev,
- rqd->ppa_list[i]);
- } else {
- rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &dev->area_list, list) {
+ if (area->begin != begin)
+ continue;
+
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
}
+ spin_unlock(&dev->lock);
}
-EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+EXPORT_SYMBOL(nvm_put_area);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
const struct ppa_addr *ppas, int nr_ppas, int vblk)
@@ -380,149 +782,19 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
- int flags)
+void nvm_end_io(struct nvm_rq *rqd)
{
- struct nvm_rq rqd;
- int ret;
+ struct nvm_tgt_dev *tgt_dev = rqd->dev;
- if (!dev->ops->erase_block)
- return 0;
+ /* Convert address space */
+ if (tgt_dev)
+ nvm_rq_dev_to_tgt(tgt_dev, rqd);
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
- if (ret)
- return ret;
-
- nvm_generic_to_addr_mode(dev, &rqd);
-
- rqd.flags = flags;
-
- ret = dev->ops->erase_block(dev, &rqd);
-
- nvm_free_rqd_ppalist(dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_ppa);
-
-void nvm_end_io(struct nvm_rq *rqd, int error)
-{
- rqd->error = error;
- rqd->end_io(rqd);
+ if (rqd->end_io)
+ rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
-static void nvm_end_io_sync(struct nvm_rq *rqd)
-{
- struct completion *waiting = rqd->wait;
-
- rqd->wait = NULL;
-
- complete(waiting);
-}
-
-static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
- int flags, void *buf, int len)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- struct bio *bio;
- int ret;
- unsigned long hang_check;
-
- bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
- if (IS_ERR_OR_NULL(bio))
- return -ENOMEM;
-
- nvm_generic_to_addr_mode(dev, rqd);
-
- rqd->dev = NULL;
- rqd->opcode = opcode;
- rqd->flags = flags;
- rqd->bio = bio;
- rqd->wait = &wait;
- rqd->end_io = nvm_end_io_sync;
-
- ret = dev->ops->submit_io(dev, rqd);
- if (ret) {
- bio_put(bio);
- return ret;
- }
-
- /* Prevent hang_check timer from firing at us during very long I/O */
- hang_check = sysctl_hung_task_timeout_secs;
- if (hang_check)
- while (!wait_for_completion_io_timeout(&wait,
- hang_check * (HZ/2)))
- ;
- else
- wait_for_completion_io(&wait);
-
- return rqd->error;
-}
-
-/**
- * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
- * take to free ppa list if necessary.
- * @dev: device
- * @ppa_list: user created ppa_list
- * @nr_ppas: length of ppa_list
- * @opcode: device opcode
- * @flags: device flags
- * @buf: data buffer
- * @len: data buffer length
- */
-int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
- int nr_ppas, int opcode, int flags, void *buf, int len)
-{
- struct nvm_rq rqd;
-
- if (dev->ops->max_phys_sect < nr_ppas)
- return -EINVAL;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.nr_ppas = nr_ppas;
- if (nr_ppas > 1)
- rqd.ppa_list = ppa_list;
- else
- rqd.ppa_addr = ppa_list[0];
-
- return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-}
-EXPORT_SYMBOL(nvm_submit_ppa_list);
-
-/**
- * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
- * as single, dual, quad plane PPAs depending on device type.
- * @dev: device
- * @ppa: user created ppa_list
- * @nr_ppas: length of ppa_list
- * @opcode: device opcode
- * @flags: device flags
- * @buf: data buffer
- * @len: data buffer length
- */
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
- int opcode, int flags, void *buf, int len)
-{
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
- if (ret)
- return ret;
-
- ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-
- nvm_free_rqd_ppalist(dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_submit_ppa);
-
/*
* folds a bad block list from its plane representation to its virtual
* block representation. The fold is done in place and reduced size is
@@ -559,21 +831,14 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
-int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
-{
- ppa = generic_to_dev_addr(dev, ppa);
-
- return dev->ops->get_bb_tbl(dev, ppa, blks);
-}
-EXPORT_SYMBOL(nvm_get_bb_tbl);
-
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
u8 *blks)
{
struct nvm_dev *dev = tgt_dev->parent;
- ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
- return nvm_get_bb_tbl(dev, ppa, blks);
+ nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
+
+ return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
@@ -627,7 +892,7 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
- struct nvm_id_group *grp = &id->groups[0];
+ struct nvm_id_group *grp = &id->grp;
struct nvm_geo *geo = &dev->geo;
int ret;
@@ -691,36 +956,31 @@ static int nvm_core_init(struct nvm_dev *dev)
goto err_fmtype;
}
+ INIT_LIST_HEAD(&dev->area_list);
+ INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
- blk_queue_logical_block_size(dev->q, geo->sec_size);
+ ret = nvm_register_map(dev);
+ if (ret)
+ goto err_fmtype;
+ blk_queue_logical_block_size(dev->q, geo->sec_size);
return 0;
err_fmtype:
kfree(dev->lun_map);
return ret;
}
-static void nvm_free_mgr(struct nvm_dev *dev)
-{
- if (!dev->mt)
- return;
-
- dev->mt->unregister_mgr(dev);
- dev->mt = NULL;
-}
-
void nvm_free(struct nvm_dev *dev)
{
if (!dev)
return;
- nvm_free_mgr(dev);
-
if (dev->dma_pool)
dev->ops->destroy_dma_pool(dev->dma_pool);
+ kfree(dev->rmap);
kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
@@ -731,28 +991,19 @@ static int nvm_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret = -EINVAL;
- if (!dev->q || !dev->ops)
- return ret;
-
if (dev->ops->identity(dev, &dev->identity)) {
pr_err("nvm: device could not be identified\n");
goto err;
}
- pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
- dev->identity.ver_id, dev->identity.vmnt,
- dev->identity.cgrps);
+ pr_debug("nvm: ver:%x nvm_vendor:%x\n",
+ dev->identity.ver_id, dev->identity.vmnt);
if (dev->identity.ver_id != 1) {
pr_err("nvm: device not supported by kernel.");
goto err;
}
- if (dev->identity.cgrps != 1) {
- pr_err("nvm: only one group configuration supported.");
- goto err;
- }
-
ret = nvm_core_init(dev);
if (ret) {
pr_err("nvm: could not initialize core structures.\n");
@@ -779,49 +1030,50 @@ int nvm_register(struct nvm_dev *dev)
{
int ret;
- ret = nvm_init(dev);
- if (ret)
- goto err_init;
+ if (!dev->q || !dev->ops)
+ return -EINVAL;
if (dev->ops->max_phys_sect > 256) {
pr_info("nvm: max sectors supported is 256.\n");
- ret = -EINVAL;
- goto err_init;
+ return -EINVAL;
}
if (dev->ops->max_phys_sect > 1) {
dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
- ret = -ENOMEM;
- goto err_init;
+ return -ENOMEM;
}
}
- if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
- ret = nvm_get_sysblock(dev, &dev->sb);
- if (!ret)
- pr_err("nvm: device not initialized.\n");
- else if (ret < 0)
- pr_err("nvm: err (%d) on device initialization\n", ret);
- }
+ ret = nvm_init(dev);
+ if (ret)
+ goto err_init;
/* register device with a supported media manager */
down_write(&nvm_lock);
- if (ret > 0)
- dev->mt = nvm_init_mgr(dev);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
return 0;
err_init:
- kfree(dev->lun_map);
+ dev->ops->destroy_dma_pool(dev->dma_pool);
return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
+ struct nvm_target *t, *tmp;
+
+ mutex_lock(&dev->mlock);
+ list_for_each_entry_safe(t, tmp, &dev->targets, list) {
+ if (t->dev->parent != dev)
+ continue;
+ __nvm_remove_target(t);
+ }
+ mutex_unlock(&dev->mlock);
+
down_write(&nvm_lock);
list_del(&dev->devices);
up_write(&nvm_lock);
@@ -844,24 +1096,24 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (!dev->mt) {
- pr_info("nvm: device has no media manager registered.\n");
- return -ENODEV;
- }
-
if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
pr_err("nvm: config type not valid\n");
return -EINVAL;
}
s = &create->conf.s;
- if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = dev->geo.nr_luns - 1;
+ }
+
+ if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns);
+ s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
return -EINVAL;
}
- return dev->mt->create_tgt(dev, create);
+ return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
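The rewritten range check above changes two things that are easy to miss: a lun_begin/lun_end pair of -1/-1 now means the whole device, and lun_end is compared against nr_luns - 1 instead of nr_luns, so the former off-by-one acceptance of lun_end == nr_luns is gone. The same logic as a stand-alone check with invented numbers:

#include <stdio.h>

static int check_range(int lun_begin, int lun_end, int nr_luns)
{
	if (lun_begin == -1 && lun_end == -1) {	/* -1/-1 selects every LUN */
		lun_begin = 0;
		lun_end = nr_luns - 1;
	}
	if (lun_begin > lun_end || lun_end >= nr_luns)
		return -1;
	printf("using luns %d..%d of %d\n", lun_begin, lun_end, nr_luns);
	return 0;
}

int main(void)
{
	check_range(-1, -1, 128);	/* using luns 0..127 of 128 */
	check_range(0, 128, 128);	/* rejected: 128 is out of range */
	return 0;
}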
@@ -923,16 +1175,14 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
struct nvm_ioctl_device_info *info = &devices->info[i];
sprintf(info->devname, "%s", dev->name);
- if (dev->mt) {
- info->bmversion[0] = dev->mt->version[0];
- info->bmversion[1] = dev->mt->version[1];
- info->bmversion[2] = dev->mt->version[2];
- sprintf(info->bmname, "%s", dev->mt->name);
- } else {
- sprintf(info->bmname, "none");
- }
+ /* kept for compatibility */
+ info->bmversion[0] = 1;
+ info->bmversion[1] = 0;
+ info->bmversion[2] = 0;
+ sprintf(info->bmname, "%s", "gennvm");
i++;
+
if (i > 31) {
pr_err("nvm: max 31 devices can be reported.\n");
break;
@@ -994,7 +1244,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
}
list_for_each_entry(dev, &nvm_devices, devices) {
- ret = dev->mt->remove_tgt(dev, &remove);
+ ret = nvm_remove_tgt(dev, &remove);
if (!ret)
break;
}
@@ -1002,47 +1252,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return ret;
}
-static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
-{
- info->seqnr = 1;
- info->erase_cnt = 0;
- info->version = 1;
-}
-
-static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
-{
- struct nvm_dev *dev;
- struct nvm_sb_info info;
- int ret;
-
- down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(init->dev);
- up_write(&nvm_lock);
- if (!dev) {
- pr_err("nvm: device not found\n");
- return -EINVAL;
- }
-
- nvm_setup_nvm_sb_info(&info);
-
- strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
- info.fs_ppa.ppa = -1;
-
- if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
- ret = nvm_init_sysblock(dev, &info);
- if (ret)
- return ret;
- }
-
- memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
-
- down_write(&nvm_lock);
- dev->mt = nvm_init_mgr(dev);
- up_write(&nvm_lock);
-
- return 0;
-}
-
+/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
struct nvm_ioctl_dev_init init;
@@ -1058,15 +1268,13 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
return -EINVAL;
}
- init.dev[DISK_NAME_LEN - 1] = '\0';
-
- return __nvm_ioctl_dev_init(&init);
+ return 0;
}
+/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
struct nvm_ioctl_dev_factory fact;
- struct nvm_dev *dev;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1079,19 +1287,6 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
return -EINVAL;
- down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(fact.dev);
- up_write(&nvm_lock);
- if (!dev) {
- pr_err("nvm: device not found\n");
- return -EINVAL;
- }
-
- nvm_free_mgr(dev);
-
- if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
- return nvm_dev_factory(dev, fact.flags);
-
return 0;
}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
deleted file mode 100644
index ca7880082d80..000000000000
--- a/drivers/lightnvm/gennvm.c
+++ /dev/null
@@ -1,657 +0,0 @@
-/*
- * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- * Implementation of a general nvm manager for Open-Channel SSDs.
- */
-
-#include "gennvm.h"
-
-static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
-{
- struct nvm_target *tgt;
-
- list_for_each_entry(tgt, &gn->targets, list)
- if (!strcmp(name, tgt->disk->disk_name))
- return tgt;
-
- return NULL;
-}
-
-static const struct block_device_operations gen_fops = {
- .owner = THIS_MODULE,
-};
-
-static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
- int lun_begin, int lun_end)
-{
- int i;
-
- for (i = lun_begin; i <= lun_end; i++) {
- if (test_and_set_bit(i, dev->lun_map)) {
- pr_err("nvm: lun %d already allocated\n", i);
- goto err;
- }
- }
-
- return 0;
-
-err:
- while (--i > lun_begin)
- clear_bit(i, dev->lun_map);
-
- return -EBUSY;
-}
-
-static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
- int lun_end)
-{
- int i;
-
- for (i = lun_begin; i <= lun_end; i++)
- WARN_ON(!test_and_clear_bit(i, dev->lun_map));
-}
-
-static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct gen_dev_map *dev_map = tgt_dev->map;
- int i, j;
-
- for (i = 0; i < dev_map->nr_chnls; i++) {
- struct gen_ch_map *ch_map = &dev_map->chnls[i];
- int *lun_offs = ch_map->lun_offs;
- int ch = i + ch_map->ch_off;
-
- for (j = 0; j < ch_map->nr_luns; j++) {
- int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
-
- WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
- }
-
- kfree(ch_map->lun_offs);
- }
-
- kfree(dev_map->chnls);
- kfree(dev_map);
- kfree(tgt_dev->luns);
- kfree(tgt_dev);
-}
-
-static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
-{
- struct nvm_tgt_dev *tgt_dev = NULL;
- struct gen_dev_map *dev_rmap = dev->rmap;
- struct gen_dev_map *dev_map;
- struct ppa_addr *luns;
- int nr_luns = lun_end - lun_begin + 1;
- int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
- int lunid = 0;
- int lun_balanced = 1;
- int prev_nr_luns;
- int i, j;
-
- nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
-
- dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
- if (!dev_map)
- goto err_dev;
-
- dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
- GFP_KERNEL);
- if (!dev_map->chnls)
- goto err_chnls;
-
- luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
- if (!luns)
- goto err_luns;
-
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
- for (i = 0; i < nr_chnls; i++) {
- struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
- int *lun_roffs = ch_rmap->lun_offs;
- struct gen_ch_map *ch_map = &dev_map->chnls[i];
- int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
-
- if (lun_balanced && prev_nr_luns != luns_in_chnl)
- lun_balanced = 0;
-
- ch_map->ch_off = ch_rmap->ch_off = bch;
- ch_map->nr_luns = luns_in_chnl;
-
- lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
- if (!lun_offs)
- goto err_ch;
-
- for (j = 0; j < luns_in_chnl; j++) {
- luns[lunid].ppa = 0;
- luns[lunid].g.ch = i;
- luns[lunid++].g.lun = j;
-
- lun_offs[j] = blun;
- lun_roffs[j + blun] = blun;
- }
-
- ch_map->lun_offs = lun_offs;
-
- /* when starting a new channel, lun offset is reset */
- blun = 0;
- luns_left -= luns_in_chnl;
- }
-
- dev_map->nr_chnls = nr_chnls;
-
- tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
- if (!tgt_dev)
- goto err_ch;
-
- memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
- /* Target device only owns a portion of the physical device */
- tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
- tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
- tgt_dev->q = dev->q;
- tgt_dev->map = dev_map;
- tgt_dev->luns = luns;
- memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
-
- tgt_dev->parent = dev;
-
- return tgt_dev;
-err_ch:
- while (--i > 0)
- kfree(dev_map->chnls[i].lun_offs);
- kfree(luns);
-err_luns:
- kfree(dev_map->chnls);
-err_chnls:
- kfree(dev_map);
-err_dev:
- return tgt_dev;
-}
-
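The slicing arithmetic at the top of gen_create_tgt_dev() is easier to follow with numbers: with luns_per_chnl = 4, a target covering absolute LUNs 4..11 starts at channel bch = 1 with in-channel offset blun = 0 and spans nr_chnls = 2 channels. The same expressions, lifted into a stand-alone calculation:

#include <stdio.h>

int main(void)
{
	int luns_per_chnl = 4, lun_begin = 4, lun_end = 11;
	int nr_luns = lun_end - lun_begin + 1;
	int nr_chnls = nr_luns / luns_per_chnl;
	int nr_chnls_mod = nr_luns % luns_per_chnl;
	int bch = lun_begin / luns_per_chnl;	/* first channel of the target */
	int blun = lun_begin % luns_per_chnl;	/* offset within that channel  */

	if (nr_chnls_mod)			/* partial last channel counts */
		nr_chnls++;

	printf("nr_luns=%d nr_chnls=%d bch=%d blun=%d\n",
	       nr_luns, nr_chnls, bch, blun);	/* 8 2 1 0 */
	return 0;
}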
-static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
-{
- struct gen_dev *gn = dev->mp;
- struct nvm_ioctl_create_simple *s = &create->conf.s;
- struct request_queue *tqueue;
- struct gendisk *tdisk;
- struct nvm_tgt_type *tt;
- struct nvm_target *t;
- struct nvm_tgt_dev *tgt_dev;
- void *targetdata;
-
- tt = nvm_find_target_type(create->tgttype, 1);
- if (!tt) {
- pr_err("nvm: target type %s not found\n", create->tgttype);
- return -EINVAL;
- }
-
- mutex_lock(&gn->lock);
- t = gen_find_target(gn, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&gn->lock);
- return -EINVAL;
- }
- mutex_unlock(&gn->lock);
-
- t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
- if (!t)
- return -ENOMEM;
-
- if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
- goto err_t;
-
- tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
- if (!tgt_dev) {
- pr_err("nvm: could not create target device\n");
- goto err_reserve;
- }
-
- tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
- if (!tqueue)
- goto err_dev;
- blk_queue_make_request(tqueue, tt->make_rq);
-
- tdisk = alloc_disk(0);
- if (!tdisk)
- goto err_queue;
-
- sprintf(tdisk->disk_name, "%s", create->tgtname);
- tdisk->flags = GENHD_FL_EXT_DEVT;
- tdisk->major = 0;
- tdisk->first_minor = 0;
- tdisk->fops = &gen_fops;
- tdisk->queue = tqueue;
-
- targetdata = tt->init(tgt_dev, tdisk);
- if (IS_ERR(targetdata))
- goto err_init;
-
- tdisk->private_data = targetdata;
- tqueue->queuedata = targetdata;
-
- blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
-
- set_capacity(tdisk, tt->capacity(targetdata));
- add_disk(tdisk);
-
- t->type = tt;
- t->disk = tdisk;
- t->dev = tgt_dev;
-
- mutex_lock(&gn->lock);
- list_add_tail(&t->list, &gn->targets);
- mutex_unlock(&gn->lock);
-
- return 0;
-err_init:
- put_disk(tdisk);
-err_queue:
- blk_cleanup_queue(tqueue);
-err_dev:
- kfree(tgt_dev);
-err_reserve:
- gen_release_luns_err(dev, s->lun_begin, s->lun_end);
-err_t:
- kfree(t);
- return -ENOMEM;
-}
-
-static void __gen_remove_target(struct nvm_target *t)
-{
- struct nvm_tgt_type *tt = t->type;
- struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
-
- del_gendisk(tdisk);
- blk_cleanup_queue(q);
-
- if (tt->exit)
- tt->exit(tdisk->private_data);
-
- gen_remove_tgt_dev(t->dev);
- put_disk(tdisk);
-
- list_del(&t->list);
- kfree(t);
-}
-
-/**
- * gen_remove_tgt - Removes a target from the media manager
- * @dev: device
- * @remove: ioctl structure with target name to remove.
- *
- * Returns:
- * 0: on success
- * 1: on not found
- * <0: on error
- */
-static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
-{
- struct gen_dev *gn = dev->mp;
- struct nvm_target *t;
-
- if (!gn)
- return 1;
-
- mutex_lock(&gn->lock);
- t = gen_find_target(gn, remove->tgtname);
- if (!t) {
- mutex_unlock(&gn->lock);
- return 1;
- }
- __gen_remove_target(t);
- mutex_unlock(&gn->lock);
-
- return 0;
-}
-
-static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct gen_dev *gn = dev->mp;
- struct gen_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &gn->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &gn->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-
-static void gen_put_area(struct nvm_dev *dev, sector_t begin)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &gn->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-
-static void gen_free(struct nvm_dev *dev)
-{
- kfree(dev->mp);
- kfree(dev->rmap);
- dev->mp = NULL;
-}
-
-static int gen_register(struct nvm_dev *dev)
-{
- struct gen_dev *gn;
- struct gen_dev_map *dev_rmap;
- int i, j;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
- if (!gn)
- goto err_gn;
-
- dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
- if (!dev_rmap)
- goto err_rmap;
-
- dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
- GFP_KERNEL);
- if (!dev_rmap->chnls)
- goto err_chnls;
-
- for (i = 0; i < dev->geo.nr_chnls; i++) {
- struct gen_ch_map *ch_rmap;
- int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
-
- ch_rmap = &dev_rmap->chnls[i];
-
- ch_rmap->ch_off = -1;
- ch_rmap->nr_luns = luns_in_chnl;
-
- lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
- if (!lun_roffs)
- goto err_ch;
-
- for (j = 0; j < luns_in_chnl; j++)
- lun_roffs[j] = -1;
-
- ch_rmap->lun_offs = lun_roffs;
- }
-
- gn->dev = dev;
- gn->nr_luns = dev->geo.nr_luns;
- INIT_LIST_HEAD(&gn->area_list);
- mutex_init(&gn->lock);
- INIT_LIST_HEAD(&gn->targets);
- dev->mp = gn;
- dev->rmap = dev_rmap;
-
- return 1;
-err_ch:
- while (--i >= 0)
- kfree(dev_rmap->chnls[i].lun_offs);
-err_chnls:
- kfree(dev_rmap);
-err_rmap:
- gen_free(dev);
-err_gn:
- module_put(THIS_MODULE);
- return -ENOMEM;
-}
-
-static void gen_unregister(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct nvm_target *t, *tmp;
-
- mutex_lock(&gn->lock);
- list_for_each_entry_safe(t, tmp, &gn->targets, list) {
- if (t->dev->parent != dev)
- continue;
- __gen_remove_target(t);
- }
- mutex_unlock(&gn->lock);
-
- gen_free(dev);
- module_put(THIS_MODULE);
-}
-
-static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
- struct gen_dev_map *dev_map = tgt_dev->map;
- struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
- int lun_off = ch_map->lun_offs[p->g.lun];
- struct nvm_dev *dev = tgt_dev->parent;
- struct gen_dev_map *dev_rmap = dev->rmap;
- struct gen_ch_map *ch_rmap;
- int lun_roff;
-
- p->g.ch += ch_map->ch_off;
- p->g.lun += lun_off;
-
- ch_rmap = &dev_rmap->chnls[p->g.ch];
- lun_roff = ch_rmap->lun_offs[p->g.lun];
-
- if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
- pr_err("nvm: corrupted device partition table\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct gen_dev_map *dev_rmap = dev->rmap;
- struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
- int lun_roff = ch_rmap->lun_offs[p->g.lun];
-
- p->g.ch -= ch_rmap->ch_off;
- p->g.lun -= lun_roff;
-
- return 0;
-}
-
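gen_map_to_dev() and gen_map_to_tgt() are a matched pair: the forward map adds the channel and LUN offsets recorded when the target was carved out of the device, the reverse map subtracts them again. A toy round trip with an offset of two channels and one LUN (numbers invented for illustration):

#include <stdio.h>

struct addr { int ch, lun; };

static void to_dev(struct addr *p, int ch_off, int lun_off)
{
	p->ch += ch_off;
	p->lun += lun_off;
}

static void to_tgt(struct addr *p, int ch_off, int lun_off)
{
	p->ch -= ch_off;
	p->lun -= lun_off;
}

int main(void)
{
	struct addr p = { 0, 0 };	/* target-relative address */

	to_dev(&p, 2, 1);
	printf("dev: ch=%d lun=%d\n", p.ch, p.lun);	/* ch=2 lun=1 */
	to_tgt(&p, 2, 1);
	printf("tgt: ch=%d lun=%d\n", p.ch, p.lun);	/* ch=0 lun=0 */
	return 0;
}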
-static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
- int flag)
-{
- gen_trans_fn *f;
- int i;
- int ret = 0;
-
- f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
-
- if (rqd->nr_ppas == 1)
- return f(tgt_dev, &rqd->ppa_addr);
-
- for (i = 0; i < rqd->nr_ppas; i++) {
- ret = f(tgt_dev, &rqd->ppa_list[i]);
- if (ret)
- goto out;
- }
-
-out:
- return ret;
-}
-
-static void gen_end_io(struct nvm_rq *rqd)
-{
- struct nvm_tgt_dev *tgt_dev = rqd->dev;
- struct nvm_tgt_instance *ins = rqd->ins;
-
- /* Convert address space */
- if (tgt_dev)
- gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
-
- ins->tt->end_io(rqd);
-}
-
-static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->submit_io)
- return -ENODEV;
-
- /* Convert address space */
- gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
- nvm_generic_to_addr_mode(dev, rqd);
-
- rqd->dev = tgt_dev;
- rqd->end_io = gen_end_io;
- return dev->ops->submit_io(dev, rqd);
-}
-
-static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
- int flags)
-{
- /* Convert address space */
- gen_map_to_dev(tgt_dev, p);
-
- return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
-}
-
-static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr p, int direction)
-{
- gen_trans_fn *f;
- struct ppa_addr ppa = p;
-
- f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
- f(tgt_dev, &ppa);
-
- return ppa;
-}
-
-static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct gen_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct gen_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- int off;
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-
-static struct nvmm_type gen = {
- .name = "gennvm",
- .version = {0, 1, 0},
-
- .register_mgr = gen_register,
- .unregister_mgr = gen_unregister,
-
- .create_tgt = gen_create_tgt,
- .remove_tgt = gen_remove_tgt,
-
- .submit_io = gen_submit_io,
- .erase_blk = gen_erase_blk,
-
- .get_area = gen_get_area,
- .put_area = gen_put_area,
-
- .trans_ppa = gen_trans_ppa,
- .part_to_tgt = gen_part_to_tgt,
-};
-
-static int __init gen_module_init(void)
-{
- return nvm_register_mgr(&gen);
-}
-
-static void gen_module_exit(void)
-{
- nvm_unregister_mgr(&gen);
-}
-
-module_init(gen_module_init);
-module_exit(gen_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
deleted file mode 100644
index 6a4b3f368848..000000000000
--- a/drivers/lightnvm/gennvm.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright: Matias Bjorling <mb@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#ifndef GENNVM_H_
-#define GENNVM_H_
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-struct gen_dev {
- struct nvm_dev *dev;
-
- int nr_luns;
- struct list_head area_list;
-
- struct mutex lock;
- struct list_head targets;
-};
-
-/* Map between virtual and physical channel and lun */
-struct gen_ch_map {
- int ch_off;
- int nr_luns;
- int *lun_offs;
-};
-
-struct gen_dev_map {
- struct gen_ch_map *chnls;
- int nr_chnls;
-};
-
-struct gen_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
-static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
-{
- return ch_map + 1;
-}
-
-typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
-
-#define gen_for_each_lun(bm, lun, i) \
- for ((i) = 0, lun = &(bm)->luns[0]; \
- (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
-
-#endif /* GENNVM_H_ */
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 9fb7de395915..e00b1d7b976f 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -779,7 +779,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
static void rrpc_end_io(struct nvm_rq *rqd)
{
- struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+ struct rrpc *rrpc = rqd->private;
struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
uint8_t npages = rqd->nr_ppas;
@@ -972,8 +972,9 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
bio_get(bio);
rqd->bio = bio;
- rqd->ins = &rrpc->instance;
+ rqd->private = rrpc;
rqd->nr_ppas = nr_pages;
+ rqd->end_io = rrpc_end_io;
rrq->flags = flags;
err = nvm_submit_io(dev, rqd);
@@ -1532,7 +1533,6 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
if (!rrpc)
return ERR_PTR(-ENOMEM);
- rrpc->instance.tt = &tt_rrpc;
rrpc->dev = dev;
rrpc->disk = tdisk;
@@ -1611,7 +1611,6 @@ static struct nvm_tgt_type tt_rrpc = {
.make_rq = rrpc_make_rq,
.capacity = rrpc_capacity,
- .end_io = rrpc_end_io,
.init = rrpc_init,
.exit = rrpc_exit,
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 94e4d73116b2..fdb6ff902903 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -102,9 +102,6 @@ struct rrpc_lun {
};
struct rrpc {
- /* instance must be kept in top to resolve rrpc in unprep */
- struct nvm_tgt_instance instance;
-
struct nvm_tgt_dev *dev;
struct gendisk *disk;
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
deleted file mode 100644
index 12002bf4efc2..000000000000
--- a/drivers/lightnvm/sysblk.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright (C) 2015 Matias Bjorling. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- */
-
-#include <linux/lightnvm.h>
-
-#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
-#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
- * enables ~1.5M updates per sysblk unit
- */
-
-struct sysblk_scan {
- /* A row is a collection of flash blocks for a system block. */
- int nr_rows;
- int row;
- int act_blk[MAX_SYSBLKS];
-
- int nr_ppas;
- struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
-};
-
-static inline int scan_ppa_idx(int row, int blkid)
-{
- return (row * MAX_BLKS_PR_SYSBLK) + blkid;
-}
-
-static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
- struct nvm_system_block *sb)
-{
- info->seqnr = be32_to_cpu(sb->seqnr);
- info->erase_cnt = be32_to_cpu(sb->erase_cnt);
- info->version = be16_to_cpu(sb->version);
- strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
- info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
-}
-
-static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
- struct nvm_sb_info *info)
-{
- sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
- sb->seqnr = cpu_to_be32(info->seqnr);
- sb->erase_cnt = cpu_to_be32(info->erase_cnt);
- sb->version = cpu_to_be16(info->version);
- strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
- sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
-}
-
-static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
-{
- struct nvm_geo *geo = &dev->geo;
- int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
- int i;
-
- for (i = 0; i < nr_rows; i++)
- sysblk_ppas[i].ppa = 0;
-
- /* if possible, place sysblk at first channel, middle channel and last
- * channel of the device. If not, create only one or two sys blocks
- */
- switch (geo->nr_chnls) {
- case 2:
- sysblk_ppas[1].g.ch = 1;
- /* fall-through */
- case 1:
- sysblk_ppas[0].g.ch = 0;
- break;
- default:
- sysblk_ppas[0].g.ch = 0;
- sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
- sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
- break;
- }
-
- return nr_rows;
-}
-
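nvm_setup_sysblks() spreads up to MAX_SYSBLKS copies of the system block across the device: channel 0, the middle channel and the last channel, falling back to one or two copies on devices with fewer channels. The placement rule worked out for a few channel counts:

#include <stdio.h>

#define MAX_SYSBLKS 3

static int place(int nr_chnls, int ch[MAX_SYSBLKS])
{
	int nr_rows = nr_chnls < MAX_SYSBLKS ? nr_chnls : MAX_SYSBLKS;

	switch (nr_chnls) {
	case 1:
		ch[0] = 0;
		break;
	case 2:
		ch[0] = 0;
		ch[1] = 1;
		break;
	default:
		ch[0] = 0;
		ch[1] = nr_chnls / 2;
		ch[2] = nr_chnls - 1;
		break;
	}
	return nr_rows;
}

int main(void)
{
	int tests[] = { 1, 2, 8 }, ch[MAX_SYSBLKS];
	int i, c, n;

	for (i = 0; i < 3; i++) {
		n = place(tests[i], ch);
		printf("%d channels ->", tests[i]);
		for (c = 0; c < n; c++)
			printf(" ch%d", ch[c]);
		printf("\n");	/* 1 -> ch0;  2 -> ch0 ch1;  8 -> ch0 ch4 ch7 */
	}
	return 0;
}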
-static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
- struct ppa_addr *sysblk_ppas)
-{
- memset(s, 0, sizeof(struct sysblk_scan));
- s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
-}
-
-static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks,
- struct sysblk_scan *s)
-{
- struct ppa_addr *sppa;
- int i, blkid = 0;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_HOST)
- return -EEXIST;
-
- if (blks[i] != NVM_BLK_T_FREE)
- continue;
-
- sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
- sppa->g.ch = ppa.g.ch;
- sppa->g.lun = ppa.g.lun;
- sppa->g.blk = i;
- s->nr_ppas++;
- blkid++;
-
- pr_debug("nvm: use (%u %u %u) as sysblk\n",
- sppa->g.ch, sppa->g.lun, sppa->g.blk);
- if (blkid > MAX_BLKS_PR_SYSBLK - 1)
- return 0;
- }
-
- pr_err("nvm: sysblk failed get sysblk\n");
- return -EINVAL;
-}
-
-static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks,
- struct sysblk_scan *s)
-{
- int i, nr_sysblk = 0;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] != NVM_BLK_T_HOST)
- continue;
-
- if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
- pr_err("nvm: too many host blks\n");
- return -EINVAL;
- }
-
- ppa.g.blk = i;
-
- s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
- s->nr_ppas++;
- nr_sysblk++;
- }
-
- return 0;
-}
-
-static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
- struct ppa_addr *ppas, int get_free)
-{
- struct nvm_geo *geo = &dev->geo;
- int i, nr_blks, ret = 0;
- u8 *blks;
-
- s->nr_ppas = 0;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
-
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- for (i = 0; i < s->nr_rows; i++) {
- s->row = i;
-
- ret = nvm_get_bb_tbl(dev, ppas[i], blks);
- if (ret) {
- pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
- ppas[i].g.ch,
- ppas[i].g.blk);
- goto err_get;
- }
-
- if (get_free)
- ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
- s);
- else
- ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
- s);
-
- if (ret)
- goto err_get;
- }
-
-err_get:
- kfree(blks);
- return ret;
-}
-
-/*
- * scans a block for latest sysblk.
- * Returns:
- * 0 - newer sysblk not found. PPA is updated to latest page.
- * 1 - newer sysblk found and stored in *cur. PPA is updated to
- * next valid page.
- * <0- error.
- */
-static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
- struct nvm_system_block *sblk)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_system_block *cur;
- int pg, ret, found = 0;
-
-	/* the full buffer for a flash page is allocated. Only the first part of
-	 * it contains the system block information
- */
- cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
- if (!cur)
- return -ENOMEM;
-
- /* perform linear scan through the block */
- for (pg = 0; pg < dev->lps_per_blk; pg++) {
- ppa->g.pg = ppa_to_slc(dev, pg);
-
- ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, geo->pfpg_size);
- if (ret) {
- if (ret == NVM_RSP_ERR_EMPTYPAGE) {
- pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
- ppa->g.ch,
- ppa->g.lun,
- ppa->g.blk,
- ppa->g.pg);
- break;
- }
- pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
- ret,
- ppa->g.ch,
- ppa->g.lun,
- ppa->g.blk,
- ppa->g.pg);
- break; /* if we can't read a page, continue to the
- * next blk
- */
- }
-
- if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
- pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
- ppa->g.ch,
- ppa->g.lun,
- ppa->g.blk,
- ppa->g.pg);
- break; /* last valid page already found */
- }
-
- if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
- continue;
-
- memcpy(sblk, cur, sizeof(struct nvm_system_block));
- found = 1;
- }
-
- kfree(cur);
-
- return found;
-}
-
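nvm_scan_block() walks the SLC pages of one candidate block and keeps the newest system block it sees; the callers then repeat that over every candidate to find the freshest copy device-wide. Stripped of the flash I/O, the selection is a max-by-seqnr scan that stops at the first page without the magic marker, sketched on plain structs (the magic value is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define SYSBLK_MAGIC 0x4e564d53u	/* placeholder, not the real constant */

struct sysblk { uint32_t magic, seqnr; };

int main(void)
{
	struct sysblk pages[] = {
		{ SYSBLK_MAGIC, 7 }, { SYSBLK_MAGIC, 9 }, { 0, 0 }	/* empty */
	};
	struct sysblk latest = { 0, 0 };
	int found = 0;
	unsigned i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		if (pages[i].magic != SYSBLK_MAGIC)
			break;		/* scan ends at first non-sysblk page */
		if (pages[i].seqnr < latest.seqnr)
			continue;	/* older than what we already have */
		latest = pages[i];
		found = 1;
	}
	printf("found=%d seqnr=%u\n", found, latest.seqnr);	/* found=1 seqnr=9 */
	return 0;
}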
-static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
- int type)
-{
- return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
-}
-
-static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
- struct sysblk_scan *s)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_system_block nvmsb;
- void *buf;
- int i, sect, ret = 0;
- struct ppa_addr *ppas;
-
- nvm_cpu_to_sysblk(&nvmsb, info);
-
- buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
-
- ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
- if (!ppas) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Write and verify */
- for (i = 0; i < s->nr_rows; i++) {
- ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
-
- pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
- ppas[0].g.ch,
- ppas[0].g.lun,
- ppas[0].g.blk,
- ppas[0].g.pg);
-
- /* Expand to all sectors within a flash page */
- if (geo->sec_per_pg > 1) {
- for (sect = 1; sect < geo->sec_per_pg; sect++) {
- ppas[sect].ppa = ppas[0].ppa;
- ppas[sect].g.sec = sect;
- }
- }
-
- ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, geo->pfpg_size);
- if (ret) {
- pr_err("nvm: sysblk failed program (%u %u %u)\n",
- ppas[0].g.ch,
- ppas[0].g.lun,
- ppas[0].g.blk);
- break;
- }
-
- ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, geo->pfpg_size);
- if (ret) {
- pr_err("nvm: sysblk failed read (%u %u %u)\n",
- ppas[0].g.ch,
- ppas[0].g.lun,
- ppas[0].g.blk);
- break;
- }
-
- if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
- pr_err("nvm: sysblk failed verify (%u %u %u)\n",
- ppas[0].g.ch,
- ppas[0].g.lun,
- ppas[0].g.blk);
- ret = -EINVAL;
- break;
- }
- }
-
- kfree(ppas);
-err:
- kfree(buf);
-
- return ret;
-}
-
-static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
-{
- int i, ret;
- unsigned long nxt_blk;
- struct ppa_addr *ppa;
-
- for (i = 0; i < s->nr_rows; i++) {
- nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
- ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
- ppa->g.pg = ppa_to_slc(dev, 0);
-
- ret = nvm_erase_ppa(dev, ppa, 1, 0);
- if (ret)
- return ret;
-
- s->act_blk[i] = nxt_blk;
- }
-
- return 0;
-}
-
-int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
-{
- struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
- struct sysblk_scan s;
- struct nvm_system_block *cur;
- int i, j, found = 0;
- int ret = -ENOMEM;
-
- /*
- * 1. setup sysblk locations
- * 2. get bad block list
- * 3. filter on host-specific (type 3)
- * 4. iterate through all and find the highest seq nr.
- * 5. return superblock information
- */
-
- if (!dev->ops->get_bb_tbl)
- return -EINVAL;
-
- nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
-
- mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
- if (ret)
- goto err_sysblk;
-
- /* no sysblocks initialized */
- if (!s.nr_ppas)
- goto err_sysblk;
-
- cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
- if (!cur)
- goto err_sysblk;
-
- /* find the latest block across all sysblocks */
- for (i = 0; i < s.nr_rows; i++) {
- for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
- struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
-
- ret = nvm_scan_block(dev, &ppa, cur);
- if (ret > 0)
- found = 1;
- else if (ret < 0)
- break;
- }
- }
-
- nvm_sysblk_to_cpu(info, cur);
-
- kfree(cur);
-err_sysblk:
- mutex_unlock(&dev->mlock);
-
- if (found)
- return 1;
- return ret;
-}
-
-int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
-{
- /* 1. for each latest superblock
- * 2. if room
- * a. write new flash page entry with the updated information
- * 3. if no room
- * a. find next available block on lun (linear search)
- * if none, continue to next lun
- * if none at all, report error. also report that it wasn't
- * possible to write to all superblocks.
- * c. write data to block.
- */
- struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
- struct sysblk_scan s;
- struct nvm_system_block *cur;
- int i, j, ppaidx, found = 0;
- int ret = -ENOMEM;
-
- if (!dev->ops->get_bb_tbl)
- return -EINVAL;
-
- nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
-
- mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
- if (ret)
- goto err_sysblk;
-
- cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
- if (!cur)
- goto err_sysblk;
-
- /* Get the latest sysblk for each sysblk row */
- for (i = 0; i < s.nr_rows; i++) {
- found = 0;
- for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
- ppaidx = scan_ppa_idx(i, j);
- ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
- if (ret > 0) {
- s.act_blk[i] = j;
- found = 1;
- } else if (ret < 0)
- break;
- }
- }
-
- if (!found) {
- pr_err("nvm: no valid sysblks found to update\n");
- ret = -EINVAL;
- goto err_cur;
- }
-
- /*
- * All sysblocks found. Check that they have the same page id in their flash
- * blocks
- */
- for (i = 1; i < s.nr_rows; i++) {
- struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
- struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
-
- if (l.g.pg != r.g.pg) {
- pr_err("nvm: sysblks not on same page. Previous update failed.\n");
- ret = -EINVAL;
- goto err_cur;
- }
- }
-
- /*
- * Check that there hasn't been another update to the seqnr since we
- * began
- */
- if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
- pr_err("nvm: seq is not sequential\n");
- ret = -EINVAL;
- goto err_cur;
- }
-
- /*
- * When all pages in a block have been written, a new block is selected
- * and writing is performed on the new block.
- */
- if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
- dev->lps_per_blk - 1) {
- ret = nvm_prepare_new_sysblks(dev, &s);
- if (ret)
- goto err_cur;
- }
-
- ret = nvm_write_and_verify(dev, new, &s);
-err_cur:
- kfree(cur);
-err_sysblk:
- mutex_unlock(&dev->mlock);
-
- return ret;
-}
-
-int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
- struct sysblk_scan s;
- int ret;
-
- /*
- * 1. select master blocks and select first available blks
- * 2. get bad block list
- * 3. mark MAX_SYSBLKS block as host-based device allocated.
- * 4. write and verify data to block
- */
-
- if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
- return -EINVAL;
-
- if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
- pr_err("nvm: memory does not support SLC access\n");
- return -EINVAL;
- }
-
- /* Index all sysblocks and mark them as host-driven */
- nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
-
- mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
- if (ret)
- goto err_mark;
-
- ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
- if (ret)
- goto err_mark;
-
- /* Write to the first block of each row */
- ret = nvm_write_and_verify(dev, info, &s);
-err_mark:
- mutex_unlock(&dev->mlock);
- return ret;
-}
-
-static int factory_nblks(int nblks)
-{
- /* Round up to nearest BITS_PER_LONG */
- return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
-}
-
-static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
-{
- int nblks = factory_nblks(geo->blks_per_lun);
-
- return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
- BITS_PER_LONG;
-}
-
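factory_nblks() rounds the per-LUN block count up to a multiple of BITS_PER_LONG so that each LUN's slice of the erase bitmap starts on a word boundary, and factory_blk_offset() then turns a (channel, lun) pair into a word index into that bitmap. The rounding trick on its own:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

static int factory_nblks(int nblks)
{
	/* Round up to the next multiple of BITS_PER_LONG. */
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

int main(void)
{
	/* On a 64-bit build: 1000 -> 1024, 1024 -> 1024, 1025 -> 1088. */
	printf("%d %d %d\n", factory_nblks(1000), factory_nblks(1024),
	       factory_nblks(1025));
	return 0;
}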
-static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
- u8 *blks, int nr_blks,
- unsigned long *blk_bitmap, int flags)
-{
- int i, lunoff;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- lunoff = factory_blk_offset(&dev->geo, ppa);
-
-	/* non-set bits correspond to blocks that must be erased */
- for (i = 0; i < nr_blks; i++) {
- switch (blks[i]) {
- case NVM_BLK_T_FREE:
- if (flags & NVM_FACTORY_ERASE_ONLY_USER)
- set_bit(i, &blk_bitmap[lunoff]);
- break;
- case NVM_BLK_T_HOST:
- if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
- set_bit(i, &blk_bitmap[lunoff]);
- break;
- case NVM_BLK_T_GRWN_BAD:
- if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
- set_bit(i, &blk_bitmap[lunoff]);
- break;
- default:
- set_bit(i, &blk_bitmap[lunoff]);
- break;
- }
- }
-
- return 0;
-}
-
-static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
- int max_ppas, unsigned long *blk_bitmap)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr ppa;
- int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
- unsigned long *offset;
-
- while (!done) {
- done = 1;
- nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
- idx = factory_blk_offset(geo, ppa);
- offset = &blk_bitmap[idx];
-
- blkid = find_first_zero_bit(offset, geo->blks_per_lun);
- if (blkid >= geo->blks_per_lun)
- continue;
- set_bit(blkid, offset);
-
- ppa.g.blk = blkid;
- pr_debug("nvm: erase ppa (%u %u %u)\n",
- ppa.g.ch,
- ppa.g.lun,
- ppa.g.blk);
-
- erase_list[ppa_cnt] = ppa;
- ppa_cnt++;
- done = 0;
-
- if (ppa_cnt == max_ppas)
- return ppa_cnt;
- }
- }
-
- return ppa_cnt;
-}
-
-static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
- int flags)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr ppa;
- int ch, lun, nr_blks, ret = 0;
- u8 *blks;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
- ret = nvm_get_bb_tbl(dev, ppa, blks);
- if (ret)
- pr_err("nvm: failed bb tbl for ch%u lun%u\n",
- ppa.g.ch, ppa.g.blk);
-
- ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
- flags);
- if (ret)
- break;
- }
-
- kfree(blks);
- return ret;
-}
-
-int nvm_dev_factory(struct nvm_dev *dev, int flags)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppas;
- int ppa_cnt, ret = -ENOMEM;
- int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
- struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
- struct sysblk_scan s;
- unsigned long *blk_bitmap;
-
- blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
- GFP_KERNEL);
- if (!blk_bitmap)
- return ret;
-
- ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
- if (!ppas)
- goto err_blks;
-
- /* create list of blks to be erased */
- ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
- if (ret)
- goto err_ppas;
-
-	/* continue to erase until the list of blks is empty */
- while ((ppa_cnt =
- nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
- nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
-
- /* mark host reserved blocks free */
- if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
- nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
- mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
- if (!ret)
- ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
- mutex_unlock(&dev->mlock);
- }
-err_ppas:
- kfree(ppas);
-err_blks:
- kfree(blk_bitmap);
- return ret;
-}
-EXPORT_SYMBOL(nvm_dev_factory);
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 775527135b93..e199fd6c71ce 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -52,8 +52,8 @@ struct rackmeter_dma {
struct rackmeter_cpu {
struct delayed_work sniffer;
struct rackmeter *rm;
- cputime64_t prev_wall;
- cputime64_t prev_idle;
+ u64 prev_wall;
+ u64 prev_idle;
int zero;
} ____cacheline_aligned;
@@ -81,7 +81,7 @@ static int rackmeter_ignore_nice;
/* This is copied from cpufreq_ondemand, maybe we should put it in
* a common header somewhere
*/
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline u64 get_cpu_idle_time(unsigned int cpu)
{
u64 retval;
@@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work)
container_of(work, struct rackmeter_cpu, sniffer.work);
struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
- cputime64_t cur_jiffies, total_idle_ticks;
- unsigned int total_ticks, idle_ticks;
+ u64 cur_nsecs, total_idle_nsecs;
+ u64 total_nsecs, idle_nsecs;
int i, offset, load, cumm, pause;
- cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
- rcpu->prev_wall = cur_jiffies;
+ cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
+ total_nsecs = cur_nsecs - rcpu->prev_wall;
+ rcpu->prev_wall = cur_nsecs;
- total_idle_ticks = get_cpu_idle_time(cpu);
- idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
- idle_ticks = min(idle_ticks, total_ticks);
- rcpu->prev_idle = total_idle_ticks;
+ total_idle_nsecs = get_cpu_idle_time(cpu);
+ idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
+ idle_nsecs = min(idle_nsecs, total_nsecs);
+ rcpu->prev_idle = total_idle_nsecs;
/* We do a very dumb calculation to update the LEDs for now,
* we'll do better once we have actual PWM implemented
*/
- load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+ load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
offset = cpu << 3;
cumm = 0;
@@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
continue;
rcpu = &rm->cpu[cpu];
rcpu->prev_idle = get_cpu_idle_time(cpu);
- rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+ rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
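The rack-meter change above moves the idle accounting from cputime ticks to nanoseconds, so the LED load estimate becomes a 64-bit division. With a 100 ms sample window of which 25 ms were idle, load = 9 * (100 - 25) / 100 = 6, so six of the nine LED steps light up. The same arithmetic as a stand-alone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_nsecs = 100 * 1000 * 1000ULL;	/* 100 ms wall time  */
	uint64_t idle_nsecs  =  25 * 1000 * 1000ULL;	/*  25 ms of it idle */

	/* Same formula as the driver, scaled to the 9-step LED bar. */
	uint64_t load = 9 * (total_nsecs - idle_nsecs) / total_nsecs;

	printf("load = %llu of 9\n", (unsigned long long)load);	/* 6 of 9 */
	return 0;
}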
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 76d20875503c..709c9cc34369 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = op_is_flush(bio->bi_opf);
s->iop.wq = bcache_wq;
return s;
@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;
- if (bdi_congested(&q->backing_dev_info, bits))
+ if (bdi_congested(q->backing_dev_info, bits))
return 1;
if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3a19cbc8b230..85e3f21c2514 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
- q->backing_dev_info.congested_data = d;
+ q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
- dc->disk.disk->queue->backing_dev_info.ra_pages =
- max(dc->disk.disk->queue->backing_dev_info.ra_pages,
- q->backing_dev_info.ra_pages);
+ dc->disk.disk->queue->backing_dev_info->ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+ q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 84d2f0e4c754..d36d427a9efb 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -794,7 +794,7 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&c->free_buffer_wait, &wait);
- set_task_state(current, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
dm_bufio_unlock(c);
io_schedule();
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 624fe4319b24..e4c2c1a1e993 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -25,7 +25,7 @@
* defines a range of metadata versions that this module can handle.
*/
#define MIN_CACHE_VERSION 1
-#define MAX_CACHE_VERSION 1
+#define MAX_CACHE_VERSION 2
#define CACHE_METADATA_CACHE_SIZE 64
@@ -55,6 +55,7 @@ enum mapping_bits {
/*
* The data on the cache is different from that on the origin.
+ * This flag is only used by metadata format 1.
*/
M_DIRTY = 2
};
@@ -93,12 +94,18 @@ struct cache_disk_superblock {
__le32 write_misses;
__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
+
+ /*
+ * Metadata format 2 fields.
+ */
+ __le64 dirty_root;
} __packed;
struct dm_cache_metadata {
atomic_t ref_count;
struct list_head list;
+ unsigned version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -142,11 +149,18 @@ struct dm_cache_metadata {
bool fail_io:1;
/*
+ * Metadata format 2 fields.
+ */
+ dm_block_t dirty_root;
+ struct dm_disk_bitset dirty_info;
+
+ /*
* These structures are used when loading metadata. They're too
* big to put on the stack.
*/
struct dm_array_cursor mapping_cursor;
struct dm_array_cursor hint_cursor;
+ struct dm_bitset_cursor dirty_cursor;
};
/*-------------------------------------------------------------------
@@ -170,6 +184,7 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
static int check_metadata_version(struct cache_disk_superblock *disk_super)
{
uint32_t metadata_version = le32_to_cpu(disk_super->version);
+
if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
@@ -310,6 +325,11 @@ static void __copy_sm_root(struct dm_cache_metadata *cmd,
sizeof(cmd->metadata_space_map_root));
}
+static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
+{
+ return cmd->version >= 2;
+}
+
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
int r;
@@ -341,7 +361,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
- disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
+ disk_super->version = cpu_to_le32(cmd->version);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
@@ -362,6 +382,9 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->write_hits = cpu_to_le32(0);
disk_super->write_misses = cpu_to_le32(0);
+ if (separate_dirty_bits(cmd))
+ disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
+
return dm_tm_commit(cmd->tm, sblock);
}
@@ -382,6 +405,13 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
if (r < 0)
goto bad;
+ if (separate_dirty_bits(cmd)) {
+ dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
+ r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
+ if (r < 0)
+ goto bad;
+ }
+
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
@@ -407,9 +437,10 @@ bad:
static int __check_incompat_features(struct cache_disk_superblock *disk_super,
struct dm_cache_metadata *cmd)
{
- uint32_t features;
+ uint32_t incompat_flags, features;
- features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
+ incompat_flags = le32_to_cpu(disk_super->incompat_flags);
+ features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
if (features) {
DMERR("could not access metadata due to unsupported optional features (%lx).",
(unsigned long)features);
@@ -470,6 +501,7 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
}
__setup_mapping_info(cmd);
+ dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
sb_flags = le32_to_cpu(disk_super->flags);
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
@@ -548,6 +580,7 @@ static unsigned long clear_clean_shutdown(unsigned long flags)
static void read_superblock_fields(struct dm_cache_metadata *cmd,
struct cache_disk_superblock *disk_super)
{
+ cmd->version = le32_to_cpu(disk_super->version);
cmd->flags = le32_to_cpu(disk_super->flags);
cmd->root = le64_to_cpu(disk_super->mapping_root);
cmd->hint_root = le64_to_cpu(disk_super->hint_root);
@@ -567,6 +600,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
+ if (separate_dirty_bits(cmd))
+ cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
+
cmd->changed = false;
}
@@ -625,6 +661,13 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
*/
BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
+ if (separate_dirty_bits(cmd)) {
+ r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
+ &cmd->dirty_root);
+ if (r)
+ return r;
+ }
+
r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
&cmd->discard_root);
if (r)
@@ -649,6 +692,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
update_flags(disk_super, mutator);
disk_super->mapping_root = cpu_to_le64(cmd->root);
+ if (separate_dirty_bits(cmd))
+ disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
@@ -698,7 +743,8 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
- size_t policy_hint_size)
+ size_t policy_hint_size,
+ unsigned metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
@@ -709,6 +755,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
return ERR_PTR(-ENOMEM);
}
+ cmd->version = metadata_version;
atomic_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
@@ -757,7 +804,8 @@ static struct dm_cache_metadata *lookup(struct block_device *bdev)
static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
- size_t policy_hint_size)
+ size_t policy_hint_size,
+ unsigned metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
@@ -768,7 +816,8 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
if (cmd)
return cmd;
- cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+ cmd = metadata_open(bdev, data_block_size, may_format_device,
+ policy_hint_size, metadata_version);
if (!IS_ERR(cmd)) {
mutex_lock(&table_lock);
cmd2 = lookup(bdev);
@@ -800,10 +849,11 @@ static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
- size_t policy_hint_size)
+ size_t policy_hint_size,
+ unsigned metadata_version)
{
- struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
- may_format_device, policy_hint_size);
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
+ policy_hint_size, metadata_version);
if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
dm_cache_metadata_close(cmd);
@@ -829,8 +879,8 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
/*
* Checks that the given cache block is either unmapped or clean.
*/
-static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
- bool *result)
+static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
+ bool *result)
{
int r;
__le64 value;
@@ -838,10 +888,8 @@ static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
unsigned flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
- if (r) {
- DMERR("block_unmapped_or_clean failed");
+ if (r)
return r;
- }
unpack_value(value, &ob, &flags);
*result = !((flags & M_VALID) && (flags & M_DIRTY));
@@ -849,17 +897,19 @@ static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
return 0;
}
-static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
- dm_cblock_t begin, dm_cblock_t end,
- bool *result)
+static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
{
int r;
*result = true;
while (begin != end) {
- r = block_unmapped_or_clean(cmd, begin, result);
- if (r)
+ r = block_clean_combined_dirty(cmd, begin, result);
+ if (r) {
+ DMERR("block_clean_combined_dirty failed");
return r;
+ }
if (!*result) {
DMERR("cache block %llu is dirty",
@@ -873,6 +923,67 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
+static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
+{
+ int r;
+ bool dirty_flag;
+ *result = true;
+
+ r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
+ from_cblock(begin), &cmd->dirty_cursor);
+ if (r) {
+ DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
+ return r;
+ }
+
+ r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
+ if (r) {
+ DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
+ dm_bitset_cursor_end(&cmd->dirty_cursor);
+ return r;
+ }
+
+ while (begin != end) {
+ /*
+ * We assume that unmapped blocks have their dirty bit
+ * cleared.
+ */
+ dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
+ if (dirty_flag) {
+ DMERR("%s: cache block %llu is dirty", __func__,
+ (unsigned long long) from_cblock(begin));
+ dm_bitset_cursor_end(&cmd->dirty_cursor);
+ *result = false;
+ return 0;
+ }
+
+ r = dm_bitset_cursor_next(&cmd->dirty_cursor);
+ if (r) {
+ DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
+ dm_bitset_cursor_end(&cmd->dirty_cursor);
+ return r;
+ }
+
+ begin = to_cblock(from_cblock(begin) + 1);
+ }
+
+ dm_bitset_cursor_end(&cmd->dirty_cursor);
+
+ return 0;
+}
+
+static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
+{
+ if (separate_dirty_bits(cmd))
+ return blocks_are_clean_separate_dirty(cmd, begin, end, result);
+ else
+ return blocks_are_clean_combined_dirty(cmd, begin, end, result);
+}
+
static bool cmd_write_lock(struct dm_cache_metadata *cmd)
{
down_write(&cmd->root_lock);
@@ -950,8 +1061,18 @@ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
from_cblock(new_cache_size),
&null_mapping, &cmd->root);
- if (!r)
- cmd->cache_blocks = new_cache_size;
+ if (r)
+ goto out;
+
+ if (separate_dirty_bits(cmd)) {
+ r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
+ from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
+ false, &cmd->dirty_root);
+ if (r)
+ goto out;
+ }
+
+ cmd->cache_blocks = new_cache_size;
cmd->changed = true;
out:
@@ -995,14 +1116,6 @@ static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
from_dblock(b), &cmd->discard_root);
}
-static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
- bool *is_discarded)
-{
- return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
- from_dblock(b), &cmd->discard_root,
- is_discarded);
-}
-
static int __discard(struct dm_cache_metadata *cmd,
dm_dblock_t dblock, bool discard)
{
@@ -1032,22 +1145,38 @@ static int __load_discards(struct dm_cache_metadata *cmd,
load_discard_fn fn, void *context)
{
int r = 0;
- dm_block_t b;
- bool discard;
+ uint32_t b;
+ struct dm_bitset_cursor c;
- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
- dm_dblock_t dblock = to_dblock(b);
+ if (from_dblock(cmd->discard_nr_blocks) == 0)
+ /* nothing to do */
+ return 0;
- if (cmd->clean_when_opened) {
- r = __is_discarded(cmd, dblock, &discard);
- if (r)
- return r;
- } else
- discard = false;
+ if (cmd->clean_when_opened) {
+ r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
+ if (r)
+ return r;
- r = fn(context, cmd->discard_block_size, dblock, discard);
+ r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
+ from_dblock(cmd->discard_nr_blocks), &c);
if (r)
- break;
+ return r;
+
+ for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ r = fn(context, cmd->discard_block_size, to_dblock(b),
+ dm_bitset_cursor_get_value(&c));
+ if (r)
+ break;
+ }
+
+ dm_bitset_cursor_end(&c);
+
+ } else {
+ for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ r = fn(context, cmd->discard_block_size, to_dblock(b), false);
+ if (r)
+ return r;
+ }
}
return r;
@@ -1177,11 +1306,11 @@ static bool hints_array_available(struct dm_cache_metadata *cmd,
hints_array_initialized(cmd);
}
-static int __load_mapping(struct dm_cache_metadata *cmd,
- uint64_t cb, bool hints_valid,
- struct dm_array_cursor *mapping_cursor,
- struct dm_array_cursor *hint_cursor,
- load_mapping_fn fn, void *context)
+static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ uint64_t cb, bool hints_valid,
+ struct dm_array_cursor *mapping_cursor,
+ struct dm_array_cursor *hint_cursor,
+ load_mapping_fn fn, void *context)
{
int r = 0;
@@ -1206,8 +1335,51 @@ static int __load_mapping(struct dm_cache_metadata *cmd,
r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
le32_to_cpu(hint), hints_valid);
- if (r)
- DMERR("policy couldn't load cblock");
+ if (r) {
+ DMERR("policy couldn't load cache block %llu",
+ (unsigned long long) from_cblock(to_cblock(cb)));
+ }
+ }
+
+ return r;
+}
+
+static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ uint64_t cb, bool hints_valid,
+ struct dm_array_cursor *mapping_cursor,
+ struct dm_array_cursor *hint_cursor,
+ struct dm_bitset_cursor *dirty_cursor,
+ load_mapping_fn fn, void *context)
+{
+ int r = 0;
+
+ __le64 mapping;
+ __le32 hint = 0;
+
+ __le64 *mapping_value_le;
+ __le32 *hint_value_le;
+
+ dm_oblock_t oblock;
+ unsigned flags;
+ bool dirty;
+
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+ unpack_value(mapping, &oblock, &flags);
+
+ if (flags & M_VALID) {
+ if (hints_valid) {
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
+ }
+
+ dirty = dm_bitset_cursor_get_value(dirty_cursor);
+ r = fn(context, oblock, to_cblock(cb), dirty,
+ le32_to_cpu(hint), hints_valid);
+ if (r) {
+ DMERR("policy couldn't load cache block %llu",
+ (unsigned long long) from_cblock(to_cblock(cb)));
+ }
}
return r;
@@ -1238,10 +1410,28 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
}
}
+ if (separate_dirty_bits(cmd)) {
+ r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
+ from_cblock(cmd->cache_blocks),
+ &cmd->dirty_cursor);
+ if (r) {
+ dm_array_cursor_end(&cmd->hint_cursor);
+ dm_array_cursor_end(&cmd->mapping_cursor);
+ return r;
+ }
+ }
+
for (cb = 0; ; cb++) {
- r = __load_mapping(cmd, cb, hints_valid,
- &cmd->mapping_cursor, &cmd->hint_cursor,
- fn, context);
+ if (separate_dirty_bits(cmd))
+ r = __load_mapping_v2(cmd, cb, hints_valid,
+ &cmd->mapping_cursor,
+ &cmd->hint_cursor,
+ &cmd->dirty_cursor,
+ fn, context);
+ else
+ r = __load_mapping_v1(cmd, cb, hints_valid,
+ &cmd->mapping_cursor, &cmd->hint_cursor,
+ fn, context);
if (r)
goto out;
@@ -1264,12 +1454,23 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
goto out;
}
}
+
+ if (separate_dirty_bits(cmd)) {
+ r = dm_bitset_cursor_next(&cmd->dirty_cursor);
+ if (r) {
+ DMERR("dm_bitset_cursor_next for dirty failed");
+ goto out;
+ }
+ }
}
out:
dm_array_cursor_end(&cmd->mapping_cursor);
if (hints_valid)
dm_array_cursor_end(&cmd->hint_cursor);
+ if (separate_dirty_bits(cmd))
+ dm_bitset_cursor_end(&cmd->dirty_cursor);
+
return r;
}
@@ -1352,13 +1553,55 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
}
-int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
- dm_cblock_t cblock, bool dirty)
+static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+{
+ int r;
+ unsigned i;
+ for (i = 0; i < nr_bits; i++) {
+ r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int is_dirty_callback(uint32_t index, bool *value, void *context)
+{
+ unsigned long *bits = context;
+ *value = test_bit(index, bits);
+ return 0;
+}
+
+static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+{
+ int r = 0;
+
+ /* nr_bits is really just a sanity check */
+ if (nr_bits != from_cblock(cmd->cache_blocks)) {
+ DMERR("dirty bitset is wrong size");
+ return -EINVAL;
+ }
+
+ r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
+}
+
+int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+ unsigned nr_bits,
+ unsigned long *bits)
{
int r;
WRITE_LOCK(cmd);
- r = __dirty(cmd, cblock, dirty);
+ if (separate_dirty_bits(cmd))
+ r = __set_dirty_bits_v2(cmd, nr_bits, bits);
+ else
+ r = __set_dirty_bits_v1(cmd, nr_bits, bits);
WRITE_UNLOCK(cmd);
return r;
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 8528744195e5..4f07c08cf107 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -45,18 +45,20 @@
* As these various flags are defined they should be added to the
* following masks.
*/
+
#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
/*
- * Reopens or creates a new, empty metadata volume.
- * Returns an ERR_PTR on failure.
+ * Reopens or creates a new, empty metadata volume. Returns an ERR_PTR on
+ * failure. If reopening then features must match.
*/
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
- size_t policy_hint_size);
+ size_t policy_hint_size,
+ unsigned metadata_version);
void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
@@ -91,7 +93,8 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
load_mapping_fn fn,
void *context);
-int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
+int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+ unsigned nr_bits, unsigned long *bits);
struct dm_cache_statistics {
uint32_t read_hits;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e0839e..9c689b34e6e7 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -179,6 +179,7 @@ enum cache_io_mode {
struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
+ unsigned metadata_version;
};
struct cache_stats {
@@ -248,7 +249,7 @@ struct cache {
/*
* Fields for converting from sectors to blocks.
*/
- uint32_t sectors_per_block;
+ sector_t sectors_per_block;
int sectors_per_block_shift;
spinlock_t lock;
@@ -787,8 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags);
- if (cache->need_tick_bio &&
- !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+ if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
@@ -828,11 +828,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
return to_oblock(block_nr);
}
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
- return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
/*
* You must increment the deferred set whilst the prison cell is held. To
* encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +879,7 @@ static void issue(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- if (!bio_triggers_commit(cache, bio)) {
+ if (!op_is_flush(bio->bi_opf)) {
accounted_request(cache, bio);
return;
}
@@ -1069,8 +1064,7 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio_op(bio) == REQ_OP_DISCARD ||
- bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -2291,7 +2285,7 @@ static void do_waker(struct work_struct *ws)
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -2541,13 +2535,14 @@ static void init_features(struct cache_features *cf)
{
cf->mode = CM_WRITE;
cf->io_mode = CM_IO_WRITEBACK;
+ cf->metadata_version = 1;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
static struct dm_arg _args[] = {
- {0, 1, "Invalid number of cache feature arguments"},
+ {0, 2, "Invalid number of cache feature arguments"},
};
int r;
@@ -2573,6 +2568,9 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
else if (!strcasecmp(arg, "passthrough"))
cf->io_mode = CM_IO_PASSTHROUGH;
+ else if (!strcasecmp(arg, "metadata2"))
+ cf->metadata_version = 2;
+
else {
*error = "Unrecognised cache feature requested";
return -EINVAL;
@@ -2827,7 +2825,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
ca->block_size, may_format,
- dm_cache_policy_get_hint_size(cache->policy));
+ dm_cache_policy_get_hint_size(cache->policy),
+ ca->features.metadata_version);
if (IS_ERR(cmd)) {
*error = "Error creating metadata object";
r = PTR_ERR(cmd);
@@ -3172,21 +3171,16 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
static int write_dirty_bitset(struct cache *cache)
{
- unsigned i, r;
+ int r;
if (get_cache_mode(cache) >= CM_READ_ONLY)
return -EINVAL;
- for (i = 0; i < from_cblock(cache->cache_size); i++) {
- r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
- is_dirty(cache, to_cblock(i)));
- if (r) {
- metadata_operation_failed(cache, "dm_cache_set_dirty", r);
- return r;
- }
- }
+ r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
+ if (r)
+ metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
- return 0;
+ return r;
}
static int write_discard_bitset(struct cache *cache)
@@ -3547,11 +3541,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
+ DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
(unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
- cache->sectors_per_block,
+ (unsigned long long)cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
(unsigned) atomic_read(&cache->stats.read_hit),
@@ -3562,14 +3556,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
+ if (cache->features.metadata_version == 2)
+ DMEMIT("2 metadata2 ");
+ else
+ DMEMIT("1 ");
+
if (writethrough_mode(&cache->features))
- DMEMIT("1 writethrough ");
+ DMEMIT("writethrough ");
else if (passthrough_mode(&cache->features))
- DMEMIT("1 passthrough ");
+ DMEMIT("passthrough ");
else if (writeback_mode(&cache->features))
- DMEMIT("1 writeback ");
+ DMEMIT("writeback ");
else {
DMERR("%s: internal error: unknown io mode: %d",
@@ -3817,7 +3816,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe8be..136fda3ff9e5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -92,7 +92,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
- mempool_t *rq_pool;
struct bio_set *bs;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7c6c57216bf2..1cb2ca9dfae3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1210,14 +1210,14 @@ continue_locked:
spin_unlock_irq(&cc->write_thread_wait.lock);
if (unlikely(kthread_should_stop())) {
- set_task_state(current, TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(&cc->write_thread_wait, &wait);
break;
}
schedule();
- set_task_state(current, TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
spin_lock_irq(&cc->write_thread_wait.lock);
__remove_wait_queue(&cc->write_thread_wait, &wait);
goto continue_locked;
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
return PTR_ERR(key);
}
- rcu_read_lock();
+ down_read(&key->sem);
ukp = user_key_payload(key);
if (!ukp) {
- rcu_read_unlock();
+ up_read(&key->sem);
key_put(key);
kzfree(new_key_string);
return -EKEYREVOKED;
}
if (cc->key_size != ukp->datalen) {
- rcu_read_unlock();
+ up_read(&key->sem);
key_put(key);
kzfree(new_key_string);
return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
memcpy(cc->key, ukp->data, cc->key_size);
- rcu_read_unlock();
+ up_read(&key->sem);
key_put(key);
/* clear the flag since following operations may invalidate previously valid key */
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b2676cb8a..9fab33b113c4 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6400cffb986d..7f223dbed49f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -92,12 +92,6 @@ struct multipath {
unsigned queue_mode;
- /*
- * We must use a mempool of dm_mpath_io structs so that we
- * can resubmit bios on error.
- */
- mempool_t *mpio_pool;
-
struct mutex work_mutex;
struct work_struct trigger_event;
@@ -115,8 +109,6 @@ struct dm_mpath_io {
typedef int (*action_fn) (struct pgpath *pgpath);
-static struct kmem_cache *_mpio_cache;
-
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
- m->mpio_pool = NULL;
m->queue_mode = DM_TYPE_NONE;
m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- }
-
- if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
- unsigned min_ios = dm_get_reserved_rq_based_ios();
-
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool)
- return -ENOMEM;
- }
- else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
* bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
- mempool_destroy(m->mpio_pool);
kfree(m);
}
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
return info->ptr;
}
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
- struct dm_mpath_io *mpio;
-
- if (!m->mpio_pool) {
- /* Use blk-mq pdu memory requested via per_io_data_size */
- mpio = get_mpio(info);
- memset(mpio, 0, sizeof(*mpio));
- return mpio;
- }
-
- mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
- if (!mpio)
- return NULL;
-
- memset(mpio, 0, sizeof(*mpio));
- info->ptr = mpio;
-
- return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
- /* Only needed for non blk-mq (.request_fn) multipath */
- if (m->mpio_pool) {
- struct dm_mpath_io *mpio = info->ptr;
-
- info->ptr = NULL;
- mempool_free(mpio, m->mpio_pool);
- }
-}
-
static size_t multipath_per_bio_data_size(void)
{
return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -427,7 +376,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned long flags;
struct priority_group *pg;
struct pgpath *pgpath;
- bool bypassed = true;
+ unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +415,7 @@ check_current_pg:
*/
do {
list_for_each_entry(pg, &m->priority_groups, list) {
- if (pg->bypassed == bypassed)
+ if (pg->bypassed == !!bypassed)
continue;
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath)) {
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
/*
* Map cloned requests (request-based multipath)
*/
-static int __multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context,
- struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+ union map_info *map_context,
+ struct request **__clone)
{
struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE;
- size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+ size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
- struct dm_mpath_io *mpio;
+ struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct request *clone;
/* Do we need to select a new pgpath? */
pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return r;
}
- mpio = set_mpio(m, map_context);
- if (!mpio)
- /* ENOMEM, requeue */
- return r;
-
+ memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
- if (clone) {
- /*
- * Old request-based interface: allocated clone is passed in.
- * Used by: .request_fn stacked on .request_fn path(s).
- */
- clone->q = bdev_get_queue(bdev);
- clone->rq_disk = bdev->bd_disk;
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- } else {
- /*
- * blk-mq request-based interface; used by both:
- * .request_fn stacked on blk-mq path(s) and
- * blk-mq stacked on blk-mq path(s).
- */
- clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(clone)) {
- /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- clear_request_fn_mpio(m, map_context);
- return r;
- }
- clone->bio = clone->biotail = NULL;
- clone->rq_disk = bdev->bd_disk;
- clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- *__clone = clone;
+ clone = blk_get_request(bdev_get_queue(bdev),
+ rq->cmd_flags | REQ_NOMERGE,
+ GFP_ATOMIC);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+ return r;
}
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return DM_MAPIO_REMAPPED;
}
-static int multipath_map(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
-{
- return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
- union map_info *map_context,
- struct request **clone)
-{
- return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
static void multipath_release_clone(struct request *clone)
{
- blk_mq_free_request(clone);
+ blk_put_request(clone);
}
/*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_write_same_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
- else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- clear_request_fn_mpio(m, map_context);
return r;
}
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map_rq = multipath_map,
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
{
int r;
- /* allocate a slab for the dm_mpath_ios */
- _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
- if (!_mpio_cache)
- return -ENOMEM;
-
r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd:
bad_alloc_kmultipathd:
dm_unregister_target(&multipath_target);
bad_register_target:
- kmem_cache_destroy(_mpio_cache);
-
return r;
}
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
destroy_workqueue(kmultipathd);
dm_unregister_target(&multipath_target);
- kmem_cache_destroy(_mpio_cache);
}
module_init(dm_multipath_init);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b8f978e551d7..5c9e95d66f3b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -24,6 +24,11 @@
*/
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
+/*
+ * Minimum journal space 4 MiB in sectors.
+ */
+#define MIN_RAID456_JOURNAL_SPACE (4*2048)
+
static bool devices_handle_discard_safely = false;
/*
@@ -73,6 +78,9 @@ struct raid_dev {
#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
+/* New for v1.10.0 */
+#define __CTR_FLAG_JOURNAL_DEV 15 /* 2 */ /* Only with raid4/5/6! */
+
/*
* Flags for rs->ctr_flags field.
*/
@@ -91,6 +99,7 @@ struct raid_dev {
#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
+#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
/*
* Definitions of various constructor flags to
@@ -163,7 +172,8 @@ struct raid_dev {
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
- CTR_FLAG_DATA_OFFSET)
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_JOURNAL_DEV)
#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
CTR_FLAG_REBUILD | \
@@ -173,7 +183,8 @@ struct raid_dev {
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
- CTR_FLAG_DATA_OFFSET)
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_JOURNAL_DEV)
/* ...valid options definitions per raid level */
/*
@@ -222,6 +233,12 @@ struct raid_set {
struct raid_type *raid_type;
struct dm_target_callbacks callbacks;
+ /* Optional raid4/5/6 journal device */
+ struct journal_dev {
+ struct dm_dev *dev;
+ struct md_rdev rdev;
+ } journal_dev;
+
struct raid_dev dev[0];
};
@@ -306,6 +323,7 @@ static struct arg_name_flag {
{ CTR_FLAG_DATA_OFFSET, "data_offset"},
{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
+ { CTR_FLAG_JOURNAL_DEV, "journal_dev" },
};
/* Return argument name string for given @flag */
@@ -370,7 +388,7 @@ static bool rs_is_reshapable(struct raid_set *rs)
/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
- return rs->md.recovery_cp < rs->dev[0].rdev.sectors;
+ return rs->md.recovery_cp < rs->md.dev_sectors;
}
/* Return true, if raid set in @rs is reshaping */
@@ -627,7 +645,8 @@ static void rs_set_capacity(struct raid_set *rs)
* is unintended in case of out-of-place reshaping
*/
rdev_for_each(rdev, mddev)
- rdev->sectors = mddev->dev_sectors;
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors = mddev->dev_sectors;
set_capacity(gendisk, mddev->array_sectors);
revalidate_disk(gendisk);
@@ -713,6 +732,11 @@ static void raid_set_free(struct raid_set *rs)
{
int i;
+ if (rs->journal_dev.dev) {
+ md_rdev_clear(&rs->journal_dev.rdev);
+ dm_put_device(rs->ti, rs->journal_dev.dev);
+ }
+
for (i = 0; i < rs->raid_disks; i++) {
if (rs->dev[i].meta_dev)
dm_put_device(rs->ti, rs->dev[i].meta_dev);
@@ -760,10 +784,11 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
rs->dev[i].data_dev = NULL;
/*
- * There are no offsets, since there is a separate device
- * for data and metadata.
+ * There are no offsets initially.
+ * Out of place reshape will set them accordingly.
*/
rs->dev[i].rdev.data_offset = 0;
+ rs->dev[i].rdev.new_data_offset = 0;
rs->dev[i].rdev.mddev = &rs->md;
arg = dm_shift_arg(as);
@@ -821,6 +846,9 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
rebuild++;
}
+ if (rs->journal_dev.dev)
+ list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
+
if (metadata_available) {
rs->md.external = 0;
rs->md.persistent = 1;
@@ -1026,6 +1054,8 @@ too_many:
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
* [region_size <sectors>] Defines granularity of bitmap
+ * [journal_dev <dev>] raid4/5/6 journaling device
+ * (i.e. write hole closing log)
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
@@ -1133,7 +1163,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
/*
* Parameters that take a string value are checked here.
*/
-
+ /* "raid10_format {near|offset|far} */
if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
rs->ti->error = "Only one 'raid10_format' argument pair allowed";
@@ -1151,6 +1181,41 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
continue;
}
+ /* "journal_dev dev" */
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
+ int r;
+ struct md_rdev *jdev;
+
+ if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
+ rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
+ return -EINVAL;
+ }
+ if (!rt_is_raid456(rt)) {
+ rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
+ return -EINVAL;
+ }
+ r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+ &rs->journal_dev.dev);
+ if (r) {
+ rs->ti->error = "raid4/5/6 journal device lookup failure";
+ return r;
+ }
+ jdev = &rs->journal_dev.rdev;
+ md_rdev_init(jdev);
+ jdev->mddev = &rs->md;
+ jdev->bdev = rs->journal_dev.dev->bdev;
+ jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+ if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
+ rs->ti->error = "No space for raid4/5/6 journal";
+ return -ENOSPC;
+ }
+ set_bit(Journal, &jdev->flags);
+ continue;
+ }
+
+ /*
+ * Parameters with number values from here on.
+ */
if (kstrtoint(arg, 10, &value) < 0) {
rs->ti->error = "Bad numerical argument given in raid params";
return -EINVAL;
@@ -1425,6 +1490,25 @@ static unsigned int rs_data_stripes(struct raid_set *rs)
return rs->raid_disks - rs->raid_type->parity_devs;
}
+/*
+ * Retrieve rdev->sectors from any valid raid device of @rs
+ * to allow userpace to pass in arbitray "- -" device tupples.
+ */
+static sector_t __rdev_sectors(struct raid_set *rs)
+{
+ int i;
+
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ struct md_rdev *rdev = &rs->dev[i].rdev;
+
+ if (!test_bit(Journal, &rdev->flags) &&
+ rdev->bdev && rdev->sectors)
+ return rdev->sectors;
+ }
+
+ BUG(); /* Constructor ensures we got some. */
+}
+
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
@@ -1468,7 +1552,8 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
array_sectors = (data_stripes + delta_disks) * dev_sectors;
rdev_for_each(rdev, mddev)
- rdev->sectors = dev_sectors;
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors = dev_sectors;
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
@@ -1510,9 +1595,9 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
else if (dev_sectors == MaxSector)
/* Prevent recovery */
__rs_setup_recovery(rs, MaxSector);
- else if (rs->dev[0].rdev.sectors < dev_sectors)
+ else if (__rdev_sectors(rs) < dev_sectors)
/* Grown raid set */
- __rs_setup_recovery(rs, rs->dev[0].rdev.sectors);
+ __rs_setup_recovery(rs, __rdev_sectors(rs));
else
__rs_setup_recovery(rs, MaxSector);
}
@@ -1851,18 +1936,21 @@ static int rs_check_reshape(struct raid_set *rs)
return -EPERM;
}
-static int read_disk_sb(struct md_rdev *rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
BUG_ON(!rdev->sb_page);
- if (rdev->sb_loaded)
+ if (rdev->sb_loaded && !force_reload)
return 0;
+ rdev->sb_loaded = 0;
+
if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
md_error(rdev->mddev, rdev);
- return -EINVAL;
+ set_bit(Faulty, &rdev->flags);
+ return -EIO;
}
rdev->sb_loaded = 1;
@@ -1990,7 +2078,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return -EINVAL;
}
- r = read_disk_sb(rdev, rdev->sb_size);
+ r = read_disk_sb(rdev, rdev->sb_size, false);
if (r)
return r;
@@ -2146,6 +2234,9 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
*/
d = 0;
rdev_for_each(r, mddev) {
+ if (test_bit(Journal, &rdev->flags))
+ continue;
+
if (test_bit(FirstUse, &r->flags))
new_devs++;
@@ -2201,7 +2292,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
*/
sb_retrieve_failed_devices(sb, failed_devices);
rdev_for_each(r, mddev) {
- if (!r->sb_page)
+ if (test_bit(Journal, &rdev->flags) ||
+ !r->sb_page)
continue;
sb2 = page_address(r->sb_page);
sb2->failed_devices = 0;
@@ -2253,7 +2345,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
struct mddev *mddev = &rs->md;
struct dm_raid_superblock *sb;
- if (rs_is_raid0(rs) || !rdev->sb_page)
+ if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
return 0;
sb = page_address(rdev->sb_page);
@@ -2278,7 +2370,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
/* Enable bitmap creation for RAID levels != 0 */
mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
- rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
+ mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
/* Retrieve device size stored in superblock to be prepared for shrink */
@@ -2316,21 +2408,22 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
int r;
- struct raid_dev *dev;
- struct md_rdev *rdev, *tmp, *freshest;
+ struct md_rdev *rdev, *freshest;
struct mddev *mddev = &rs->md;
freshest = NULL;
- rdev_for_each_safe(rdev, tmp, mddev) {
+ rdev_for_each(rdev, mddev) {
+ if (test_bit(Journal, &rdev->flags))
+ continue;
+
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
* though it were new. This is the intended effect
* of the "sync" directive.
*
- * When reshaping capability is added, we must ensure
- * that the "sync" directive is disallowed during the
- * reshape.
+ * With reshaping capability added, we must ensure that
+ * the "sync" directive is disallowed during the reshape.
*/
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
@@ -2347,6 +2440,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
case 0:
break;
default:
+ /* This is a failure to read the superblock from the metadata device. */
/*
* We have to keep any raid0 data/metadata device pairs or
* the MD raid0 personality will fail to start the array.
@@ -2354,33 +2448,16 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (rs_is_raid0(rs))
continue;
- dev = container_of(rdev, struct raid_dev, rdev);
- if (dev->meta_dev)
- dm_put_device(ti, dev->meta_dev);
-
- dev->meta_dev = NULL;
- rdev->meta_bdev = NULL;
-
- if (rdev->sb_page)
- put_page(rdev->sb_page);
-
- rdev->sb_page = NULL;
-
- rdev->sb_loaded = 0;
-
/*
- * We might be able to salvage the data device
- * even though the meta device has failed. For
- * now, we behave as though '- -' had been
- * set for this device in the table.
+ * We keep the dm_devs to be able to emit the device tuple
+ * properly on the table line in raid_status() (rather than
+ * mistakenly acting as if '- -' got passed into the constructor).
+ *
+ * The rdev has to stay on the same_set list to allow for
+ * the attempt to restore faulty devices on second resume.
*/
- if (dev->data_dev)
- dm_put_device(ti, dev->data_dev);
-
- dev->data_dev = NULL;
- rdev->bdev = NULL;
-
- list_del(&rdev->same_set);
+ rdev->raid_disk = rdev->saved_raid_disk = -1;
+ break;
}
}
@@ -2401,7 +2478,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
return -EINVAL;
rdev_for_each(rdev, mddev)
- if ((rdev != freshest) && super_validate(rs, rdev))
+ if (!test_bit(Journal, &rdev->flags) &&
+ rdev != freshest &&
+ super_validate(rs, rdev))
return -EINVAL;
return 0;
}
@@ -2488,10 +2567,12 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
return -ENOSPC;
}
out:
- /* Adjust data offsets on all rdevs */
+ /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
- rdev->data_offset = data_offset;
- rdev->new_data_offset = new_data_offset;
+ if (!test_bit(Journal, &rdev->flags)) {
+ rdev->data_offset = data_offset;
+ rdev->new_data_offset = new_data_offset;
+ }
}
return 0;
@@ -2504,8 +2585,10 @@ static void __reorder_raid_disk_indexes(struct raid_set *rs)
struct md_rdev *rdev;
rdev_for_each(rdev, &rs->md) {
- rdev->raid_disk = i++;
- rdev->saved_raid_disk = rdev->new_raid_disk = -1;
+ if (!test_bit(Journal, &rdev->flags)) {
+ rdev->raid_disk = i++;
+ rdev->saved_raid_disk = rdev->new_raid_disk = -1;
+ }
}
}
@@ -2845,7 +2928,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- calculated_dev_sectors = rs->dev[0].rdev.sectors;
+ calculated_dev_sectors = rs->md.dev_sectors;
/*
* Backup any new raid set level, layout, ...
@@ -2858,7 +2941,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- resize = calculated_dev_sectors != rs->dev[0].rdev.sectors;
+ resize = calculated_dev_sectors != __rdev_sectors(rs);
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -2902,6 +2985,13 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+ /* We can't takeover a journaled raid4/5/6 */
+ if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
+ ti->error = "Can't takeover a journaled raid4/5/6 set";
+ r = -EPERM;
+ goto bad;
+ }
+
/*
* If a takeover is needed, userspace sets any additional
* devices to rebuild and we can check for a valid request here.
@@ -2924,6 +3014,18 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs_set_new(rs);
} else if (rs_reshape_requested(rs)) {
/*
+ * No need to check for 'ongoing' takeover here, because takeover
+ * is an instant operation as opposed to an ongoing reshape.
+ */
+
+ /* We can't reshape a journaled raid4/5/6 */
+ if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
+ ti->error = "Can't reshape a journaled raid4/5/6 set";
+ r = -EPERM;
+ goto bad;
+ }
+
+ /*
* We can only prepare for a reshape here, because the
* raid set needs to run to provide the respective reshape
* check functions via its MD personality instance.
@@ -3071,18 +3173,23 @@ static const char *decipher_sync_action(struct mddev *mddev)
}
/*
- * Return status string @rdev
+ * Return status string for @rdev
*
* Status characters:
*
- * 'D' = Dead/Failed device
+ * 'D' = Dead/Failed raid set component or raid4/5/6 journal device
* 'a' = Alive but not in-sync
- * 'A' = Alive and in-sync
+ * 'A' = Alive and in-sync raid set component or alive raid4/5/6 journal device
+ * '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
*/
static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync)
{
- if (test_bit(Faulty, &rdev->flags))
+ if (!rdev->bdev)
+ return "-";
+ else if (test_bit(Faulty, &rdev->flags))
return "D";
+ else if (test_bit(Journal, &rdev->flags))
+ return "A";
else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
return "a";
else
@@ -3151,7 +3258,8 @@ static sector_t rs_get_progress(struct raid_set *rs,
* being initialized.
*/
rdev_for_each(rdev, mddev)
- if (!test_bit(In_sync, &rdev->flags))
+ if (!test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags))
*array_in_sync = true;
#if 0
r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
@@ -3183,7 +3291,6 @@ static void raid_status(struct dm_target *ti, status_type_t type,
sector_t progress, resync_max_sectors, resync_mismatches;
const char *sync_action;
struct raid_type *rt;
- struct md_rdev *rdev;
switch (type) {
case STATUSTYPE_INFO:
@@ -3204,9 +3311,9 @@ static void raid_status(struct dm_target *ti, status_type_t type,
atomic64_read(&mddev->resync_mismatches) : 0;
sync_action = decipher_sync_action(&rs->md);
- /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
- rdev_for_each(rdev, mddev)
- DMEMIT(__raid_dev_status(rdev, array_in_sync));
+ /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
+ for (i = 0; i < rs->raid_disks; i++)
+ DMEMIT(__raid_dev_status(&rs->dev[i].rdev, array_in_sync));
/*
* In-sync/Reshape ratio:
@@ -3252,6 +3359,12 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* so retrieving it from the first raid disk is sufficient.
*/
DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
+
+ /*
+ * v1.10.0+:
+ */
+ DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
+ __raid_dev_status(&rs->journal_dev.rdev, 0) : "-");
break;
case STATUSTYPE_TABLE:
@@ -3265,7 +3378,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
raid_param_cnt += rebuild_disks * 2 +
write_mostly_params +
hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
- hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
+ (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0);
/* Emit table line */
DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
@@ -3312,6 +3426,9 @@ static void raid_status(struct dm_target *ti, status_type_t type,
if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
mddev->sync_speed_min);
+ if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
+ DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
+ __get_dev_name(rs->journal_dev.dev));
DMEMIT(" %d", rs->raid_disks);
for (i = 0; i < rs->raid_disks; i++)
DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
@@ -3347,10 +3464,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
else {
if (!strcasecmp(argv[0], "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- else if (!!strcasecmp(argv[0], "repair"))
+ else if (!strcasecmp(argv[0], "repair")) {
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ } else
return -EINVAL;
- set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
if (mddev->ro == 2) {
/* A write to sync_action is enough to justify
@@ -3427,11 +3545,14 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < mddev->raid_disks; i++) {
r = &rs->dev[i].rdev;
- if (test_bit(Faulty, &r->flags) && r->sb_page &&
- sync_page_io(r, 0, r->sb_size, r->sb_page,
- REQ_OP_READ, 0, true)) {
+ /* HM FIXME: enhance journal device recovery processing */
+ if (test_bit(Journal, &r->flags))
+ continue;
+
+ if (test_bit(Faulty, &r->flags) &&
+ r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
DMINFO("Faulty %s device #%d has readable super block."
" Attempting to revive it.",
rs->raid_type->name, i);
@@ -3445,22 +3566,26 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
* '>= 0' - meaning we must call this function
* ourselves.
*/
- if ((r->raid_disk >= 0) &&
- (mddev->pers->hot_remove_disk(mddev, r) != 0))
- /* Failed to revive this device, try next */
- continue;
-
- r->raid_disk = i;
- r->saved_raid_disk = i;
flags = r->flags;
+ clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
+ if (r->raid_disk >= 0) {
+ if (mddev->pers->hot_remove_disk(mddev, r)) {
+ /* Failed to revive this device, try next */
+ r->flags = flags;
+ continue;
+ }
+ } else
+ r->raid_disk = r->saved_raid_disk = i;
+
clear_bit(Faulty, &r->flags);
clear_bit(WriteErrorSeen, &r->flags);
- clear_bit(In_sync, &r->flags);
+
if (mddev->pers->hot_add_disk(mddev, r)) {
- r->raid_disk = -1;
- r->saved_raid_disk = -1;
+ /* Failed to revive this device, try next */
+ r->raid_disk = r->saved_raid_disk = -1;
r->flags = flags;
} else {
+ clear_bit(In_sync, &r->flags);
r->recovery_offset = 0;
set_bit(i, (void *) cleared_failed_devices);
cleared = true;
@@ -3473,6 +3598,9 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
uint64_t failed_devices[DISKS_ARRAY_ELEMS];
rdev_for_each(r, &rs->md) {
+ if (test_bit(Journal, &r->flags))
+ continue;
+
sb = page_address(r->sb_page);
sb_retrieve_failed_devices(sb, failed_devices);
@@ -3651,7 +3779,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 1},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 6c25213ab38c..bdbb7e6e8212 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,8 +17,8 @@
#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
-#define RR_MIN_IO 1000
-#define RR_VERSION "1.1.0"
+#define RR_MIN_IO 1
+#define RR_VERSION "1.2.0"
/*-----------------------------------------------------------------
* Path-handling code, paths are held in lists
@@ -47,44 +47,19 @@ struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
spinlock_t lock;
- struct dm_path * __percpu *current_path;
- struct percpu_counter repeat_count;
};
-static void set_percpu_current_path(struct selector *s, struct dm_path *path)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(s->current_path, cpu) = path;
-}
-
static struct selector *alloc_selector(void)
{
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return NULL;
-
- INIT_LIST_HEAD(&s->valid_paths);
- INIT_LIST_HEAD(&s->invalid_paths);
- spin_lock_init(&s->lock);
-
- s->current_path = alloc_percpu(struct dm_path *);
- if (!s->current_path)
- goto out_current_path;
- set_percpu_current_path(s, NULL);
-
- if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
- goto out_repeat_count;
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->invalid_paths);
+ spin_lock_init(&s->lock);
+ }
return s;
-
-out_repeat_count:
- free_percpu(s->current_path);
-out_current_path:
- kfree(s);
- return NULL;;
}
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -105,8 +80,6 @@ static void rr_destroy(struct path_selector *ps)
free_paths(&s->valid_paths);
free_paths(&s->invalid_paths);
- free_percpu(s->current_path);
- percpu_counter_destroy(&s->repeat_count);
kfree(s);
ps->context = NULL;
}
@@ -157,6 +130,11 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
+ if (repeat_count > 1) {
+ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+ repeat_count = 1;
+ }
+
/* allocate the path */
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
if (!pi) {
@@ -183,9 +161,6 @@ static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
struct path_info *pi = p->pscontext;
spin_lock_irqsave(&s->lock, flags);
- if (p == *this_cpu_ptr(s->current_path))
- set_percpu_current_path(s, NULL);
-
list_move(&pi->list, &s->invalid_paths);
spin_unlock_irqrestore(&s->lock, flags);
}
@@ -208,29 +183,15 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
unsigned long flags;
struct selector *s = ps->context;
struct path_info *pi = NULL;
- struct dm_path *current_path = NULL;
-
- local_irq_save(flags);
- current_path = *this_cpu_ptr(s->current_path);
- if (current_path) {
- percpu_counter_dec(&s->repeat_count);
- if (percpu_counter_read_positive(&s->repeat_count) > 0) {
- local_irq_restore(flags);
- return current_path;
- }
- }
- spin_lock(&s->lock);
+ spin_lock_irqsave(&s->lock, flags);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
- percpu_counter_set(&s->repeat_count, pi->repeat_count);
- set_percpu_current_path(s, pi->path);
- current_path = pi->path;
}
spin_unlock_irqrestore(&s->lock, flags);
- return current_path;
+ return pi ? pi->path : NULL;
}
static struct path_selector_type rr_ps = {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d7275fb541a..67d76f21fecd 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
dm_mq_stop_queue(q);
}
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
- mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
- mempool_free(rq, md->rq_pool);
-}
-
/*
* Partial completion handling for request-based dm
*/
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
- return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+ return blk_mq_rq_to_pdu(rq);
}
static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
-
- blk_rq_unprep_clone(clone);
-
- /*
- * It is possible for a clone_old_rq() allocated clone to
- * get passed in -- it may not yet have a request_queue.
- * This is known to occur if the error target replaces
- * a multipath target that has a request_fn queue stacked
- * on blk-mq queue(s).
- */
- if (clone->q && clone->q->mq_ops)
- /* stacked on blk-mq queue(s) */
- tio->ti->type->release_clone_rq(clone);
- else if (!md->queue->mq_ops)
- /* request_fn queue stacked on request_fn queue(s) */
- free_old_clone_request(md, clone);
-
- if (!md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
/*
* Complete the clone and the original request.
* Must be called without clone's queue lock held,
@@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
-
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
- }
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
- free_rq_clone(clone);
rq_end_stats(md, rq);
if (!rq->q->mq_ops)
blk_end_request_all(rq, error);
@@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
rq_completed(md, rw, true);
}
-static void dm_unprep_request(struct request *rq)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
-
- if (!rq->q->mq_ops) {
- rq->special = NULL;
- rq->rq_flags &= ~RQF_DONTPREP;
- }
-
- if (clone)
- free_rq_clone(clone);
- else if (!tio->md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
/*
* Requeue the original request of a clone.
*/
@@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
int rw = rq_data_dir(rq);
rq_end_stats(md, rq);
- dm_unprep_request(rq);
+ if (tio->clone) {
+ blk_rq_unprep_clone(tio->clone);
+ tio->ti->type->release_clone_rq(tio->clone);
+ }
if (!rq->q->mq_ops)
dm_old_requeue_request(rq);
@@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
if (!clone) {
rq_end_stats(tio->md, rq);
rw = rq_data_dir(rq);
- if (!rq->q->mq_ops) {
+ if (!rq->q->mq_ops)
blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rw, false);
- free_old_rq_tio(tio);
- } else {
+ else
blk_mq_end_request(rq, tio->error);
- rq_completed(tio->md, rw, false);
- }
+ rq_completed(tio->md, rw, false);
return;
}
@@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- if (!clone->q->mq_ops) {
- /*
- * For just cleaning up the information of the queue in which
- * the clone was dispatched.
- * The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (RQF_ALLOCED isn't set).
- */
- __blk_put_request(clone->q, clone);
- }
-
/*
* Actual request completion is done in a softirq context which doesn't
* hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
if (r)
return r;
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
@@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- /*
- * Create clone for use with .request_fn request_queue
- */
- struct request *clone;
-
- clone = alloc_old_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
-
- blk_rq_init(NULL, clone);
- if (setup_clone(clone, rq, tio, gfp_mask)) {
- /* -ENOMEM */
- free_old_clone_request(md, clone);
- return NULL;
- }
-
- return clone;
-}
-
static void map_tio_request(struct kthread_work *work);
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
kthread_init_work(&tio->work, map_tio_request);
}
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
- struct mapped_device *md,
- gfp_t gfp_mask)
-{
- struct dm_rq_target_io *tio;
- int srcu_idx;
- struct dm_table *table;
-
- tio = alloc_old_rq_tio(md, gfp_mask);
- if (!tio)
- return NULL;
-
- init_tio(tio, rq, md);
-
- table = dm_get_live_table(md, &srcu_idx);
- /*
- * Must clone a request if this .request_fn DM device
- * is stacked on .request_fn device(s).
- */
- if (!dm_table_all_blk_mq_devices(table)) {
- if (!clone_old_rq(rq, md, tio, gfp_mask)) {
- dm_put_live_table(md, srcu_idx);
- free_old_rq_tio(tio);
- return NULL;
- }
- }
- dm_put_live_table(md, srcu_idx);
-
- return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
-
- if (unlikely(rq->special)) {
- DMWARN("Already has something in rq->special.");
- return BLKPREP_KILL;
- }
-
- tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
- if (!tio)
- return BLKPREP_DEFER;
-
- rq->special = tio;
- rq->rq_flags |= RQF_DONTPREP;
-
- return BLKPREP_OK;
-}
-
/*
* Returns:
* DM_MAPIO_* : the request has been processed as indicated
@@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
struct request *rq = tio->orig;
struct request *clone = NULL;
- if (tio->clone) {
- clone = tio->clone;
- r = ti->type->map_rq(ti, clone, &tio->info);
- if (r == DM_MAPIO_DELAY_REQUEUE)
- return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
- } else {
- r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
- if (r < 0) {
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
- if (r == DM_MAPIO_REMAPPED &&
- setup_clone(clone, rq, tio, GFP_ATOMIC)) {
- /* -ENOMEM */
- ti->type->release_clone_rq(clone);
- return DM_MAPIO_REQUEUE;
- }
- }
-
+ r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
break;
case DM_MAPIO_REMAPPED:
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ /* -ENOMEM */
+ ti->type->release_clone_rq(clone);
+ return DM_MAPIO_REQUEUE;
+ }
+
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
+ return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+ return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
static void map_tio_request(struct kthread_work *work)
{
struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@@ -779,6 +626,10 @@ static void dm_old_request_fn(struct request_queue *q)
int srcu_idx;
struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ dm_put_live_table(md, srcu_idx);
+ return;
+ }
ti = dm_table_find_target(map, pos);
dm_put_live_table(md, srcu_idx);
}
@@ -810,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
dm_start_request(md, rq);
tio = tio_from_request(rq);
+ init_tio(tio, rq, md);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
@@ -820,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
/*
* Fully initialize a .request_fn request-based queue.
*/
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
+ struct dm_target *immutable_tgt;
+
/* Fully initialize the queue */
- if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+ md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+ md->queue->rq_alloc_data = md;
+ md->queue->request_fn = dm_old_request_fn;
+ md->queue->init_rq_fn = dm_rq_init_rq;
+
+ immutable_tgt = dm_table_get_immutable_target(t);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->queue->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+ if (blk_init_allocated_queue(md->queue) < 0)
return -EINVAL;
/* disable dm_old_request_fn's merge heuristic by default */
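For readers unfamiliar with the per-request pdu mechanism the hunk above switches to, here is a minimal sketch of the same cmd_size/init_rq_fn pattern applied to an invented driver (struct foo_device, foo_request_fn and foo_rq_pdu are hypothetical names; only cmd_size, rq_alloc_data, init_rq_fn, request_fn, blk_mq_rq_to_pdu() and blk_init_allocated_queue() come from the patch itself):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct foo_device;				/* hypothetical driver state */
static void foo_request_fn(struct request_queue *q);	/* dispatch loop, not shown */

struct foo_rq_pdu {
	struct foo_device *fd;			/* back-pointer, like tio->md */
};

static int foo_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	struct foo_rq_pdu *pdu = blk_mq_rq_to_pdu(rq);

	pdu->fd = q->rq_alloc_data;		/* stashed at queue-init time */
	return 0;
}

static int foo_init_queue(struct foo_device *fd, struct request_queue *q)
{
	q->cmd_size = sizeof(struct foo_rq_pdu);	/* pdu allocated with every request */
	q->rq_alloc_data = fd;
	q->request_fn = foo_request_fn;
	q->init_rq_fn = foo_init_rq;

	return blk_init_allocated_queue(q) < 0 ? -EINVAL : 0;
}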
@@ -831,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */
kthread_init_worker(&md->kworker);
@@ -852,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
- struct mapped_device *md = data;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
- /*
- * Must initialize md member of tio, otherwise it won't
- * be available in dm_mq_queue_rq.
- */
- tio->md = md;
-
- if (md->init_tio_pdu) {
- /* target-specific per-io data is immediately after the tio */
- tio->info.ptr = tio + 1;
- }
-
- return 0;
+ return __dm_rq_init_rq(data, rq);
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 4da06cae7bad..f0020d21b95f 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
bool dm_use_blk_mq_default(void);
bool dm_use_blk_mq(struct mapped_device *md);
-int dm_old_init_request_queue(struct mapped_device *md);
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 38b05f23b96c..0250e7e521ab 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -175,6 +175,7 @@ static void dm_stat_free(struct rcu_head *head)
int cpu;
struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+ kfree(s->histogram_boundaries);
kfree(s->program_id);
kfree(s->aux_data);
for_each_possible_cpu(cpu) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a427de23ed2..3ad16d9c9d5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
char b[BDEVNAME_SIZE];
if (likely(q))
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 710ae28fd618..43d3445b121d 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO;
}
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
- union map_info *map_context)
-{
- return -EIO;
-}
-
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **clone)
@@ -161,7 +155,6 @@ static struct target_type error_target = {
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
- .map_rq = io_err_map_rq,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
.direct_access = io_err_direct_access,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c12a9db..2b266a2b5035 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+ return op_is_flush(bio->bi_opf) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD)
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD))
+ if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+ bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD) {
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2714,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
return 1;
q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3086da5664f3..9f37d7fc2786 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
*/
struct dm_md_mempools {
mempool_t *io_pool;
- mempool_t *rq_pool;
struct bio_set *bs;
};
@@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (r > 0) {
/*
- * Target determined this ioctl is being issued against
- * a logical partition of the parent bdev; so extra
- * validation is needed.
+ * Target determined this ioctl is being issued against a
+ * subset of the parent bdev; require extra privileges.
*/
- r = scsi_verify_blk_ioctl(NULL, cmd);
- if (r)
+ if (!capable(CAP_SYS_RAWIO)) {
+ DMWARN_LIMIT(
+ "%s: sending ioctl %x to DM device without required privilege.",
+ current->comm, cmd);
+ r = -ENOIOCTLCMD;
goto out;
+ }
}
r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@@ -972,10 +974,61 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+/*
+ * Flush current->bio_list when the target map method blocks.
+ * This fixes deadlocks in snapshot and possibly in other targets.
+ */
+struct dm_offload {
+ struct blk_plug plug;
+ struct blk_plug_cb cb;
+};
+
+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+ struct bio_list list;
+ struct bio *bio;
+
+ INIT_LIST_HEAD(&o->cb.list);
+
+ if (unlikely(!current->bio_list))
+ return;
+
+ list = *current->bio_list;
+ bio_list_init(current->bio_list);
+
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(current->bio_list, bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
+ }
+}
+
+static void dm_offload_start(struct dm_offload *o)
+{
+ blk_start_plug(&o->plug);
+ o->cb.callback = flush_current_bio_list;
+ list_add(&o->cb.list, &current->plug->cb_list);
+}
+
+static void dm_offload_end(struct dm_offload *o)
+{
+ list_del(&o->cb.list);
+ blk_finish_plug(&o->plug);
+}
+
static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
+ struct dm_offload o;
struct bio *clone = &tio->clone;
struct dm_target *ti = tio->ti;
@@ -988,7 +1041,11 @@ static void __map_bio(struct dm_target_io *tio)
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_iter.bi_sector;
+
+ dm_offload_start(&o);
r = ti->type->map(ti, clone);
+ dm_offload_end(&o);
+
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1314,7 +1371,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the
* top-level queue for congestion.
*/
- r = md->queue->backing_dev_info.wb.state & bdi_bits;
+ r = md->queue->backing_dev_info->wb.state & bdi_bits;
} else {
map = dm_get_live_table_fast(md);
if (map)
@@ -1397,7 +1454,7 @@ void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
+ md->queue->backing_dev_info->congested_data = md;
}
void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1408,7 +1465,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
@@ -1419,7 +1476,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->kworker_task)
kthread_stop(md->kworker_task);
mempool_destroy(md->io_pool);
- mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
@@ -1595,12 +1651,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
- md->rq_pool = p->rq_pool;
- p->rq_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -1777,7 +1831,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
- r = dm_old_init_request_queue(md);
+ r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
return r;
@@ -2493,7 +2547,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
unsigned integrity, unsigned per_io_data_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
- struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
@@ -2503,20 +2556,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+
+ pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+ if (!pools->io_pool)
+ goto out;
break;
case DM_TYPE_REQUEST_BASED:
- cachep = _rq_tio_cache;
- pool_size = dm_get_reserved_rq_based_ios();
- pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
- if (!pools->rq_pool)
- goto out;
- /* fall through to setup remaining rq-based pools */
case DM_TYPE_MQ_REQUEST_BASED:
- if (!pool_size)
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
@@ -2524,12 +2573,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
BUG();
}
- if (cachep) {
- pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
- if (!pools->io_pool)
- goto out;
- }
-
pools->bs = bioset_create_nobvec(pool_size, front_pad);
if (!pools->bs)
goto out;
@@ -2551,7 +2594,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
return;
mempool_destroy(pools->io_pool);
- mempool_destroy(pools->rq_pool);
if (pools->bs)
bioset_free(pools->bs);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08b9654..f298b01f7ab3 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* To check whether the target type is request-based or not (bio-based).
*/
-#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
- ((t)->type->clone_and_map_rq != NULL))
+#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
/*
* To check whether the target type is a hybrid (capable of being
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5975c9915684..f1c7bbac31a5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82821ee0d57f..ba485dcf1064 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
+ /*
+ * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
+ * up mddev->thread. It is important to initialize critical
+ * resources for mddev->thread BEFORE calling pers->run().
+ */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
@@ -5341,8 +5346,8 @@ int md_run(struct mddev *mddev)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = md_congested;
+ mddev->queue->backing_dev_info->congested_data = mddev;
+ mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5699,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index aa8c4e5c1ee2..d457afa672d5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
* first available device
*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 7938cd21fa4c..185dc60360b5 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -976,6 +976,27 @@ int dm_array_cursor_next(struct dm_array_cursor *c)
}
EXPORT_SYMBOL_GPL(dm_array_cursor_next);
+int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
+{
+ int r;
+
+ do {
+ uint32_t remaining = le32_to_cpu(c->ab->nr_entries) - c->index;
+
+ if (count < remaining) {
+ c->index += count;
+ return 0;
+ }
+
+ count -= remaining;
+ r = dm_array_cursor_next(c);
+
+ } while (!r);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_skip);
+
void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le)
{
*value_le = element_at(c->info, c->ab, c->index);
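A hedged usage sketch for the new dm_array_cursor_skip() helper; treating each entry as a little-endian 64-bit value is an assumption made for illustration (the element type depends on how the array was created), while the cursor calls themselves are the ones exported by dm-array:

#include "dm-array.h"

/* Illustrative only: fetch the entry at 'index' by skipping to it. */
static int read_one_entry(struct dm_array_info *info, dm_block_t root,
			  uint32_t index, uint64_t *result)
{
	struct dm_array_cursor c;
	__le64 *value_le;
	int r;

	r = dm_array_cursor_begin(info, root, &c);
	if (r)
		return r;

	r = dm_array_cursor_skip(&c, index);	/* skipping 0 is a no-op */
	if (!r) {
		dm_array_cursor_get_value(&c, (void **) &value_le);
		*result = le64_to_cpu(*value_le);
	}

	dm_array_cursor_end(&c);
	return r;
}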
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
index 27ee49a55473..d7d2d579c662 100644
--- a/drivers/md/persistent-data/dm-array.h
+++ b/drivers/md/persistent-data/dm-array.h
@@ -207,6 +207,7 @@ void dm_array_cursor_end(struct dm_array_cursor *c);
uint32_t dm_array_cursor_index(struct dm_array_cursor *c);
int dm_array_cursor_next(struct dm_array_cursor *c);
+int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count);
/*
* value_le is only valid while the cursor points at the current value.
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
index 36f7cc2c7109..b7208d82e748 100644
--- a/drivers/md/persistent-data/dm-bitset.c
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -39,6 +39,48 @@ int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
}
EXPORT_SYMBOL_GPL(dm_bitset_empty);
+struct packer_context {
+ bit_value_fn fn;
+ unsigned nr_bits;
+ void *context;
+};
+
+static int pack_bits(uint32_t index, void *value, void *context)
+{
+ int r;
+ struct packer_context *p = context;
+ unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
+ uint64_t word = 0;
+ bool bv;
+
+ for (bit = 0; bit < nr; bit++) {
+ r = p->fn(index * 64 + bit, &bv, p->context);
+ if (r)
+ return r;
+
+ if (bv)
+ set_bit(bit, (unsigned long *) &word);
+ else
+ clear_bit(bit, (unsigned long *) &word);
+ }
+
+ *((__le64 *) value) = cpu_to_le64(word);
+
+ return 0;
+}
+
+int dm_bitset_new(struct dm_disk_bitset *info, dm_block_t *root,
+ uint32_t size, bit_value_fn fn, void *context)
+{
+ struct packer_context p;
+ p.fn = fn;
+ p.nr_bits = size;
+ p.context = context;
+
+ return dm_array_new(&info->array_info, root, dm_div_up(size, 64), pack_bits, &p);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_new);
+
int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
uint32_t old_nr_entries, uint32_t new_nr_entries,
bool default_value, dm_block_t *new_root)
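A minimal sketch of a dm_bitset_new() caller, assuming the bit values already live in an in-core bitmap (struct my_md and its fields are invented for illustration; the bit_value_fn signature and dm_bitset_new() are the ones added in this hunk and declared in dm-bitset.h):

#include "dm-bitset.h"

struct my_md {
	struct dm_disk_bitset bitset_info;
	unsigned long *in_core_bits;		/* one bit per region */
	uint32_t nr_regions;
};

static int my_bit_value(uint32_t index, bool *value, void *context)
{
	struct my_md *md = context;

	*value = test_bit(index, md->in_core_bits);
	return 0;
}

static int my_write_bitset(struct my_md *md, dm_block_t *root)
{
	return dm_bitset_new(&md->bitset_info, root, md->nr_regions,
			     my_bit_value, md);
}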
@@ -168,4 +210,108 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
}
EXPORT_SYMBOL_GPL(dm_bitset_test_bit);
+static int cursor_next_array_entry(struct dm_bitset_cursor *c)
+{
+ int r;
+ __le64 *value;
+
+ r = dm_array_cursor_next(&c->cursor);
+ if (r)
+ return r;
+
+ dm_array_cursor_get_value(&c->cursor, (void **) &value);
+ c->array_index++;
+ c->bit_index = 0;
+ c->current_bits = le64_to_cpu(*value);
+ return 0;
+}
+
+int dm_bitset_cursor_begin(struct dm_disk_bitset *info,
+ dm_block_t root, uint32_t nr_entries,
+ struct dm_bitset_cursor *c)
+{
+ int r;
+ __le64 *value;
+
+ if (!nr_entries)
+ return -ENODATA;
+
+ c->info = info;
+ c->entries_remaining = nr_entries;
+
+ r = dm_array_cursor_begin(&info->array_info, root, &c->cursor);
+ if (r)
+ return r;
+
+ dm_array_cursor_get_value(&c->cursor, (void **) &value);
+ c->array_index = 0;
+ c->bit_index = 0;
+ c->current_bits = le64_to_cpu(*value);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_cursor_begin);
+
+void dm_bitset_cursor_end(struct dm_bitset_cursor *c)
+{
+ return dm_array_cursor_end(&c->cursor);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_cursor_end);
+
+int dm_bitset_cursor_next(struct dm_bitset_cursor *c)
+{
+ int r = 0;
+
+ if (!c->entries_remaining)
+ return -ENODATA;
+
+ c->entries_remaining--;
+ if (++c->bit_index > 63)
+ r = cursor_next_array_entry(c);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_cursor_next);
+
+int dm_bitset_cursor_skip(struct dm_bitset_cursor *c, uint32_t count)
+{
+ int r;
+ __le64 *value;
+ uint32_t nr_array_skip;
+ uint32_t remaining_in_word = 64 - c->bit_index;
+
+ if (c->entries_remaining < count)
+ return -ENODATA;
+
+ if (count < remaining_in_word) {
+ c->bit_index += count;
+ c->entries_remaining -= count;
+ return 0;
+
+ } else {
+ c->entries_remaining -= remaining_in_word;
+ count -= remaining_in_word;
+ }
+
+ nr_array_skip = (count / 64) + 1;
+ r = dm_array_cursor_skip(&c->cursor, nr_array_skip);
+ if (r)
+ return r;
+
+ dm_array_cursor_get_value(&c->cursor, (void **) &value);
+ c->entries_remaining -= count;
+ c->array_index += nr_array_skip;
+ c->bit_index = count & 63;
+ c->current_bits = le64_to_cpu(*value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_cursor_skip);
+
+bool dm_bitset_cursor_get_value(struct dm_bitset_cursor *c)
+{
+ return test_bit(c->bit_index, (unsigned long *) &c->current_bits);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_cursor_get_value);
+
/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
index c2287d672ef5..df888da04ee1 100644
--- a/drivers/md/persistent-data/dm-bitset.h
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -93,6 +93,22 @@ void dm_disk_bitset_init(struct dm_transaction_manager *tm,
int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *new_root);
/*
+ * Creates a new bitset populated with values provided by a callback
+ * function. This is more efficient than creating an empty bitset,
+ * resizing, and then setting values since that process incurs a lot of
+ * copying.
+ *
+ * info - describes the bitset
+ * root - the root block of the array on disk
+ * size - the number of entries in the bitset
+ * fn - the callback
+ * context - passed to the callback
+ */
+typedef int (*bit_value_fn)(uint32_t index, bool *value, void *context);
+int dm_bitset_new(struct dm_disk_bitset *info, dm_block_t *root,
+ uint32_t size, bit_value_fn fn, void *context);
+
+/*
* Resize the bitset.
*
* info - describes the bitset
@@ -161,6 +177,29 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
dm_block_t *new_root);
+struct dm_bitset_cursor {
+ struct dm_disk_bitset *info;
+ struct dm_array_cursor cursor;
+
+ uint32_t entries_remaining;
+ uint32_t array_index;
+ uint32_t bit_index;
+ uint64_t current_bits;
+};
+
+/*
+ * Make sure you've flushed any dm_disk_bitset and updated the root before
+ * using this.
+ */
+int dm_bitset_cursor_begin(struct dm_disk_bitset *info,
+ dm_block_t root, uint32_t nr_entries,
+ struct dm_bitset_cursor *c);
+void dm_bitset_cursor_end(struct dm_bitset_cursor *c);
+
+int dm_bitset_cursor_next(struct dm_bitset_cursor *c);
+int dm_bitset_cursor_skip(struct dm_bitset_cursor *c, uint32_t count);
+bool dm_bitset_cursor_get_value(struct dm_bitset_cursor *c);
+
/*----------------------------------------------------------------*/
#endif /* _LINUX_DM_BITSET_H */
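A sketch of walking the on-disk bitset with the new cursor (counting set bits is just an invented use; per the comment above, the bitset must have been flushed and the root updated before the cursor is started):

/* Illustrative only. */
static int count_set_bits(struct dm_disk_bitset *info, dm_block_t root,
			  uint32_t nr_entries, uint32_t *result)
{
	struct dm_bitset_cursor c;
	uint32_t i, count = 0;
	int r;

	r = dm_bitset_cursor_begin(info, root, nr_entries, &c);
	if (r)
		return r;

	for (i = 0; i < nr_entries; i++) {
		if (dm_bitset_cursor_get_value(&c))
			count++;

		if (i + 1 < nr_entries) {
			r = dm_bitset_cursor_next(&c);
			if (r)
				break;
		}
	}

	dm_bitset_cursor_end(&c);
	if (!r)
		*result = count;
	return r;
}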
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a6dde7cab458..0863905dee02 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -120,7 +120,7 @@ static int __check_holder(struct block_lock *lock)
static void __wait(struct waiter *w)
{
for (;;) {
- set_task_state(current, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
if (!w->task)
break;
@@ -128,7 +128,7 @@ static void __wait(struct waiter *w)
schedule();
}
- set_task_state(current, TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
}
static void __wake_waiter(struct waiter *w)
@@ -462,7 +462,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
int r;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (IS_ERR(p))
+ if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -498,7 +498,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (IS_ERR(p))
+ if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -531,7 +531,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
int r;
p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
- if (IS_ERR(p))
+ if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
if (unlikely(!p))
return -EWOULDBLOCK;
@@ -567,7 +567,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
- if (IS_ERR(p))
+ if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm));
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 20a40329d84a..02e2ee0d8a00 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -272,7 +272,12 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
int r;
struct del_stack *s;
- s = kmalloc(sizeof(*s), GFP_NOIO);
+ /*
+ * dm_btree_del() is called via an ioctl, as such should be
+ * considered an FS op. We can't recurse back into the FS, so we
+ * allocate GFP_NOFS.
+ */
+ s = kmalloc(sizeof(*s), GFP_NOFS);
if (!s)
return -ENOMEM;
s->info = info;
@@ -1139,6 +1144,17 @@ int dm_btree_cursor_next(struct dm_btree_cursor *c)
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
+int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
+{
+ int r = 0;
+
+ while (count-- && !r)
+ r = dm_btree_cursor_next(c);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);
+
int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
{
if (c->depth) {
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index db9bd26adf31..3dc5bb1a4748 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -209,6 +209,7 @@ int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
bool prefetch_leaves, struct dm_btree_cursor *c);
void dm_btree_cursor_end(struct dm_btree_cursor *c);
int dm_btree_cursor_next(struct dm_btree_cursor *c);
+int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count);
int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le);
#endif /* _LINUX_DM_BTREE_H */
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 4c28608a0c94..829b4ce057d8 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -626,13 +626,19 @@ int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
void *root_le, size_t len)
{
int r;
- struct disk_sm_root *smr = root_le;
+ struct disk_sm_root smr;
if (len < sizeof(struct disk_sm_root)) {
DMERR("sm_metadata root too small");
return -ENOMEM;
}
+ /*
+ * We don't know the alignment of the root_le buffer, so we need to
+ * copy it into a new structure.
+ */
+ memcpy(&smr, root_le, sizeof(smr));
+
r = sm_ll_init(ll, tm);
if (r < 0)
return r;
@@ -644,10 +650,10 @@ int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
ll->max_entries = metadata_ll_max_entries;
ll->commit = metadata_ll_commit;
- ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
- ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
- ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
- ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+ ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr.ref_count_root);
return ll->open_index(ll);
}
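The same defensive idiom, reduced to its essentials; the header struct and field names below are invented, and only the copy-into-a-local pattern and le64_to_cpu() mirror the hunk above:

/* Hypothetical on-disk header with little-endian 64-bit fields. */
struct my_disk_header {
	__le64 magic;
	__le64 nr_blocks;
};

static int my_read_header(const void *buf, size_t len, uint64_t *nr_blocks)
{
	struct my_disk_header hdr;

	if (len < sizeof(hdr))
		return -EINVAL;

	/* buf may not be naturally aligned, so copy before accessing fields. */
	memcpy(&hdr, buf, sizeof(hdr));

	*nr_blocks = le64_to_cpu(hdr.nr_blocks);
	return 0;
}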
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 20557e2c60c6..4aed69d9dd17 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -544,7 +544,7 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
-static struct dm_space_map ops = {
+static const struct dm_space_map ops = {
.destroy = sm_metadata_destroy,
.extend = sm_metadata_extend,
.get_nr_blocks = sm_metadata_get_nr_blocks,
@@ -671,7 +671,7 @@ static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
return -EINVAL;
}
-static struct dm_space_map bootstrap_ops = {
+static const struct dm_space_map bootstrap_ops = {
.destroy = sm_bootstrap_destroy,
.extend = sm_bootstrap_extend,
.get_nr_blocks = sm_bootstrap_get_nr_blocks,
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 848365d474f3..d6585239bff2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
}
@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
*/
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2* stripe;
}
dump_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b0f647bcccb..830ff2b20346 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
* non-congested targets, it can be removed
*/
if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ ret &= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int i, disks;
struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
- const int op = bio_op(bio);
- const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_opf &
- (REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ mbio->bi_opf = bio_op(bio) |
+ (bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1920756828df..6bc5c2a85160 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
* maybe...
*/
stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
if (md_integrity_register(mddev))
@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0e8ed2c327b0..302dea3296ba 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -162,6 +162,8 @@ struct r5l_log {
/* to submit async io_units, to fulfill ordering of flush */
struct work_struct deferred_io_work;
+ /* to disable write back while in degraded mode */
+ struct work_struct disable_writeback_work;
};
/*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
r5l_do_submit_io(log, io);
}
+static void r5c_disable_writeback_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ disable_writeback_work);
+ struct mddev *mddev = log->rdev->mddev;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+ pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
+ mdname(mddev));
+ mddev_suspend(mddev);
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ mddev_resume(mddev);
+}
+
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
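The disable_writeback_work added above follows a common deferral idiom: the rdev error path cannot suspend the array itself, so it only schedules a work item and the heavy lifting happens later in process context. Below is a stripped-down sketch of that idiom with invented names (my_log, my_disable_feature); INIT_WORK/schedule_work/flush_work are the calls the patch itself uses:

#include <linux/workqueue.h>

struct my_log {
	struct work_struct disable_work;
	/* ... */
};

static void my_disable_feature(struct work_struct *work)
{
	struct my_log *log = container_of(work, struct my_log, disable_work);

	/* safe to sleep here (e.g. suspend/resume the array) */
	(void)log;
}

static void my_log_init(struct my_log *log)
{
	INIT_WORK(&log->disable_work, my_disable_feature);
}

static void my_on_error(struct my_log *log)
{
	schedule_work(&log->disable_work);	/* cheap; callable from the error path */
}

static void my_log_exit(struct my_log *log)
{
	flush_work(&log->disable_work);		/* as the patch adds to r5l_exit_log() below */
}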
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
- BUG_ON(reclaimable < 0);
-
if (reclaimable == 0 || !write_super)
return;
@@ -2062,7 +2077,7 @@ static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
struct r5l_recovery_ctx *ctx)
{
- struct stripe_head *sh, *next;
+ struct stripe_head *sh;
struct mddev *mddev = log->rdev->mddev;
struct page *page;
sector_t next_checkpoint = MaxSector;
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
WARN_ON(list_empty(&ctx->cached_list));
- list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
struct r5l_meta_block *mb;
int i;
int offset;
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
ctx->pos = write_pos;
ctx->seq += 1;
next_checkpoint = sh->log_start;
- list_del_init(&sh->lru);
- raid5_release_stripe(sh);
}
log->next_checkpoint = next_checkpoint;
__free_page(page);
return 0;
}
+static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct stripe_head *sh, *next;
+
+ if (ctx->data_only_stripes == 0)
+ return;
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
+
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+
+ md_wakeup_thread(conf->mddev->thread);
+ /* reuse conf->wait_for_quiescent in recovery */
+ wait_event(conf->wait_for_quiescent,
+ atomic_read(&conf->active_stripes) == 0);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+}
+
static int r5l_recovery_log(struct r5l_log *log)
{
struct mddev *mddev = log->rdev->mddev;
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
pos = ctx.pos;
ctx.seq += 10000;
- if (ctx.data_only_stripes == 0) {
- log->next_checkpoint = ctx.pos;
- r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
- ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- }
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
- else {
+ else
pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx.data_only_stripes,
ctx.data_parity_stripes);
- if (ctx.data_only_stripes > 0)
- if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
- pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
- mdname(mddev));
- return -EIO;
- }
+ if (ctx.data_only_stripes == 0) {
+ log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+ } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
}
log->log_start = ctx.pos;
log->seq = ctx.seq;
log->last_checkpoint = pos;
r5l_write_super(log, pos);
+
+ r5c_recovery_flush_data_only_stripes(log, &ctx);
return 0;
}
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
val > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ if (raid5_calc_degraded(conf) > 0 &&
+ val == R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
mddev_suspend(mddev);
conf->log->r5c_journal_mode = val;
mddev_resume(mddev);
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
set_bit(STRIPE_R5C_CACHING, &sh->state);
}
+ /*
+ * When run in degraded mode, the array is set to write-through mode.
+ * This check helps drain pending writes safely during the transition
+ * to write-through mode.
+ */
+ if (s->failed) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+
for (i = disks; i--; ) {
dev = &sh->dev[i];
/* if non-overwrite, use writing-out phase */
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
struct page *p = sh->dev[i].orig_page;
sh->dev[i].orig_page = sh->dev[i].page;
+ clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
if (!using_disk_info_extra_page)
put_page(p);
}
@@ -2555,6 +2610,19 @@ ioerr:
return ret;
}
+void r5c_update_on_rdev_error(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+
+ if (!log)
+ return;
+
+ if (raid5_calc_degraded(conf) > 0 &&
+ conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ schedule_work(&log->disable_writeback_work);
+}
+
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->no_space_stripes_lock);
INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+ INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
INIT_LIST_HEAD(&log->stripe_in_journal_list);
@@ -2659,6 +2728,7 @@ io_kc:
void r5l_exit_log(struct r5l_log *log)
{
+ flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool);
bioset_free(log->bs);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 36c13e4be9c9..6214e699342c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int calc_degraded(struct r5conf *conf)
+int raid5_calc_degraded(struct r5conf *conf)
{
int degraded, degraded2;
int i;
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
if (conf->mddev->reshape_position == MaxSector)
return conf->mddev->degraded > conf->max_degraded;
- degraded = calc_degraded(conf);
+ degraded = raid5_calc_degraded(conf);
if (degraded > conf->max_degraded)
return 1;
return 0;
@@ -1015,7 +1015,17 @@ again:
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
- sh->dev[i].vec.bv_page = sh->dev[i].page;
+
+ if (!op_is_write(op) &&
+ test_bit(R5_InJournal, &sh->dev[i].flags))
+ /*
+ * issuing read for a page in journal, this
+ * must be preparing for prexor in rmw; read
+ * the data into orig_page
+ */
+ sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
+ else
+ sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ /*
+ * end read for a page in journal, this
+ * must be preparing for prexor in rmw
+ */
+ set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
if (atomic_read(&rdev->read_errors))
atomic_set(&rdev->read_errors, 0);
} else {
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
spin_lock_irqsave(&conf->device_lock, flags);
clear_bit(In_sync, &rdev->flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
bdevname(rdev->bdev, b),
mdname(mddev),
conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev);
}
/*
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
+/*
+ * There are cases where we want handle_stripe_dirtying() and
+ * schedule_reconstruction() to delay towrite to some dev of a stripe.
+ *
+ * This function checks whether we want to delay the towrite. Specifically,
+ * we delay the towrite when:
+ *
+ * 1. degraded stripe has a non-overwrite to the missing dev, AND this
+ * stripe has data in journal (for other devices).
+ *
+ * In this case, when reading data for the non-overwrite dev, it is
+ * necessary to handle complex rmw of write back cache (prexor with
+ * orig_page, and xor with page). To keep read path simple, we would
+ * like to flush data in journal to RAID disks first, so complex rmw
+ * is handled in the write path (handle_stripe_dirtying).
+ *
+ */
+static inline bool delay_towrite(struct r5dev *dev,
+ struct stripe_head_state *s)
+{
+ return !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_Insync, &dev->flags) && s->injournal;
+}
+
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->towrite) {
+ if (dev->towrite && !delay_towrite(dev, s)) {
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantdrain, &dev->flags);
if (!expand)
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
return rv;
}
-/* fetch_block - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill to continue
- */
-
static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
int disk_idx, int disks)
{
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
return 0;
}
+/* fetch_block - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill to continue
+ */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
int disk_idx, int disks)
{
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
* midst of changing due to a write
*/
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- !sh->reconstruct_state)
+ !sh->reconstruct_state) {
+
+ /*
+ * For degraded stripe with data in journal, do not handle
+ * read requests yet, instead, flush the stripe to raid
+ * disks first, this avoids handling complex rmw of write
+ * back cache (prexor with orig_page, and then xor with
+ * page) in the read path
+ */
+ if (s->injournal && s->failed) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ goto out;
+ }
+
for (i = disks; i--; )
if (fetch_block(sh, s, i, disks))
break;
+ }
+out:
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -3594,6 +3651,21 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
+/*
+ * For RMW in write back cache, we need an extra page in prexor to store the
+ * old data. This page is stored in dev->orig_page.
+ *
+ * This function checks whether we have data for prexor. The exact logic
+ * is:
+ * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
+ */
+static inline bool uptodate_for_rmw(struct r5dev *dev)
+{
+ return (test_bit(R5_UPTODATE, &dev->flags)) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_OrigPageUPTDODATE, &dev->flags));
+}
+
static int handle_stripe_dirtying(struct r5conf *conf,
struct stripe_head *sh,
struct stripe_head_state *s,
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
+ i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !((test_bit(R5_UPTODATE, &dev->flags) &&
- (!test_bit(R5_InJournal, &dev->flags) ||
- dev->page != dev->orig_page)) ||
+ !(uptodate_for_rmw(dev) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !((test_bit(R5_UPTODATE, &dev->flags) &&
- (!test_bit(R5_InJournal, &dev->flags) ||
- dev->page != dev->orig_page)) ||
+ !(uptodate_for_rmw(dev) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -6264,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info.capabilities |=
+ mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- mddev->queue->backing_dev_info.capabilities &=
+ mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev);
}
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
/*
* 0 for a fully functional array, 1 or 2 for a degraded array.
*/
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
if (has_failed(conf)) {
pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7086,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
}
}
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
return count;
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices.
*/
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
mddev->raid_disks = conf->raid_disks;
@@ -7696,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
}
}
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
} else {
int d;
spin_lock_irq(&conf->device_lock);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index ed8e1362ab36..1440fa26e296 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -322,6 +322,11 @@ enum r5dev_flags {
* data and parity being written are in the journal
* device
*/
+ R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
+ * dev->orig_page for prexor. When this flag is
+ * set, orig_page contains latest data in the
+ * raid disk.
+ */
};
/*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce);
+extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
+extern void r5c_update_on_rdev_error(struct mddev *mddev);
#endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb3de66..ccda41c2c9e4 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
#include "cec-priv.h"
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx);
/*
* 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
/* Mark it as an error */
data->msg.tx_ts = ktime_get_ns();
- data->msg.tx_status = CEC_TX_STATUS_ERROR |
- CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_error_cnt++;
data->attempts = 0;
- data->msg.tx_error_cnt = 1;
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(data->adap, &data->msg, 1);
@@ -611,8 +612,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
if (msg->len == 1) {
- if (cec_msg_initiator(msg) != 0xf ||
- cec_msg_destination(msg) == 0xf) {
+ if (cec_msg_destination(msg) == 0xf) {
dprintk(1, "cec_transmit_msg: invalid poll message\n");
return -EINVAL;
}
@@ -637,7 +637,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
return -EINVAL;
}
- if (cec_msg_initiator(msg) != 0xf &&
+ if (msg->len > 1 && adap->is_configured &&
!cec_has_log_addr(adap, cec_msg_initiator(msg))) {
dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
cec_msg_initiator(msg));
@@ -851,7 +851,7 @@ static const u8 cec_msg_size[256] = {
[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
- [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
};
@@ -1071,7 +1071,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/* Send poll message */
msg.len = 1;
- msg.msg[0] = 0xf0 | log_addr;
+ msg.msg[0] = (log_addr << 4) | log_addr;
err = cec_transmit_msg_fh(adap, &msg, NULL, true);
/*
@@ -1205,7 +1205,7 @@ static int cec_config_thread_func(void *arg)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (last_la == CEC_LOG_ADDR_INVALID ||
last_la == CEC_LOG_ADDR_UNREGISTERED ||
- !(last_la & type2mask[type]))
+ !((1 << last_la) & type2mask[type]))
last_la = la_list[0];
err = cec_config_log_addr(adap, i, last_la);
@@ -1250,30 +1250,49 @@ configured:
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->is_configured = true;
adap->is_configuring = false;
cec_post_state_event(adap);
- mutex_unlock(&adap->lock);
+ /*
+ * Now post the Report Features and Report Physical Address broadcast
+ * messages. Note that these are non-blocking transmits, meaning that
+ * they are just queued up and once adap->lock is unlocked the main
+ * thread will kick in and start transmitting these.
+ *
+ * If after this function is done (but before one or more of these
+ * messages are actually transmitted) the CEC adapter is unconfigured,
+ * then any remaining messages will be dropped by the main thread.
+ */
for (i = 0; i < las->num_log_addrs; i++) {
+ struct cec_msg msg = {};
+
if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
- /*
- * Report Features must come first according
- * to CEC 2.0
- */
- if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
- cec_report_features(adap, i);
- cec_report_phys_addr(adap, i);
+ msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+ /* Report Features must come first according to CEC 2.0 */
+ if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ cec_fill_msg_report_features(adap, &msg, i);
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
+
+ /* Report Physical Address */
+ cec_msg_report_physical_addr(&msg, adap->phys_addr,
+ las->primary_device_type[i]);
+ dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ las->log_addr[i],
+ cec_phys_addr_exp(adap->phys_addr));
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
}
- for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
- las->log_addr[i] = CEC_LOG_ADDR_INVALID;
- mutex_lock(&adap->lock);
adap->kthread_config = NULL;
- mutex_unlock(&adap->lock);
complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
return 0;
unconfigure:
@@ -1526,52 +1545,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
/* High-level core CEC message handling */
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx)
{
- struct cec_msg msg = { };
const struct cec_log_addrs *las = &adap->log_addrs;
const u8 *features = las->features[la_idx];
bool op_is_dev_features = false;
unsigned int idx;
- /* This is 2.0 and up only */
- if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
- return 0;
-
/* Report Features */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- msg.len = 4;
- msg.msg[1] = CEC_MSG_REPORT_FEATURES;
- msg.msg[2] = adap->log_addrs.cec_version;
- msg.msg[3] = las->all_device_types[la_idx];
+ msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = adap->log_addrs.cec_version;
+ msg->msg[3] = las->all_device_types[la_idx];
/* Write RC Profiles first, then Device Features */
for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
- msg.msg[msg.len++] = features[idx];
+ msg->msg[msg->len++] = features[idx];
if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
- return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
- const struct cec_log_addrs *las = &adap->log_addrs;
- struct cec_msg msg = { };
-
- /* Report Physical Address */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- cec_msg_report_physical_addr(&msg, adap->phys_addr,
- las->primary_device_type[la_idx]);
- dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
- las->log_addr[la_idx],
- cec_phys_addr_exp(adap->phys_addr));
- return cec_transmit_msg(adap, &msg, false);
}
/* Transmit the Feature Abort message */
@@ -1777,9 +1776,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
case CEC_MSG_GIVE_FEATURES:
- if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
- return cec_report_features(adap, la_idx);
- return 0;
+ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+ return cec_feature_abort(adap, msg);
+ cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
default:
/*
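For reference, a minimal user-space sketch of the CEC header byte used by the poll change above: the initiator occupies bits 7:4 and the destination bits 3:0, so a poll that claims a logical address puts the same candidate address in both nibbles. The helper name below is illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Build the first byte of a CEC poll message for a candidate address. */
static uint8_t cec_poll_header(uint8_t log_addr)
{
        /* initiator in bits 7:4, destination in bits 3:0 */
        return (uint8_t)((log_addr << 4) | log_addr);
}

int main(void)
{
        /* Claiming logical address 4 (Playback Device 1) -> header byte 0x44 */
        printf("poll header: 0x%02x\n", cec_poll_header(4));
        return 0;
}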
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index aca3ab83a8a1..37217e205040 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -239,7 +239,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
#if IS_REACHABLE(CONFIG_RC_CORE)
/* Prepare the RC input device */
- adap->rc = rc_allocate_device();
+ adap->rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!adap->rc) {
pr_err("cec-%s: failed to allocate memory for rc_dev\n",
name);
@@ -259,7 +259,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.vendor = 0;
adap->rc->input_id.product = 0;
adap->rc->input_id.version = 1;
- adap->rc->driver_type = RC_DRIVER_SCANCODE;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_BIT_CEC;
adap->rc->priv = adap;
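A short sketch, assuming the rc-core API used above, of how an RC device is now set up with its driver type fixed at allocation time instead of being assigned afterwards; everything except rc_allocate_device()/rc_register_device() is a placeholder.

#include <media/rc-core.h>

static int example_rc_setup(struct device *parent)
{
        struct rc_dev *rc;

        /* The driver type is now a parameter of the allocator */
        rc = rc_allocate_device(RC_DRIVER_SCANCODE);
        if (!rc)
                return -ENOMEM;

        rc->dev.parent = parent;
        rc->driver_name = "example";
        rc->allowed_protocols = RC_BIT_CEC;
        /* no separate rc->driver_type assignment any more */

        return rc_register_device(rc);
}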
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index f5956402fc69..5f10151ecec9 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -24,8 +24,7 @@
/* Can we use the specified front-end? Remember that if we are compiled
* into the kernel we can't call code that's in modules. */
-#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \
- (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE)))
+#define FE_SUPPORTED(fe) IS_REACHABLE(CONFIG_DVB_ ## fe)
#if FE_SUPPORTED(BCM3510) || (FE_SUPPORTED(CX24120) && FE_SUPPORTED(ISL6421))
static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
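For context on the FE_SUPPORTED() rewrite above: the old macro expanded defined() from inside a macro body, which standard C leaves undefined, while IS_REACHABLE(CONFIG_FOO) is true when the option is built in, or when both the option and the current code are modules. Roughly, for one of the frontends used here, it behaves like the hand-written test below (illustration only):

#if defined(CONFIG_DVB_BCM3510) || \
        (defined(CONFIG_DVB_BCM3510_MODULE) && defined(MODULE))
#define BCM3510_REACHABLE 1        /* the bcm3510 symbols can be resolved from here */
#else
#define BCM3510_REACHABLE 0
#endif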
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index 4338ab0043b4..2e0ab55cd67e 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -25,10 +25,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "flexcop.h"
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 2725702eda7b..81dce9a81bd3 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
index ca2f80c7740c..af6b2268db61 100644
--- a/drivers/media/common/siano/sms-cards.c
+++ b/drivers/media/common/siano/sms-cards.c
@@ -11,10 +11,6 @@
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
*
* See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "sms-cards.h"
diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h
index bb3d733f092b..e6264b4797b4 100644
--- a/drivers/media/common/siano/sms-cards.h
+++ b/drivers/media/common/siano/sms-cards.h
@@ -11,10 +11,6 @@
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
*
* See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __SMS_CARDS_H__
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index f3a42834d7d6..e7a0d7798d5b 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -15,10 +15,6 @@
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
*
* See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "smscoreapi.h"
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 41f2a3939979..7c898b06d85c 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -58,7 +58,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
struct rc_dev *dev;
pr_debug("Allocating rc device\n");
- dev = rc_allocate_device();
+ dev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!dev)
return -ENOMEM;
@@ -86,8 +86,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
#endif
dev->priv = coredev;
- dev->driver_type = RC_DRIVER_IR_RAW;
- dev->allowed_protocols = RC_BIT_ALL;
+ dev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index 11976031aff8..6e1020227f9f 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index f8adf4506a45..f854309ba8a5 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef __DEMUX_H
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 0c16bb213101..45e91add73ba 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#define pr_fmt(fmt) "dmxdev: " fmt
@@ -151,6 +147,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
void *mem;
+
if (!dvbdev->readers) {
mutex_unlock(&dmxdev->mutex);
return -EBUSY;
@@ -202,6 +199,7 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
dvbdev->readers++;
if (dmxdev->dvr_buffer.data) {
void *mem = dmxdev->dvr_buffer.data;
+ /* memory barrier */
mb();
spin_lock_irq(&dmxdev->lock);
dmxdev->dvr_buffer.data = NULL;
@@ -876,7 +874,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
- if ((unsigned)params->pes_type > DMX_PES_OTHER)
+ if ((unsigned int)params->pes_type > DMX_PES_OTHER)
return -EINVAL;
dmxdevfilter->type = DMXDEV_TYPE_PES;
@@ -1125,7 +1123,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
mutex_lock(&dmxdev->mutex);
dmxdev->dvbdev->users--;
- if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
+ if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
mutex_unlock(&dmxdev->mutex);
wake_up(&dmxdev->dvbdev->wait_queue);
} else
@@ -1263,14 +1261,14 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
- dmxdev->exit=1;
+ dmxdev->exit = 1;
if (dmxdev->dvbdev->users > 1) {
wait_event(dmxdev->dvbdev->wait_queue,
- dmxdev->dvbdev->users==1);
+ dmxdev->dvbdev->users == 1);
}
if (dmxdev->dvr_dvbdev->users > 1) {
wait_event(dmxdev->dvr_dvbdev->wait_queue,
- dmxdev->dvr_dvbdev->users==1);
+ dmxdev->dvr_dvbdev->users == 1);
}
dvb_unregister_device(dmxdev->dvbdev);
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 48c6cf92ab99..054fd4eb6192 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DMXDEV_H_
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 779f4224b63e..e200aa6f2d2f 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -73,11 +73,13 @@
#define USB_VID_GIGABYTE 0x1044
#define USB_VID_YUAN 0x1164
#define USB_VID_XTENSIONS 0x1ae7
+#define USB_VID_ZYDAS 0x0ace
#define USB_VID_HUMAX_COEX 0x10b9
#define USB_VID_774 0x7a69
#define USB_VID_EVOLUTEPC 0x1e59
#define USB_VID_AZUREWAVE 0x13d3
#define USB_VID_TECHNISAT 0x14f7
+#define USB_VID_HAMA 0x147f
/* Product IDs */
#define USB_PID_ADSTECH_USB2_COLD 0xa333
@@ -412,5 +414,6 @@
#define USB_PID_SVEON_STV27 0xd3af
#define USB_PID_TURBOX_DTT_2000 0xd3a4
#define USB_PID_WINTV_SOLOHD 0x0264
-#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
+#define USB_PID_HAMA_DVBT_HYBRID 0x2758
#endif
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index fd893141211c..000d737ad827 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -21,11 +21,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#define pr_fmt(fmt) "dvb_ca_en50221: " fmt
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index bbbff72bbb2a..4eac71e50c5f 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#define pr_fmt(fmt) "dvb_demux: " fmt
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 9235b008ea0a..6f572ca8d339 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVB_DEMUX_H_
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index db74cb74d271..85ae3669aa66 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -18,11 +18,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
/* Enables DVBv3 compatibility bits at the headers */
@@ -2536,9 +2533,13 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->voltage = -1;
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- if (fe->dvb->mdev && fe->dvb->mdev->enable_source) {
- ret = fe->dvb->mdev->enable_source(dvbdev->entity,
+ if (fe->dvb->mdev) {
+ mutex_lock(&fe->dvb->mdev->graph_mutex);
+ if (fe->dvb->mdev->enable_source)
+ ret = fe->dvb->mdev->enable_source(
+ dvbdev->entity,
&fepriv->pipe);
+ mutex_unlock(&fe->dvb->mdev->graph_mutex);
if (ret) {
dev_err(fe->dvb->device,
"Tuner is busy. Error %d\n", ret);
@@ -2562,8 +2563,12 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
err3:
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
- fe->dvb->mdev->disable_source(dvbdev->entity);
+ if (fe->dvb->mdev) {
+ mutex_lock(&fe->dvb->mdev->graph_mutex);
+ if (fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
+ mutex_unlock(&fe->dvb->mdev->graph_mutex);
+ }
err2:
#endif
dvb_generic_release(inode, file);
@@ -2595,8 +2600,12 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
if (dvbdev->users == -1) {
wake_up(&fepriv->wait_queue);
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
- fe->dvb->mdev->disable_source(dvbdev->entity);
+ if (fe->dvb->mdev) {
+ mutex_lock(&fe->dvb->mdev->graph_mutex);
+ if (fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
+ mutex_unlock(&fe->dvb->mdev->graph_mutex);
+ }
#endif
if (fe->exit != DVB_FE_NO_EXIT)
wake_up(&dvbdev->wait_queue);
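A sketch, not part of the patch, of the locking pattern the dvb_frontend hunks above introduce, factored into a hypothetical helper: enable_source is checked and called only while graph_mutex is held, so a concurrent media-graph change cannot race with frontend open/close.

static int example_enable_source(struct media_device *mdev,
                                 struct media_entity *entity,
                                 struct media_pipeline *pipe)
{
        int ret = 0;

        mutex_lock(&mdev->graph_mutex);
        if (mdev->enable_source)
                ret = mdev->enable_source(entity, pipe);
        mutex_unlock(&mdev->graph_mutex);

        return ret;
}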
diff --git a/drivers/media/dvb-core/dvb_math.c b/drivers/media/dvb-core/dvb_math.c
index beb7c93aa6cb..a2e1810dd83a 100644
--- a/drivers/media/dvb-core/dvb_math.c
+++ b/drivers/media/dvb-core/dvb_math.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/bitops.h>
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index 4d11d3529c14..8690ec42954d 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __DVB_MATH_H
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cfe7ca2..9947b342633e 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -23,11 +23,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
/*
@@ -719,6 +716,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
ETH_ALEN);
skb_pull(h->priv->ule_skb, ETH_ALEN);
+ } else {
+ /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+ eth_zero_addr(dest_addr);
}
/* Handle ULE Extension Headers. */
@@ -750,16 +750,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
if (!h->priv->ule_bridged) {
skb_push(h->priv->ule_skb, ETH_HLEN);
h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
- if (!h->priv->ule_dbit) {
- /*
- * dest_addr buffer is only valid if
- * h->priv->ule_dbit == 0
- */
- memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
- eth_zero_addr(h->ethh->h_source);
- } else /* zeroize source and dest */
- memset(h->ethh, 0, ETH_ALEN * 2);
-
+ memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+ eth_zero_addr(h->ethh->h_source);
h->ethh->h_proto = htons(h->priv->ule_sndu_type);
}
/* else: skb is in correct state; nothing to do. */
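A small aside on the dvb_net change above: when the ULE D-bit is set the SNDU carries no destination NPA, so dest_addr is now zeroed up front and the Ethernet header fill can copy it unconditionally. Standalone illustration of the resulting invariant, not driver code:

#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* dest_addr is either the received NPA or all zeros (D-bit set) */
static void fill_eth_dest(uint8_t *h_dest, const uint8_t *dest_addr)
{
        memcpy(h_dest, dest_addr, ETH_ALEN);
}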
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index ede78e8c8aa8..e9b18aa03e02 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVB_NET_H_
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 5c4b5a1f604f..2322af1b8742 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 38c844667789..41aad0f99d73 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#define pr_fmt(fmt) "dvbdev: " fmt
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 8c0a7b51555e..49189392cf3b 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#ifndef _DVBDEV_H_
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index c841fa1770be..e8c6554a47aa 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -447,13 +447,6 @@ config DVB_EC100
help
Say Y when you want to support this frontend.
-config DVB_HD29L2
- tristate "HDIC HD29L2"
- depends on DVB_CORE && I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
-
config DVB_STV0367
tristate "ST STV0367 based"
depends on DVB_CORE && I2C
@@ -513,6 +506,13 @@ config DVB_AS102_FE
depends on DVB_CORE
default DVB_AS102
+config DVB_ZD1301_DEMOD
+ tristate "ZyDAS ZD1301"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
config DVB_GP8PSK_FE
tristate
depends on DVB_CORE
@@ -619,7 +619,7 @@ config DVB_LGDT3305
config DVB_LGDT3306A
tristate "LG Electronics LGDT3306A based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && I2C_MUX
default m if !MEDIA_SUBDRV_AUTOSELECT
help
An ATSC 8VSB and QAM-B 64/256 demodulator module. Say Y when you want
@@ -852,6 +852,7 @@ config DVB_M88RS2000
config DVB_AF9033
tristate "Afatech AF9033 DVB-T demodulator"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
config DVB_HORUS3A
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 93921a4eaa27..3fccaf34ef52 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -99,7 +99,6 @@ obj-$(CONFIG_DVB_MN88472) += mn88472.o
obj-$(CONFIG_DVB_MN88473) += mn88473.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
-obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
obj-$(CONFIG_DVB_TS2020) += ts2020.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
@@ -126,3 +125,4 @@ obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
obj-$(CONFIG_DVB_HELENE) += helene.o
+obj-$(CONFIG_DVB_ZD1301_DEMOD) += zd1301_demod.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index c6cb3bbc912a..b978002af4d8 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "af9013_priv.h"
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index dcdd163ace85..277112863719 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef AF9013_H
diff --git a/drivers/media/dvb-frontends/af9013_priv.h b/drivers/media/dvb-frontends/af9013_priv.h
index 8b9392cfc00d..31d6538abfae 100644
--- a/drivers/media/dvb-frontends/af9013_priv.h
+++ b/drivers/media/dvb-frontends/af9013_priv.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef AF9013_PRIV_H
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index f8818028752e..aaed7cfe5f66 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -13,19 +13,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "af9033_priv.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
struct af9033_dev {
struct i2c_client *client;
+ struct regmap *regmap;
struct dvb_frontend fe;
struct af9033_config cfg;
bool is_af9035;
@@ -43,146 +37,19 @@ struct af9033_dev {
u64 total_block_count;
};
-/* write multiple registers */
-static int af9033_wr_regs(struct af9033_dev *dev, u32 reg, const u8 *val,
- int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = dev->client->addr,
- .flags = 0,
- .len = 3 + len,
- .buf = buf,
- }
- };
-
- if (3 + len > sizeof(buf)) {
- dev_warn(&dev->client->dev,
- "i2c wr reg=%04x: len=%d is too big!\n",
- reg, len);
- return -EINVAL;
- }
-
- buf[0] = (reg >> 16) & 0xff;
- buf[1] = (reg >> 8) & 0xff;
- buf[2] = (reg >> 0) & 0xff;
- memcpy(&buf[3], val, len);
-
- ret = i2c_transfer(dev->client->adapter, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&dev->client->dev, "i2c wr failed=%d reg=%06x len=%d\n",
- ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* read multiple registers */
-static int af9033_rd_regs(struct af9033_dev *dev, u32 reg, u8 *val, int len)
-{
- int ret;
- u8 buf[3] = { (reg >> 16) & 0xff, (reg >> 8) & 0xff,
- (reg >> 0) & 0xff };
- struct i2c_msg msg[2] = {
- {
- .addr = dev->client->addr,
- .flags = 0,
- .len = sizeof(buf),
- .buf = buf
- }, {
- .addr = dev->client->addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val
- }
- };
-
- ret = i2c_transfer(dev->client->adapter, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_warn(&dev->client->dev, "i2c rd failed=%d reg=%06x len=%d\n",
- ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-
-/* write single register */
-static int af9033_wr_reg(struct af9033_dev *dev, u32 reg, u8 val)
-{
- return af9033_wr_regs(dev, reg, &val, 1);
-}
-
-/* read single register */
-static int af9033_rd_reg(struct af9033_dev *dev, u32 reg, u8 *val)
-{
- return af9033_rd_regs(dev, reg, val, 1);
-}
-
-/* write single register with mask */
-static int af9033_wr_reg_mask(struct af9033_dev *dev, u32 reg, u8 val,
- u8 mask)
-{
- int ret;
- u8 tmp;
-
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = af9033_rd_regs(dev, reg, &tmp, 1);
- if (ret)
- return ret;
-
- val &= mask;
- tmp &= ~mask;
- val |= tmp;
- }
-
- return af9033_wr_regs(dev, reg, &val, 1);
-}
-
-/* read single register with mask */
-static int af9033_rd_reg_mask(struct af9033_dev *dev, u32 reg, u8 *val,
- u8 mask)
-{
- int ret, i;
- u8 tmp;
-
- ret = af9033_rd_regs(dev, reg, &tmp, 1);
- if (ret)
- return ret;
-
- tmp &= mask;
-
- /* find position of the first bit */
- for (i = 0; i < 8; i++) {
- if ((mask >> i) & 0x01)
- break;
- }
- *val = tmp >> i;
-
- return 0;
-}
-
-/* write reg val table using reg addr auto increment */
+/* Write reg val table using reg addr auto increment */
static int af9033_wr_reg_val_tab(struct af9033_dev *dev,
- const struct reg_val *tab, int tab_len)
+ const struct reg_val *tab, int tab_len)
{
+ struct i2c_client *client = dev->client;
#define MAX_TAB_LEN 212
int ret, i, j;
u8 buf[1 + MAX_TAB_LEN];
- dev_dbg(&dev->client->dev, "tab_len=%d\n", tab_len);
+ dev_dbg(&client->dev, "tab_len=%d\n", tab_len);
if (tab_len > sizeof(buf)) {
- dev_warn(&dev->client->dev, "tab len %d is too big\n", tab_len);
+ dev_warn(&client->dev, "tab len %d is too big\n", tab_len);
return -EINVAL;
}
@@ -190,8 +57,9 @@ static int af9033_wr_reg_val_tab(struct af9033_dev *dev,
buf[j] = tab[i].val;
if (i == tab_len - 1 || tab[i].reg != tab[i + 1].reg - 1) {
- ret = af9033_wr_regs(dev, tab[i].reg - j, buf, j + 1);
- if (ret < 0)
+ ret = regmap_bulk_write(dev->regmap, tab[i].reg - j,
+ buf, j + 1);
+ if (ret)
goto err;
j = 0;
@@ -201,47 +69,20 @@ static int af9033_wr_reg_val_tab(struct af9033_dev *dev,
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static u32 af9033_div(struct af9033_dev *dev, u32 a, u32 b, u32 x)
-{
- u32 r = 0, c = 0, i;
-
- dev_dbg(&dev->client->dev, "a=%d b=%d x=%d\n", a, b, x);
-
- if (a > b) {
- c = a / b;
- a = a - c * b;
- }
-
- for (i = 0; i < x; i++) {
- if (a >= b) {
- r += 1;
- a -= b;
- }
- a <<= 1;
- r <<= 1;
- }
- r = (c << (u32)x) + r;
-
- dev_dbg(&dev->client->dev, "a=%d b=%d x=%d r=%d r=%x\n", a, b, x, r, r);
-
- return r;
-}
-
static int af9033_init(struct dvb_frontend *fe)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, len;
+ unsigned int utmp;
const struct reg_val *init;
u8 buf[4];
- u32 adc_cw, clock_cw;
struct reg_val_mask tab[] = {
{ 0x80fb24, 0x00, 0x08 },
{ 0x80004c, 0x00, 0xff },
@@ -271,80 +112,76 @@ static int af9033_init(struct dvb_frontend *fe)
{ 0x800045, dev->cfg.adc_multiplier, 0xff },
};
- /* program clock control */
- clock_cw = af9033_div(dev, dev->cfg.clock, 1000000ul, 19ul);
- buf[0] = (clock_cw >> 0) & 0xff;
- buf[1] = (clock_cw >> 8) & 0xff;
- buf[2] = (clock_cw >> 16) & 0xff;
- buf[3] = (clock_cw >> 24) & 0xff;
-
- dev_dbg(&dev->client->dev, "clock=%d clock_cw=%08x\n",
- dev->cfg.clock, clock_cw);
+ dev_dbg(&client->dev, "\n");
- ret = af9033_wr_regs(dev, 0x800025, buf, 4);
- if (ret < 0)
+ /* Main clk control */
+ utmp = div_u64((u64)dev->cfg.clock * 0x80000, 1000000);
+ buf[0] = (utmp >> 0) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 16) & 0xff;
+ buf[3] = (utmp >> 24) & 0xff;
+ ret = regmap_bulk_write(dev->regmap, 0x800025, buf, 4);
+ if (ret)
goto err;
- /* program ADC control */
+ dev_dbg(&client->dev, "clk=%u clk_cw=%08x\n", dev->cfg.clock, utmp);
+
+ /* ADC clk control */
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
if (i == ARRAY_SIZE(clock_adc_lut)) {
- dev_err(&dev->client->dev,
- "Couldn't find ADC config for clock=%d\n",
+ dev_err(&client->dev, "Couldn't find ADC config for clock %d\n",
dev->cfg.clock);
goto err;
}
- adc_cw = af9033_div(dev, clock_adc_lut[i].adc, 1000000ul, 19ul);
- buf[0] = (adc_cw >> 0) & 0xff;
- buf[1] = (adc_cw >> 8) & 0xff;
- buf[2] = (adc_cw >> 16) & 0xff;
-
- dev_dbg(&dev->client->dev, "adc=%d adc_cw=%06x\n",
- clock_adc_lut[i].adc, adc_cw);
-
- ret = af9033_wr_regs(dev, 0x80f1cd, buf, 3);
- if (ret < 0)
+ utmp = div_u64((u64)clock_adc_lut[i].adc * 0x80000, 1000000);
+ buf[0] = (utmp >> 0) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 16) & 0xff;
+ ret = regmap_bulk_write(dev->regmap, 0x80f1cd, buf, 3);
+ if (ret)
goto err;
- /* program register table */
+ dev_dbg(&client->dev, "adc=%u adc_cw=%06x\n",
+ clock_adc_lut[i].adc, utmp);
+
+ /* Config register table */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = af9033_wr_reg_mask(dev, tab[i].reg, tab[i].val,
- tab[i].mask);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, tab[i].reg, tab[i].mask,
+ tab[i].val);
+ if (ret)
goto err;
}
- /* clock output */
+ /* Demod clk output */
if (dev->cfg.dyn0_clk) {
- ret = af9033_wr_reg(dev, 0x80fba8, 0x00);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x80fba8, 0x00);
+ if (ret)
goto err;
}
- /* settings for TS interface */
+ /* TS interface */
if (dev->cfg.ts_mode == AF9033_TS_MODE_USB) {
- ret = af9033_wr_reg_mask(dev, 0x80f9a5, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f9a5, 0x01, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x80f9b5, 0x01, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f9b5, 0x01, 0x01);
+ if (ret)
goto err;
} else {
- ret = af9033_wr_reg_mask(dev, 0x80f990, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f990, 0x01, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x80f9b5, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f9b5, 0x01, 0x00);
+ if (ret)
goto err;
}
- /* load OFSM settings */
- dev_dbg(&dev->client->dev, "load ofsm settings\n");
+ /* Demod core settings */
+ dev_dbg(&client->dev, "load ofsm settings\n");
switch (dev->cfg.tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
@@ -365,11 +202,11 @@ static int af9033_init(struct dvb_frontend *fe)
}
ret = af9033_wr_reg_val_tab(dev, init, len);
- if (ret < 0)
+ if (ret)
goto err;
- /* load tuner specific settings */
- dev_dbg(&dev->client->dev, "load tuner specific settings\n");
+ /* Demod tuner specific settings */
+ dev_dbg(&client->dev, "load tuner specific settings\n");
switch (dev->cfg.tuner) {
case AF9033_TUNER_TUA9001:
len = ARRAY_SIZE(tuner_init_tua9001);
@@ -420,27 +257,25 @@ static int af9033_init(struct dvb_frontend *fe)
init = tuner_init_it9135_62;
break;
default:
- dev_dbg(&dev->client->dev, "unsupported tuner ID=%d\n",
- dev->cfg.tuner);
+ dev_dbg(&client->dev, "unsupported tuner ID=%d\n",
+ dev->cfg.tuner);
ret = -ENODEV;
goto err;
}
ret = af9033_wr_reg_val_tab(dev, init, len);
- if (ret < 0)
+ if (ret)
goto err;
if (dev->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
- ret = af9033_wr_reg_mask(dev, 0x00d91c, 0x01, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x00d91c, 0x01, 0x01);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x00d917, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x00d917, 0x01, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x00d916, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x00d916, 0x01, 0x00);
+ if (ret)
goto err;
}
@@ -448,13 +283,13 @@ static int af9033_init(struct dvb_frontend *fe)
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
- ret = af9033_wr_reg(dev, 0x800000, 0x01);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x800000, 0x01);
+ if (ret)
goto err;
}
- dev->bandwidth_hz = 0; /* force to program all parameters */
- /* init stats here in order signal app which stats are supported */
+ dev->bandwidth_hz = 0; /* Force to program all parameters */
+ /* Init stats here in order to signal the app which stats are supported */
c->strength.len = 1;
c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
c->cnr.len = 1;
@@ -469,68 +304,53 @@ static int af9033_init(struct dvb_frontend *fe)
c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
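The clock hunk above replaces the bit-by-bit af9033_div() with a plain 64-bit multiply: the control word is clock * 2^19 / 1e6, i.e. the clock in MHz with 19 fractional bits. A standalone check, assuming the 12 MHz crystal that is the only clock the probe accepts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t clock = 12000000;        /* Hz */
        uint32_t clk_cw = (uint32_t)((uint64_t)clock * 0x80000 / 1000000);

        /* 12 MHz * 2^19 = 12 * 524288 = 0x600000 */
        printf("clk_cw = 0x%08x\n", clk_cw);
        return 0;
}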
static int af9033_sleep(struct dvb_frontend *fe)
{
struct af9033_dev *dev = fe->demodulator_priv;
- int ret, i;
- u8 tmp;
+ struct i2c_client *client = dev->client;
+ int ret;
+ unsigned int utmp;
- ret = af9033_wr_reg(dev, 0x80004c, 1);
- if (ret < 0)
- goto err;
+ dev_dbg(&client->dev, "\n");
- ret = af9033_wr_reg(dev, 0x800000, 0);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x80004c, 0x01);
+ if (ret)
goto err;
-
- for (i = 100, tmp = 1; i && tmp; i--) {
- ret = af9033_rd_reg(dev, 0x80004c, &tmp);
- if (ret < 0)
- goto err;
-
- usleep_range(200, 10000);
- }
-
- dev_dbg(&dev->client->dev, "loop=%d\n", i);
-
- if (i == 0) {
- ret = -ETIMEDOUT;
+ ret = regmap_write(dev->regmap, 0x800000, 0x00);
+ if (ret)
goto err;
- }
-
- ret = af9033_wr_reg_mask(dev, 0x80fb24, 0x08, 0x08);
- if (ret < 0)
+ ret = regmap_read_poll_timeout(dev->regmap, 0x80004c, utmp, utmp == 0,
+ 5000, 1000000);
+ if (ret)
+ goto err;
+ ret = regmap_update_bits(dev->regmap, 0x80fb24, 0x08, 0x08);
+ if (ret)
goto err;
- /* prevent current leak (?) */
+ /* Prevent current leak by setting TS interface to parallel mode */
if (dev->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
- /* enable parallel TS */
- ret = af9033_wr_reg_mask(dev, 0x00d917, 0x00, 0x01);
- if (ret < 0)
+ /* Enable parallel TS */
+ ret = regmap_update_bits(dev->regmap, 0x00d917, 0x01, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x00d916, 0x01, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x00d916, 0x01, 0x01);
+ if (ret)
goto err;
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
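regmap_read_poll_timeout() in the sleep path above condenses the old hand-rolled poll loop. A rough kernel-style sketch of what it does for this register, with the loop written out (the helper name is a placeholder):

static int example_wait_for_sleep(struct regmap *map)
{
        unsigned int val;
        int ret, i;

        for (i = 0; i < 200; i++) {
                ret = regmap_read(map, 0x80004c, &val);
                if (ret)
                        return ret;
                if (val == 0)
                        return 0;                /* demod acknowledged sleep */
                usleep_range(5000, 10000);       /* the 5000 us sleep argument */
        }

        return -ETIMEDOUT;                       /* the 1000000 us timeout elapsed */
}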
static int af9033_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *fesettings)
+ struct dvb_frontend_tune_settings *fesettings)
{
/* 800 => 2000 because IT9135 v2 is slow to gain lock */
fesettings->min_delay_ms = 2000;
@@ -543,15 +363,17 @@ static int af9033_get_tune_settings(struct dvb_frontend *fe,
static int af9033_set_frontend(struct dvb_frontend *fe)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i, spec_inv, sampling_freq;
+ int ret, i;
+ unsigned int utmp, adc_freq;
u8 tmp, buf[3], bandwidth_reg_val;
- u32 if_frequency, freq_cw, adc_freq;
+ u32 if_frequency;
- dev_dbg(&dev->client->dev, "frequency=%d bandwidth_hz=%d\n",
- c->frequency, c->bandwidth_hz);
+ dev_dbg(&client->dev, "frequency=%u bandwidth_hz=%u\n",
+ c->frequency, c->bandwidth_hz);
- /* check bandwidth */
+ /* Check bandwidth */
switch (c->bandwidth_hz) {
case 6000000:
bandwidth_reg_val = 0x00;
@@ -563,105 +385,91 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
bandwidth_reg_val = 0x02;
break;
default:
- dev_dbg(&dev->client->dev, "invalid bandwidth_hz\n");
+ dev_dbg(&client->dev, "invalid bandwidth_hz\n");
ret = -EINVAL;
goto err;
}
- /* program tuner */
+ /* Program tuner */
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
- /* program CFOE coefficients */
+ /* Coefficients */
if (c->bandwidth_hz != dev->bandwidth_hz) {
for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
if (coeff_lut[i].clock == dev->cfg.clock &&
- coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
+ coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
break;
}
}
if (i == ARRAY_SIZE(coeff_lut)) {
- dev_err(&dev->client->dev,
- "Couldn't find LUT config for clock=%d\n",
+ dev_err(&client->dev,
+ "Couldn't find config for clock %u\n",
dev->cfg.clock);
ret = -EINVAL;
goto err;
}
- ret = af9033_wr_regs(dev, 0x800001,
- coeff_lut[i].val, sizeof(coeff_lut[i].val));
+ ret = regmap_bulk_write(dev->regmap, 0x800001, coeff_lut[i].val,
+ sizeof(coeff_lut[i].val));
+ if (ret)
+ goto err;
}
- /* program frequency control */
+ /* IF frequency control */
if (c->bandwidth_hz != dev->bandwidth_hz) {
- spec_inv = dev->cfg.spec_inv ? -1 : 1;
-
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
if (i == ARRAY_SIZE(clock_adc_lut)) {
- dev_err(&dev->client->dev,
- "Couldn't find ADC clock for clock=%d\n",
+ dev_err(&client->dev,
+ "Couldn't find ADC clock for clock %u\n",
dev->cfg.clock);
ret = -EINVAL;
goto err;
}
adc_freq = clock_adc_lut[i].adc;
- /* get used IF frequency */
+ if (dev->cfg.adc_multiplier == AF9033_ADC_MULTIPLIER_2X)
+ adc_freq = 2 * adc_freq;
+
+ /* Get used IF frequency */
if (fe->ops.tuner_ops.get_if_frequency)
fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
else
if_frequency = 0;
- sampling_freq = if_frequency;
-
- while (sampling_freq > (adc_freq / 2))
- sampling_freq -= adc_freq;
-
- if (sampling_freq >= 0)
- spec_inv *= -1;
- else
- sampling_freq *= -1;
-
- freq_cw = af9033_div(dev, sampling_freq, adc_freq, 23ul);
+ utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x800000,
+ adc_freq);
- if (spec_inv == -1)
- freq_cw = 0x800000 - freq_cw;
+ if (!dev->cfg.spec_inv && if_frequency)
+ utmp = 0x800000 - utmp;
- if (dev->cfg.adc_multiplier == AF9033_ADC_MULTIPLIER_2X)
- freq_cw /= 2;
-
- buf[0] = (freq_cw >> 0) & 0xff;
- buf[1] = (freq_cw >> 8) & 0xff;
- buf[2] = (freq_cw >> 16) & 0x7f;
-
- /* FIXME: there seems to be calculation error here... */
- if (if_frequency == 0)
- buf[2] = 0;
-
- ret = af9033_wr_regs(dev, 0x800029, buf, 3);
- if (ret < 0)
+ buf[0] = (utmp >> 0) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 16) & 0xff;
+ ret = regmap_bulk_write(dev->regmap, 0x800029, buf, 3);
+ if (ret)
goto err;
+ dev_dbg(&client->dev, "if_frequency_cw=%06x\n", utmp);
+
dev->bandwidth_hz = c->bandwidth_hz;
}
- ret = af9033_wr_reg_mask(dev, 0x80f904, bandwidth_reg_val, 0x03);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f904, 0x03,
+ bandwidth_reg_val);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg(dev, 0x800040, 0x00);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x800040, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg(dev, 0x800047, 0x00);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x800047, 0x00);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg_mask(dev, 0x80f999, 0x00, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f999, 0x01, 0x00);
+ if (ret)
goto err;
if (c->frequency <= 230000000)
@@ -669,19 +477,17 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
else
tmp = 0x01; /* UHF */
- ret = af9033_wr_reg(dev, 0x80004b, tmp);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x80004b, tmp);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg(dev, 0x800000, 0x00);
- if (ret < 0)
+ /* Reset FSM */
+ ret = regmap_write(dev->regmap, 0x800000, 0x00);
+ if (ret)
goto err;
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
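The IF control word in set_frontend is now if_frequency * 2^23 / adc_freq, rounded to nearest, and complemented against 0x800000 when the spectrum is not inverted and the IF is non-zero. A standalone check with hypothetical tuner/ADC numbers; the real values come from the tuner driver and the clock_adc_lut:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST_U64(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        uint64_t if_frequency = 4570000;        /* Hz, hypothetical low-IF tuner */
        uint64_t adc_freq = 20250000;           /* Hz, hypothetical ADC clock */
        uint32_t if_cw = (uint32_t)DIV_ROUND_CLOSEST_U64(if_frequency * 0x800000,
                                                         adc_freq);

        /* Non-inverted spectrum with IF != 0: the driver writes 0x800000 - if_cw */
        printf("if_cw = 0x%06x inverted = 0x%06x\n", if_cw, 0x800000 - if_cw);
        return 0;
}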
@@ -689,14 +495,15 @@ static int af9033_get_frontend(struct dvb_frontend *fe,
struct dtv_frontend_properties *c)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
u8 buf[8];
- dev_dbg(&dev->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
- /* read all needed registers */
- ret = af9033_rd_regs(dev, 0x80f900, buf, sizeof(buf));
- if (ret < 0)
+ /* Read all needed TPS registers */
+ ret = regmap_bulk_read(dev->regmap, 0x80f900, buf, 8);
+ if (ret)
goto err;
switch ((buf[0] >> 0) & 3) {
@@ -805,49 +612,49 @@ static int af9033_get_frontend(struct dvb_frontend *fe,
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i, tmp = 0;
- u8 u8tmp, buf[7];
+ int ret, tmp = 0;
+ u8 buf[7];
+ unsigned int utmp, utmp1;
- dev_dbg(&dev->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
*status = 0;
- /* radio channel status, 0=no result, 1=has signal, 2=no signal */
- ret = af9033_rd_reg(dev, 0x800047, &u8tmp);
- if (ret < 0)
+ /* Radio channel status: 0=no result, 1=has signal, 2=no signal */
+ ret = regmap_read(dev->regmap, 0x800047, &utmp);
+ if (ret)
goto err;
- /* has signal */
- if (u8tmp == 0x01)
+ /* Has signal */
+ if (utmp == 0x01)
*status |= FE_HAS_SIGNAL;
- if (u8tmp != 0x02) {
+ if (utmp != 0x02) {
/* TPS lock */
- ret = af9033_rd_reg_mask(dev, 0x80f5a9, &u8tmp, 0x01);
- if (ret < 0)
+ ret = regmap_read(dev->regmap, 0x80f5a9, &utmp);
+ if (ret)
goto err;
- if (u8tmp)
+ if ((utmp >> 0) & 0x01)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
- /* full lock */
- ret = af9033_rd_reg_mask(dev, 0x80f999, &u8tmp, 0x01);
- if (ret < 0)
+ /* Full lock */
+ ret = regmap_read(dev->regmap, 0x80f999, &utmp);
+ if (ret)
goto err;
- if (u8tmp)
+ if ((utmp >> 0) & 0x01)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
@@ -855,18 +662,18 @@ static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
dev->fe_status = *status;
- /* signal strength */
+ /* Signal strength */
if (dev->fe_status & FE_HAS_SIGNAL) {
if (dev->is_af9035) {
- ret = af9033_rd_reg(dev, 0x80004a, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x80004a, &utmp);
if (ret)
goto err;
- tmp = -u8tmp * 1000;
+ tmp = -utmp * 1000;
} else {
- ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x8000f7, &utmp);
if (ret)
goto err;
- tmp = (u8tmp - 100) * 1000;
+ tmp = (utmp - 100) * 1000;
}
c->strength.len = 1;
@@ -879,87 +686,101 @@ static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
/* CNR */
if (dev->fe_status & FE_HAS_VITERBI) {
- u32 snr_val, snr_lut_size;
- const struct val_snr *snr_lut = NULL;
-
- /* read value */
- ret = af9033_rd_regs(dev, 0x80002c, buf, 3);
+ /* Read raw SNR value */
+ ret = regmap_bulk_read(dev->regmap, 0x80002c, buf, 3);
if (ret)
goto err;
- snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
+ utmp1 = buf[2] << 16 | buf[1] << 8 | buf[0] << 0;
- /* read superframe number */
- ret = af9033_rd_reg(dev, 0x80f78b, &u8tmp);
+ /* Read superframe number */
+ ret = regmap_read(dev->regmap, 0x80f78b, &utmp);
if (ret)
goto err;
- if (u8tmp)
- snr_val /= u8tmp;
+ if (utmp)
+ utmp1 /= utmp;
- /* read current transmission mode */
- ret = af9033_rd_reg(dev, 0x80f900, &u8tmp);
+ /* Read current transmission mode */
+ ret = regmap_read(dev->regmap, 0x80f900, &utmp);
if (ret)
goto err;
- switch ((u8tmp >> 0) & 3) {
+ switch ((utmp >> 0) & 3) {
case 0:
- snr_val *= 4;
+ /* 2k */
+ utmp1 *= 4;
break;
case 1:
- snr_val *= 1;
+ /* 8k */
+ utmp1 *= 1;
break;
case 2:
- snr_val *= 2;
+ /* 4k */
+ utmp1 *= 2;
break;
default:
- snr_val *= 0;
+ utmp1 *= 0;
break;
}
- /* read current modulation */
- ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
+ /* Read current modulation */
+ ret = regmap_read(dev->regmap, 0x80f903, &utmp);
if (ret)
goto err;
- switch ((u8tmp >> 0) & 3) {
+ switch ((utmp >> 0) & 3) {
case 0:
- snr_lut_size = ARRAY_SIZE(qpsk_snr_lut);
- snr_lut = qpsk_snr_lut;
+ /*
+ * QPSK
+ * CNR[dB] 13 * -log10((1690000 - value) / value) + 2.6
+ * value [653799, 1689999], 2.6 / 13 * 2^24 = 3355443
+ */
+ utmp1 = clamp(utmp1, 653799U, 1689999U);
+ utmp1 = ((u64)(intlog10(utmp1)
+ - intlog10(1690000 - utmp1)
+ + 3355443) * 13 * 1000) >> 24;
break;
case 1:
- snr_lut_size = ARRAY_SIZE(qam16_snr_lut);
- snr_lut = qam16_snr_lut;
+ /*
+ * QAM-16
+ * CNR[dB] 6 * log10((value - 370000) / (828000 - value)) + 15.7
+ * value [371105, 827999], 15.7 / 6 * 2^24 = 43900382
+ */
+ utmp1 = clamp(utmp1, 371105U, 827999U);
+ utmp1 = ((u64)(intlog10(utmp1 - 370000)
+ - intlog10(828000 - utmp1)
+ + 43900382) * 6 * 1000) >> 24;
break;
case 2:
- snr_lut_size = ARRAY_SIZE(qam64_snr_lut);
- snr_lut = qam64_snr_lut;
+ /*
+ * QAM-64
+ * CNR[dB] 8 * log10((value - 193000) / (425000 - value)) + 23.8
+ * value [193246, 424999], 23.8 / 8 * 2^24 = 49912218
+ */
+ utmp1 = clamp(utmp1, 193246U, 424999U);
+ utmp1 = ((u64)(intlog10(utmp1 - 193000)
+ - intlog10(425000 - utmp1)
+ + 49912218) * 8 * 1000) >> 24;
break;
default:
- snr_lut_size = 0;
- tmp = 0;
+ utmp1 = 0;
break;
}
- for (i = 0; i < snr_lut_size; i++) {
- tmp = snr_lut[i].snr * 1000;
- if (snr_val < snr_lut[i].val)
- break;
- }
+ dev_dbg(&client->dev, "cnr=%u\n", utmp1);
- c->cnr.len = 1;
c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
- c->cnr.stat[0].svalue = tmp;
+ c->cnr.stat[0].svalue = utmp1;
} else {
- c->cnr.len = 1;
c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
/* UCB/PER/BER */
if (dev->fe_status & FE_HAS_LOCK) {
- /* outer FEC, 204 byte packets */
+ /* Outer FEC, 204 byte packets */
u16 abort_packet_count, rsd_packet_count;
- /* inner FEC, bits */
+ /* Inner FEC, bits */
u32 rsd_bit_err_count;
/*
@@ -967,7 +788,7 @@ static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
* (rsd_packet_count). Maybe it should be increased?
*/
- ret = af9033_rd_regs(dev, 0x800032, buf, 7);
+ ret = regmap_bulk_read(dev->regmap, 0x800032, buf, 7);
if (ret)
goto err;
@@ -998,21 +819,22 @@ static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
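The CNR branches above are fixed-point versions of the dB formulas in their comments: intlog10() returns log10() scaled by 2^24, the additive constants are the dB offsets divided by the multiplier and scaled the same way, and the result is in millidecibels as expected for an svalue reported with FE_SCALE_DECIBEL. A floating-point cross-check of the QPSK branch with a hypothetical register value:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t v = 1200000;        /* hypothetical raw SNR value, inside the QPSK clamp */
        double cnr_db = 13.0 * log10((double)v / (1690000.0 - v)) + 2.6;
        int64_t cnr_mdb = (int64_t)(((log10(v) - log10(1690000.0 - v)) * 16777216.0
                                    + 3355443) * 13 * 1000) >> 24;

        /* Both should print roughly 7.66 dB */
        printf("float %.3f dB, fixed point %lld mdB\n", cnr_db, (long long)cnr_mdb);
        return 0;
}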
static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
int ret;
- u8 u8tmp;
+ unsigned int utmp;
+
+ dev_dbg(&client->dev, "\n");
- /* use DVBv5 CNR */
+ /* Use DVBv5 CNR */
if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) {
/* Return 0.1 dB for AF9030 and 0-0xffff for IT9130. */
if (dev->is_af9035) {
@@ -1022,13 +844,13 @@ static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
/* 1000x => 1x (1 dB) */
*snr = div_s64(c->cnr.stat[0].svalue, 1000);
- /* read current modulation */
- ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
+ /* Read current modulation */
+ ret = regmap_read(dev->regmap, 0x80f903, &utmp);
if (ret)
goto err;
/* scale value to 0x0000-0xffff */
- switch ((u8tmp >> 0) & 3) {
+ switch ((utmp >> 0) & 3) {
case 0:
*snr = *snr * 0xffff / 23;
break;
@@ -1047,35 +869,37 @@ static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
int ret, tmp, power_real;
- u8 u8tmp, gain_offset, buf[7];
+ unsigned int utmp;
+ u8 gain_offset, buf[7];
+
+ dev_dbg(&client->dev, "\n");
if (dev->is_af9035) {
- /* read signal strength of 0-100 scale */
- ret = af9033_rd_reg(dev, 0x800048, &u8tmp);
- if (ret < 0)
+ /* Read signal strength of 0-100 scale */
+ ret = regmap_read(dev->regmap, 0x800048, &utmp);
+ if (ret)
goto err;
- /* scale value to 0x0000-0xffff */
- *strength = u8tmp * 0xffff / 100;
+ /* Scale value to 0x0000-0xffff */
+ *strength = utmp * 0xffff / 100;
} else {
- ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
- if (ret < 0)
+ ret = regmap_read(dev->regmap, 0x8000f7, &utmp);
+ if (ret)
goto err;
- ret = af9033_rd_regs(dev, 0x80f900, buf, 7);
- if (ret < 0)
+ ret = regmap_bulk_read(dev->regmap, 0x80f900, buf, 7);
+ if (ret)
goto err;
if (c->frequency <= 300000000)
@@ -1083,7 +907,7 @@ static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
else
gain_offset = 4; /* UHF */
- power_real = (u8tmp - 100 - gain_offset) -
+ power_real = (utmp - 100 - gain_offset) -
power_reference[((buf[3] >> 0) & 3)][((buf[6] >> 0) & 7)];
if (power_real < -15)
@@ -1097,15 +921,13 @@ static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
else
tmp = 100;
- /* scale value to 0x0000-0xffff */
+ /* Scale value to 0x0000-0xffff */
*strength = tmp * 0xffff / 100;
}
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -1124,82 +946,78 @@ static int af9033_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
struct af9033_dev *dev = fe->demodulator_priv;
*ucblocks = dev->error_block_count;
+
return 0;
}
static int af9033_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
- dev_dbg(&dev->client->dev, "enable=%d\n", enable);
+ dev_dbg(&client->dev, "enable=%d\n", enable);
- ret = af9033_wr_reg_mask(dev, 0x00fa04, enable, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x00fa04, 0x01, enable);
+ if (ret)
goto err;
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
- dev_dbg(&dev->client->dev, "onoff=%d\n", onoff);
+ dev_dbg(&client->dev, "onoff=%d\n", onoff);
- ret = af9033_wr_reg_mask(dev, 0x80f993, onoff, 0x01);
- if (ret < 0)
+ ret = regmap_update_bits(dev->regmap, 0x80f993, 0x01, onoff);
+ if (ret)
goto err;
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid,
- int onoff)
+ int onoff)
{
struct af9033_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
int ret;
u8 wbuf[2] = {(pid >> 0) & 0xff, (pid >> 8) & 0xff};
- dev_dbg(&dev->client->dev, "index=%d pid=%04x onoff=%d\n",
- index, pid, onoff);
+ dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d\n",
+ index, pid, onoff);
if (pid > 0x1fff)
return 0;
- ret = af9033_wr_regs(dev, 0x80f996, wbuf, 2);
- if (ret < 0)
+ ret = regmap_bulk_write(dev->regmap, 0x80f996, wbuf, 2);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg(dev, 0x80f994, onoff);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x80f994, onoff);
+ if (ret)
goto err;
-
- ret = af9033_wr_reg(dev, 0x80f995, index);
- if (ret < 0)
+ ret = regmap_write(dev->regmap, 0x80f995, index);
+ if (ret)
goto err;
return 0;
-
err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static const struct dvb_frontend_ops af9033_ops = {
- .delsys = { SYS_DVBT },
+ .delsys = {SYS_DVBT},
.info = {
.name = "Afatech AF9033 (DVB-T)",
.frequency_min = 174000000,
@@ -1240,35 +1058,57 @@ static const struct dvb_frontend_ops af9033_ops = {
};
static int af9033_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct af9033_config *cfg = client->dev.platform_data;
struct af9033_dev *dev;
int ret;
u8 buf[8];
u32 reg;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 8,
+ };
- /* allocate memory for the internal state */
- dev = kzalloc(sizeof(struct af9033_dev), GFP_KERNEL);
- if (dev == NULL) {
+ /* Allocate memory for the internal state */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
ret = -ENOMEM;
- dev_err(&client->dev, "Could not allocate memory for state\n");
goto err;
}
- /* setup the state */
+ /* Setup the state */
dev->client = client;
- memcpy(&dev->cfg, cfg, sizeof(struct af9033_config));
+ memcpy(&dev->cfg, cfg, sizeof(dev->cfg));
+ switch (dev->cfg.ts_mode) {
+ case AF9033_TS_MODE_PARALLEL:
+ dev->ts_mode_parallel = true;
+ break;
+ case AF9033_TS_MODE_SERIAL:
+ dev->ts_mode_serial = true;
+ break;
+ case AF9033_TS_MODE_USB:
+ /* USB mode for AF9035 */
+ default:
+ break;
+ }
if (dev->cfg.clock != 12000000) {
ret = -ENODEV;
- dev_err(&dev->client->dev,
- "unsupported clock %d Hz, only 12000000 Hz is supported currently\n",
- dev->cfg.clock);
+ dev_err(&client->dev,
+ "Unsupported clock %u Hz. Only 12000000 Hz is supported currently\n",
+ dev->cfg.clock);
+ goto err_kfree;
+ }
+
+ /* Create regmap */
+ dev->regmap = regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
goto err_kfree;
}
- /* firmware version */
+ /* Firmware version */
switch (dev->cfg.tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
@@ -1285,20 +1125,19 @@ static int af9033_probe(struct i2c_client *client,
break;
}
- ret = af9033_rd_regs(dev, reg, &buf[0], 4);
- if (ret < 0)
- goto err_kfree;
-
- ret = af9033_rd_regs(dev, 0x804191, &buf[4], 4);
- if (ret < 0)
- goto err_kfree;
+ ret = regmap_bulk_read(dev->regmap, reg, &buf[0], 4);
+ if (ret)
+ goto err_regmap_exit;
+ ret = regmap_bulk_read(dev->regmap, 0x804191, &buf[4], 4);
+ if (ret)
+ goto err_regmap_exit;
- dev_info(&dev->client->dev,
- "firmware version: LINK %d.%d.%d.%d - OFDM %d.%d.%d.%d\n",
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
- buf[7]);
+ dev_info(&client->dev,
+ "firmware version: LINK %d.%d.%d.%d - OFDM %d.%d.%d.%d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7]);
- /* sleep */
+ /* Sleep as chip seems to be partly active by default */
switch (dev->cfg.tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
@@ -1309,41 +1148,30 @@ static int af9033_probe(struct i2c_client *client,
/* IT9135 did not like to sleep at that early */
break;
default:
- ret = af9033_wr_reg(dev, 0x80004c, 1);
- if (ret < 0)
- goto err_kfree;
-
- ret = af9033_wr_reg(dev, 0x800000, 0);
- if (ret < 0)
- goto err_kfree;
- }
-
- /* configure internal TS mode */
- switch (dev->cfg.ts_mode) {
- case AF9033_TS_MODE_PARALLEL:
- dev->ts_mode_parallel = true;
- break;
- case AF9033_TS_MODE_SERIAL:
- dev->ts_mode_serial = true;
- break;
- case AF9033_TS_MODE_USB:
- /* usb mode for AF9035 */
- default:
- break;
+ ret = regmap_write(dev->regmap, 0x80004c, 0x01);
+ if (ret)
+ goto err_regmap_exit;
+ ret = regmap_write(dev->regmap, 0x800000, 0x00);
+ if (ret)
+ goto err_regmap_exit;
}
- /* create dvb_frontend */
- memcpy(&dev->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
+ /* Create dvb frontend */
+ memcpy(&dev->fe.ops, &af9033_ops, sizeof(dev->fe.ops));
dev->fe.demodulator_priv = dev;
*cfg->fe = &dev->fe;
if (cfg->ops) {
cfg->ops->pid_filter = af9033_pid_filter;
cfg->ops->pid_filter_ctrl = af9033_pid_filter_ctrl;
}
+ cfg->regmap = dev->regmap;
i2c_set_clientdata(client, dev);
- dev_info(&dev->client->dev, "Afatech AF9033 successfully attached\n");
+ dev_info(&client->dev, "Afatech AF9033 successfully attached\n");
+
return 0;
+err_regmap_exit:
+ regmap_exit(dev->regmap);
err_kfree:
kfree(dev);
err:
@@ -1355,10 +1183,9 @@ static int af9033_remove(struct i2c_client *client)
{
struct af9033_dev *dev = i2c_get_clientdata(client);
- dev_dbg(&dev->client->dev, "\n");
+ dev_dbg(&client->dev, "\n");
- dev->fe.ops.release = NULL;
- dev->fe.demodulator_priv = NULL;
+ regmap_exit(dev->regmap);
kfree(dev);
return 0;
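The af9033.c hunks above convert the driver from its private af9033_rd_reg*/af9033_wr_reg* helpers to the generic regmap API: probe creates a regmap with 24-bit register addresses and 8-bit values, register access goes through regmap_write()/regmap_bulk_read()/regmap_update_bits(), errors are now tested with "if (ret)" rather than "if (ret < 0)", and the new err_regmap_exit label releases the regmap on failure. Below is a minimal sketch of that access pattern; struct demo_state and demo_init_regmap() are hypothetical names used only for illustration, while the register addresses (0x00fa04, 0x804191) are taken from the hunks above.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Hypothetical driver state; only the regmap pointer mirrors af9033_dev. */
struct demo_state {
	struct i2c_client *client;
	struct regmap *regmap;
};

static int demo_init_regmap(struct demo_state *st)
{
	/* Same geometry as the regmap_config added in af9033_probe() */
	static const struct regmap_config cfg = {
		.reg_bits = 24,
		.val_bits = 8,
	};
	u8 fw[4];
	int ret;

	st->regmap = regmap_init_i2c(st->client, &cfg);
	if (IS_ERR(st->regmap))
		return PTR_ERR(st->regmap);

	/*
	 * regmap_update_bits() replaces af9033_wr_reg_mask();
	 * note the (map, reg, mask, val) argument order.
	 */
	ret = regmap_update_bits(st->regmap, 0x00fa04, 0x01, 0x01);
	if (ret)
		goto err_regmap_exit;

	/* regmap_bulk_read() replaces af9033_rd_regs() */
	ret = regmap_bulk_read(st->regmap, 0x804191, fw, sizeof(fw));
	if (ret)
		goto err_regmap_exit;

	return 0;
err_regmap_exit:
	regmap_exit(st->regmap);
	return ret;
}

Pairing regmap_init_i2c() with an explicit regmap_exit() (rather than devm_regmap_init_i2c()) matches the cleanup the patch adds to the probe error path and to af9033_remove() above.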
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index 5b83e4f96297..8193f9805c4f 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -13,18 +13,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef AF9033_H
#define AF9033_H
/*
- * I2C address (TODO: are these in 8-bit format?)
- * 0x38, 0x3a, 0x3c, 0x3e
+ * I2C address: 0x1c, 0x1d, 0x1e, 0x1f
*/
struct af9033_config {
/*
@@ -88,6 +83,12 @@ struct af9033_config {
* returned by that driver
*/
struct dvb_frontend **fe;
+
+ /*
+ * regmap for IT913x integrated tuner driver
+ * returned by that driver
+ */
+ struct regmap *regmap;
};
struct af9033_ops {
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index 8e23275148ed..8799cda1ae14 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef AF9033_PRIV_H
@@ -25,6 +21,9 @@
#include "dvb_frontend.h"
#include "af9033.h"
#include <linux/math64.h>
+#include <linux/regmap.h>
+#include <linux/kernel.h>
+#include "dvb_math.h"
struct reg_val {
u32 reg;
@@ -68,7 +67,7 @@ static const struct clock_adc clock_adc_lut[] = {
{ 12000000, 20250000 },
};
-/* pre-calculated coeff lookup table */
+/* Pre-calculated coeff lookup table */
static const struct coeff coeff_lut[] = {
/* 12.000 MHz */
{ 12000000, 8000000, {
@@ -91,102 +90,9 @@ static const struct coeff coeff_lut[] = {
},
};
-/* QPSK SNR lookup table */
-static const struct val_snr qpsk_snr_lut[] = {
- { 0x0b4771, 0 },
- { 0x0c1aed, 1 },
- { 0x0d0d27, 2 },
- { 0x0e4d19, 3 },
- { 0x0e5da8, 4 },
- { 0x107097, 5 },
- { 0x116975, 6 },
- { 0x1252d9, 7 },
- { 0x131fa4, 8 },
- { 0x13d5e1, 9 },
- { 0x148e53, 10 },
- { 0x15358b, 11 },
- { 0x15dd29, 12 },
- { 0x168112, 13 },
- { 0x170b61, 14 },
- { 0x17a532, 15 },
- { 0x180f94, 16 },
- { 0x186ed2, 17 },
- { 0x18b271, 18 },
- { 0x18e118, 19 },
- { 0x18ff4b, 20 },
- { 0x190af1, 21 },
- { 0x191451, 22 },
- { 0xffffff, 23 },
-};
-
-/* QAM16 SNR lookup table */
-static const struct val_snr qam16_snr_lut[] = {
- { 0x04f0d5, 0 },
- { 0x05387a, 1 },
- { 0x0573a4, 2 },
- { 0x05a99e, 3 },
- { 0x05cc80, 4 },
- { 0x05eb62, 5 },
- { 0x05fecf, 6 },
- { 0x060b80, 7 },
- { 0x062501, 8 },
- { 0x064865, 9 },
- { 0x069604, 10 },
- { 0x06f356, 11 },
- { 0x07706a, 12 },
- { 0x0804d3, 13 },
- { 0x089d1a, 14 },
- { 0x093e3d, 15 },
- { 0x09e35d, 16 },
- { 0x0a7c3c, 17 },
- { 0x0afaf8, 18 },
- { 0x0b719d, 19 },
- { 0x0bda6a, 20 },
- { 0x0c0c75, 21 },
- { 0x0c3f7d, 22 },
- { 0x0c5e62, 23 },
- { 0x0c6c31, 24 },
- { 0x0c7925, 25 },
- { 0xffffff, 26 },
-};
-
-/* QAM64 SNR lookup table */
-static const struct val_snr qam64_snr_lut[] = {
- { 0x0256d0, 0 },
- { 0x027a65, 1 },
- { 0x029873, 2 },
- { 0x02b7fe, 3 },
- { 0x02cf1e, 4 },
- { 0x02e234, 5 },
- { 0x02f409, 6 },
- { 0x030046, 7 },
- { 0x030844, 8 },
- { 0x030a02, 9 },
- { 0x030cde, 10 },
- { 0x031031, 11 },
- { 0x03144c, 12 },
- { 0x0315dd, 13 },
- { 0x031920, 14 },
- { 0x0322d0, 15 },
- { 0x0339fc, 16 },
- { 0x0364a1, 17 },
- { 0x038bcc, 18 },
- { 0x03c7d3, 19 },
- { 0x0408cc, 20 },
- { 0x043bed, 21 },
- { 0x048061, 22 },
- { 0x04be95, 23 },
- { 0x04fa7d, 24 },
- { 0x052405, 25 },
- { 0x05570d, 26 },
- { 0x059feb, 27 },
- { 0x05bf38, 28 },
- { 0x05f78f, 29 },
- { 0x0612c3, 30 },
- { 0x0626be, 31 },
- { 0xffffff, 32 },
-};
-
+/*
+ * Afatech AF9033 demod init
+ */
static const struct reg_val ofsm_init[] = {
{ 0x800051, 0x01 },
{ 0x800070, 0x0a },
@@ -298,8 +204,10 @@ static const struct reg_val ofsm_init[] = {
{ 0x80fd8b, 0x00 },
};
-/* Infineon TUA 9001 tuner init
- AF9033_TUNER_TUA9001 = 0x27 */
+/*
+ * Infineon TUA 9001 tuner init
+ * AF9033_TUNER_TUA9001 = 0x27
+ */
static const struct reg_val tuner_init_tua9001[] = {
{ 0x800046, 0x27 },
{ 0x800057, 0x00 },
@@ -340,8 +248,10 @@ static const struct reg_val tuner_init_tua9001[] = {
{ 0x80f1e6, 0x00 },
};
-/* Fitipower fc0011 tuner init
- AF9033_TUNER_FC0011 = 0x28 */
+/*
+ * Fitipower FC0011 tuner init
+ * AF9033_TUNER_FC0011 = 0x28
+ */
static const struct reg_val tuner_init_fc0011[] = {
{ 0x800046, 0x28 },
{ 0x800057, 0x00 },
@@ -401,8 +311,10 @@ static const struct reg_val tuner_init_fc0011[] = {
{ 0x80f1e6, 0x00 },
};
-/* Fitipower FC0012 tuner init
- AF9033_TUNER_FC0012 = 0x2e */
+/*
+ * Fitipower FC0012 tuner init
+ * AF9033_TUNER_FC0012 = 0x2e
+ */
static const struct reg_val tuner_init_fc0012[] = {
{ 0x800046, 0x2e },
{ 0x800057, 0x00 },
@@ -444,8 +356,10 @@ static const struct reg_val tuner_init_fc0012[] = {
{ 0x80f1e6, 0x00 },
};
-/* MaxLinear MxL5007T tuner init
- AF9033_TUNER_MXL5007T = 0xa0 */
+/*
+ * MaxLinear MxL5007T tuner init
+ * AF9033_TUNER_MXL5007T = 0xa0
+ */
static const struct reg_val tuner_init_mxl5007t[] = {
{ 0x800046, 0x1b },
{ 0x800057, 0x01 },
@@ -479,8 +393,10 @@ static const struct reg_val tuner_init_mxl5007t[] = {
{ 0x80f1e6, 0x00 },
};
-/* NXP TDA 18218HN tuner init
- AF9033_TUNER_TDA18218 = 0xa1 */
+/*
+ * NXP TDA18218HN tuner init
+ * AF9033_TUNER_TDA18218 = 0xa1
+ */
static const struct reg_val tuner_init_tda18218[] = {
{0x800046, 0xa1},
{0x800057, 0x01},
@@ -513,7 +429,10 @@ static const struct reg_val tuner_init_tda18218[] = {
{0x80f1e6, 0x00},
};
-/* FCI FC2580 tuner init */
+/*
+ * FCI FC2580 tuner init
+ * AF9033_TUNER_FC2580 = 0x32
+ */
static const struct reg_val tuner_init_fc2580[] = {
{ 0x800046, 0x32 },
{ 0x800057, 0x01 },
@@ -551,6 +470,9 @@ static const struct reg_val tuner_init_fc2580[] = {
{ 0x80f1e6, 0x01 },
};
+/*
+ * IT9133 AX demod init
+ */
static const struct reg_val ofsm_init_it9135_v1[] = {
{ 0x800051, 0x01 },
{ 0x800070, 0x0a },
@@ -662,8 +584,10 @@ static const struct reg_val ofsm_init_it9135_v1[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega tuner init
- AF9033_TUNER_IT9135_38 = 0x38 */
+/*
+ * ITE Tech IT9133 AX Omega tuner init
+ * AF9033_TUNER_IT9135_38 = 0x38
+ */
static const struct reg_val tuner_init_it9135_38[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x38 },
@@ -879,8 +803,10 @@ static const struct reg_val tuner_init_it9135_38[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega LNA config 1 tuner init
- AF9033_TUNER_IT9135_51 = 0x51 */
+/*
+ * ITE Tech IT9133 AX Omega LNA config 1 tuner init
+ * AF9033_TUNER_IT9135_51 = 0x51
+ */
static const struct reg_val tuner_init_it9135_51[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x51 },
@@ -1096,8 +1022,10 @@ static const struct reg_val tuner_init_it9135_51[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega LNA config 2 tuner init
- AF9033_TUNER_IT9135_52 = 0x52 */
+/*
+ * ITE Tech IT9133 AX Omega LNA config 2 tuner init
+ * AF9033_TUNER_IT9135_52 = 0x52
+ */
static const struct reg_val tuner_init_it9135_52[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x52 },
@@ -1313,6 +1241,9 @@ static const struct reg_val tuner_init_it9135_52[] = {
{ 0x80fd8b, 0x00 },
};
+/*
+ * ITE Tech IT9133 BX demod init
+ */
static const struct reg_val ofsm_init_it9135_v2[] = {
{ 0x800051, 0x01 },
{ 0x800070, 0x0a },
@@ -1411,8 +1342,10 @@ static const struct reg_val ofsm_init_it9135_v2[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega v2 tuner init
- AF9033_TUNER_IT9135_60 = 0x60 */
+/*
+ * ITE Tech IT9133 BX Omega tuner init
+ * AF9033_TUNER_IT9135_60 = 0x60
+ */
static const struct reg_val tuner_init_it9135_60[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x60 },
@@ -1625,8 +1558,10 @@ static const struct reg_val tuner_init_it9135_60[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega v2 LNA config 1 tuner init
- AF9033_TUNER_IT9135_61 = 0x61 */
+/*
+ * ITE Tech IT9133 BX Omega LNA config 1 tuner init
+ * AF9033_TUNER_IT9135_61 = 0x61
+ */
static const struct reg_val tuner_init_it9135_61[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x61 },
@@ -1839,8 +1774,10 @@ static const struct reg_val tuner_init_it9135_61[] = {
{ 0x80fd8b, 0x00 },
};
-/* ITE Tech IT9135 Omega v2 LNA config 2 tuner init
- AF9033_TUNER_IT9135_62 = 0x62 */
+/*
+ * ITE Tech IT9133 BX Omega LNA config 2 tuner init
+ * AF9033_TUNER_IT9135_62 = 0x62
+ */
static const struct reg_val tuner_init_it9135_62[] = {
{ 0x800043, 0x00 },
{ 0x800046, 0x62 },
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 07ce05578278..05850b32d6c6 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/div64.h>
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h
index bb862387080f..e146d394f4ed 100644
--- a/drivers/media/dvb-frontends/atbm8830.h
+++ b/drivers/media/dvb-frontends/atbm8830.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ATBM8830_H__
diff --git a/drivers/media/dvb-frontends/atbm8830_priv.h b/drivers/media/dvb-frontends/atbm8830_priv.h
index d460058d497e..f1399451d1b0 100644
--- a/drivers/media/dvb-frontends/atbm8830_priv.h
+++ b/drivers/media/dvb-frontends/atbm8830_priv.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ATBM8830_PRIV_H
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index add246382806..a2e771305008 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
/* Developer notes:
diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h
index 961c2eb87c68..b6a2d62de379 100644
--- a/drivers/media/dvb-frontends/bcm3510.h
+++ b/drivers/media/dvb-frontends/bcm3510.h
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef BCM3510_H
#define BCM3510_H
diff --git a/drivers/media/dvb-frontends/bcm3510_priv.h b/drivers/media/dvb-frontends/bcm3510_priv.h
index 67f24686c31b..475e8381bf13 100644
--- a/drivers/media/dvb-frontends/bcm3510_priv.h
+++ b/drivers/media/dvb-frontends/bcm3510_priv.h
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __BCM3510_PRIV_H__
#define __BCM3510_PRIV_H__
diff --git a/drivers/media/dvb-frontends/bsbe1-d01a.h b/drivers/media/dvb-frontends/bsbe1-d01a.h
index baaf89e768cf..1d6e8d33cd92 100644
--- a/drivers/media/dvb-frontends/bsbe1-d01a.h
+++ b/drivers/media/dvb-frontends/bsbe1-d01a.h
@@ -14,11 +14,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/bsbe1.h b/drivers/media/dvb-frontends/bsbe1.h
index 4ad766154741..cb7cb2c5b977 100644
--- a/drivers/media/dvb-frontends/bsbe1.h
+++ b/drivers/media/dvb-frontends/bsbe1.h
@@ -12,11 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/bsru6.h b/drivers/media/dvb-frontends/bsru6.h
index 275c1782597d..1c203eb27491 100644
--- a/drivers/media/dvb-frontends/bsru6.h
+++ b/drivers/media/dvb-frontends/bsru6.h
@@ -12,11 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index db44ebb7c561..0118c2658cf7 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h
index 194c703611b4..f013aca3a691 100644
--- a/drivers/media/dvb-frontends/cx24113.h
+++ b/drivers/media/dvb-frontends/cx24113.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef CX24113_H
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 8aed8cc9f93d..4ae3d922a8e8 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
@@ -653,7 +649,7 @@ static int cx24123_pll_tune(struct dvb_frontend *fe)
dprintk("frequency=%i\n", p->frequency);
if (cx24123_pll_calculate(fe) != 0) {
- err("%s: cx24123_pll_calcutate failed\n", __func__);
+ err("%s: cx24123_pll_calculate failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 95267c6edb3a..f6ebbb47b9b2 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -615,6 +615,7 @@ static int cxd2820r_probe(struct i2c_client *client,
}
priv->client[0] = client;
+ priv->fe.demodulator_priv = priv;
priv->i2c = client->adapter;
priv->ts_mode = pdata->ts_mode;
priv->ts_clk_inv = pdata->ts_clk_inv;
@@ -697,7 +698,6 @@ static int cxd2820r_probe(struct i2c_client *client,
memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(priv->fe.ops));
if (!pdata->attach_in_use)
priv->fe.ops.release = NULL;
- priv->fe.demodulator_priv = priv;
i2c_set_clientdata(client, priv);
/* Setup callbacks */
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index befc8172159d..d7614b8b8782 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -14,10 +14,6 @@
*
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*
* This code is more or less generated from another driver, please
* excuse some codingstyle oddities.
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index fd3b33296b15..33af14df27bd 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -14,10 +14,6 @@
*
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*
* This code is more or less generated from another driver, please
* excuse some codingstyle oddities.
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index a27c0001f2d6..3815ea515364 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -805,13 +805,19 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
return 0;
}
-static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
+static int dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
{
u32 internal = dib7000p_get_internal_freq(state);
- s32 unit_khz_dds_val = 67108864 / (internal); /* 2**26 / Fsampling is the unit 1KHz offset */
+ s32 unit_khz_dds_val;
u32 abs_offset_khz = ABS(offset_khz);
u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
+ if (internal == 0) {
+ pr_warn("DIB7000P: dib7000p_get_internal_freq returned 0\n");
+ return -1;
+ }
+ /* 2**26 / Fsampling is the unit 1KHz offset */
+ unit_khz_dds_val = 67108864 / (internal);
dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d\n", offset_khz, internal, invert);
@@ -828,6 +834,7 @@ static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
dib7000p_write_word(state, 21, (u16) (((dds >> 16) & 0x1ff) | (0 << 10) | (invert << 9)));
dib7000p_write_word(state, 22, (u16) (dds & 0xffff));
}
+ return 0;
}
static int dib7000p_agc_startup(struct dvb_frontend *demod)
@@ -867,7 +874,9 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
frequency_offset = (s32)frequency_tuner / 1000 - ch->frequency / 1000;
}
- dib7000p_set_dds(state, frequency_offset);
+ if (dib7000p_set_dds(state, frequency_offset) < 0)
+ return -1;
+
ret = 7;
(*agc_state)++;
break;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
index 8188062953af..11e1ddeeef0a 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx39xxj.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef DRX39XXJ_H
diff --git a/drivers/media/dvb-frontends/drxd.h b/drivers/media/dvb-frontends/drxd.h
index f0507cdbb503..1d4b89488ac4 100644
--- a/drivers/media/dvb-frontends/drxd.h
+++ b/drivers/media/dvb-frontends/drxd.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _DRXD_H_
diff --git a/drivers/media/dvb-frontends/drxd_firm.c b/drivers/media/dvb-frontends/drxd_firm.c
index 5418b0b1dadc..4e1d8905e06a 100644
--- a/drivers/media/dvb-frontends/drxd_firm.c
+++ b/drivers/media/dvb-frontends/drxd_firm.c
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
/* TODO: generate this file with a script from a settings file */
diff --git a/drivers/media/dvb-frontends/drxd_firm.h b/drivers/media/dvb-frontends/drxd_firm.h
index 41597e89941c..7d9f9fa7ab3c 100644
--- a/drivers/media/dvb-frontends/drxd_firm.h
+++ b/drivers/media/dvb-frontends/drxd_firm.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _DRXD_FIRM_H_
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 4143f0326684..71910561005f 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/drxd_map_firm.h b/drivers/media/dvb-frontends/drxd_map_firm.h
index 6bc553abf215..8e5bd2e8de40 100644
--- a/drivers/media/dvb-frontends/drxd_map_firm.h
+++ b/drivers/media/dvb-frontends/drxd_map_firm.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __DRX3973D_MAP__H__
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 146edf344dd8..15d2cac588b1 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index ef976eb23344..7bec3e028bee 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index efc3c31a7635..50b2b666ef6c 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#include <linux/module.h>
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 50f1af512b62..86dd7b9d1e57 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef DVB_DUMMY_FE_H
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index d97ce21e26e1..fa2a96d5f94e 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/ec100.h b/drivers/media/dvb-frontends/ec100.h
index e894bdcf35a3..e43fe26654b2 100644
--- a/drivers/media/dvb-frontends/ec100.h
+++ b/drivers/media/dvb-frontends/ec100.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef EC100_H
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
deleted file mode 100644
index 8b53633cf325..000000000000
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * HDIC HD29L2 DMB-TH demodulator driver
- *
- * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
- *
- * Author: Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "hd29l2_priv.h"
-
-#define HD29L2_MAX_LEN (3)
-
-/* write multiple registers */
-static int hd29l2_wr_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
-{
- int ret;
- u8 buf[2 + HD29L2_MAX_LEN];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 2 + len,
- .buf = buf,
- }
- };
-
- if (len > HD29L2_MAX_LEN)
- return -EINVAL;
- buf[0] = 0x00;
- buf[1] = reg;
- memcpy(&buf[2], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* read multiple registers */
-static int hd29l2_rd_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
-{
- int ret;
- u8 buf[2] = { 0x00, reg };
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg.i2c_addr,
- .flags = 0,
- .len = 2,
- .buf = buf,
- }, {
- .addr = priv->cfg.i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val,
- }
- };
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* write single register */
-static int hd29l2_wr_reg(struct hd29l2_priv *priv, u8 reg, u8 val)
-{
- return hd29l2_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int hd29l2_rd_reg(struct hd29l2_priv *priv, u8 reg, u8 *val)
-{
- return hd29l2_rd_regs(priv, reg, val, 1);
-}
-
-/* write single register with mask */
-static int hd29l2_wr_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 val, u8 mask)
-{
- int ret;
- u8 tmp;
-
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
- if (ret)
- return ret;
-
- val &= mask;
- tmp &= ~mask;
- val |= tmp;
- }
-
- return hd29l2_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register with mask */
-static int hd29l2_rd_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 *val, u8 mask)
-{
- int ret, i;
- u8 tmp;
-
- ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
- if (ret)
- return ret;
-
- tmp &= mask;
-
- /* find position of the first bit */
- for (i = 0; i < 8; i++) {
- if ((mask >> i) & 0x01)
- break;
- }
- *val = tmp >> i;
-
- return 0;
-}
-
-static int hd29l2_soft_reset(struct hd29l2_priv *priv)
-{
- int ret;
- u8 tmp;
-
- ret = hd29l2_rd_reg(priv, 0x26, &tmp);
- if (ret)
- goto err;
-
- ret = hd29l2_wr_reg(priv, 0x26, 0x0d);
- if (ret)
- goto err;
-
- usleep_range(10000, 20000);
-
- ret = hd29l2_wr_reg(priv, 0x26, tmp);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- int ret, i;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 tmp;
-
- dev_dbg(&priv->i2c->dev, "%s: enable=%d\n", __func__, enable);
-
- /* set tuner address for demod */
- if (!priv->tuner_i2c_addr_programmed && enable) {
- /* no need to set tuner address every time, once is enough */
- ret = hd29l2_wr_reg(priv, 0x9d, priv->cfg.tuner_i2c_addr << 1);
- if (ret)
- goto err;
-
- priv->tuner_i2c_addr_programmed = true;
- }
-
- /* open / close gate */
- ret = hd29l2_wr_reg(priv, 0x9f, enable);
- if (ret)
- goto err;
-
- /* wait demod ready */
- for (i = 10; i; i--) {
- ret = hd29l2_rd_reg(priv, 0x9e, &tmp);
- if (ret)
- goto err;
-
- if (tmp == enable)
- break;
-
- usleep_range(5000, 10000);
- }
-
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
-
- return ret;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_read_status(struct dvb_frontend *fe, enum fe_status *status)
-{
- int ret;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 buf[2];
-
- *status = 0;
-
- ret = hd29l2_rd_reg(priv, 0x05, &buf[0]);
- if (ret)
- goto err;
-
- if (buf[0] & 0x01) {
- /* full lock */
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
- } else {
- ret = hd29l2_rd_reg(priv, 0x0d, &buf[1]);
- if (ret)
- goto err;
-
- if ((buf[1] & 0xfe) == 0x78)
- /* partial lock */
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC;
- }
-
- priv->fe_status = *status;
-
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- int ret;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 buf[2];
- u16 tmp;
-
- if (!(priv->fe_status & FE_HAS_LOCK)) {
- *snr = 0;
- ret = 0;
- goto err;
- }
-
- ret = hd29l2_rd_regs(priv, 0x0b, buf, 2);
- if (ret)
- goto err;
-
- tmp = (buf[0] << 8) | buf[1];
-
- /* report SNR in dB * 10 */
- #define LOG10_20736_24 72422627 /* log10(20736) << 24 */
- if (tmp)
- *snr = (LOG10_20736_24 - intlog10(tmp)) / ((1 << 24) / 100);
- else
- *snr = 0;
-
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- int ret;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 buf[2];
- u16 tmp;
-
- *strength = 0;
-
- ret = hd29l2_rd_regs(priv, 0xd5, buf, 2);
- if (ret)
- goto err;
-
- tmp = buf[0] << 8 | buf[1];
- tmp = ~tmp & 0x0fff;
-
- /* scale value to 0x0000-0xffff from 0x0000-0x0fff */
- *strength = tmp * 0xffff / 0x0fff;
-
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- int ret;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 buf[2];
-
- if (!(priv->fe_status & FE_HAS_SYNC)) {
- *ber = 0;
- ret = 0;
- goto err;
- }
-
- ret = hd29l2_rd_regs(priv, 0xd9, buf, 2);
- if (ret) {
- *ber = 0;
- goto err;
- }
-
- /* LDPC BER */
- *ber = ((buf[0] & 0x0f) << 8) | buf[1];
-
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
-{
- /* no way to read? */
- *ucblocks = 0;
- return 0;
-}
-
-static enum dvbfe_search hd29l2_search(struct dvb_frontend *fe)
-{
- int ret, i;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- u8 tmp, buf[3];
- u8 modulation, carrier, guard_interval, interleave, code_rate;
- u64 num64;
- u32 if_freq, if_ctl;
- bool auto_mode;
-
- dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d frequency=%d " \
- "bandwidth_hz=%d modulation=%d inversion=%d " \
- "fec_inner=%d guard_interval=%d\n", __func__,
- c->delivery_system, c->frequency, c->bandwidth_hz,
- c->modulation, c->inversion, c->fec_inner,
- c->guard_interval);
-
- /* as for now we detect always params automatically */
- auto_mode = true;
-
- /* program tuner */
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe);
-
- /* get and program IF */
- if (fe->ops.tuner_ops.get_if_frequency)
- fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
- else
- if_freq = 0;
-
- if (if_freq) {
- /* normal IF */
-
- /* calc IF control value */
- num64 = if_freq;
- num64 *= 0x800000;
- num64 = div_u64(num64, HD29L2_XTAL);
- num64 -= 0x800000;
- if_ctl = num64;
-
- tmp = 0xfc; /* tuner type normal */
- } else {
- /* zero IF */
- if_ctl = 0;
- tmp = 0xfe; /* tuner type Zero-IF */
- }
-
- buf[0] = ((if_ctl >> 0) & 0xff);
- buf[1] = ((if_ctl >> 8) & 0xff);
- buf[2] = ((if_ctl >> 16) & 0xff);
-
- /* program IF control */
- ret = hd29l2_wr_regs(priv, 0x14, buf, 3);
- if (ret)
- goto err;
-
- /* program tuner type */
- ret = hd29l2_wr_reg(priv, 0xab, tmp);
- if (ret)
- goto err;
-
- dev_dbg(&priv->i2c->dev, "%s: if_freq=%d if_ctl=%x\n",
- __func__, if_freq, if_ctl);
-
- if (auto_mode) {
- /*
- * use auto mode
- */
-
- /* disable quick mode */
- ret = hd29l2_wr_reg_mask(priv, 0xac, 0 << 7, 0x80);
- if (ret)
- goto err;
-
- ret = hd29l2_wr_reg_mask(priv, 0x82, 1 << 1, 0x02);
- if (ret)
- goto err;
-
- /* enable auto mode */
- ret = hd29l2_wr_reg_mask(priv, 0x7d, 1 << 6, 0x40);
- if (ret)
- goto err;
-
- ret = hd29l2_wr_reg_mask(priv, 0x81, 1 << 3, 0x08);
- if (ret)
- goto err;
-
- /* soft reset */
- ret = hd29l2_soft_reset(priv);
- if (ret)
- goto err;
-
- /* detect modulation */
- for (i = 30; i; i--) {
- msleep(100);
-
- ret = hd29l2_rd_reg(priv, 0x0d, &tmp);
- if (ret)
- goto err;
-
- if ((((tmp & 0xf0) >= 0x10) &&
- ((tmp & 0x0f) == 0x08)) || (tmp >= 0x2c))
- break;
- }
-
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
-
- if (i == 0)
- /* detection failed */
- return DVBFE_ALGO_SEARCH_FAILED;
-
- /* read modulation */
- ret = hd29l2_rd_reg_mask(priv, 0x7d, &modulation, 0x07);
- if (ret)
- goto err;
- } else {
- /*
- * use manual mode
- */
-
- modulation = HD29L2_QAM64;
- carrier = HD29L2_CARRIER_MULTI;
- guard_interval = HD29L2_PN945;
- interleave = HD29L2_INTERLEAVER_420;
- code_rate = HD29L2_CODE_RATE_08;
-
- tmp = (code_rate << 3) | modulation;
- ret = hd29l2_wr_reg_mask(priv, 0x7d, tmp, 0x5f);
- if (ret)
- goto err;
-
- tmp = (carrier << 2) | guard_interval;
- ret = hd29l2_wr_reg_mask(priv, 0x81, tmp, 0x0f);
- if (ret)
- goto err;
-
- tmp = interleave;
- ret = hd29l2_wr_reg_mask(priv, 0x82, tmp, 0x03);
- if (ret)
- goto err;
- }
-
- /* ensure modulation validy */
- /* 0=QAM4_NR, 1=QAM4, 2=QAM16, 3=QAM32, 4=QAM64 */
- if (modulation > (ARRAY_SIZE(reg_mod_vals_tab[0].val) - 1)) {
- dev_dbg(&priv->i2c->dev, "%s: modulation=%d not valid\n",
- __func__, modulation);
- goto err;
- }
-
- /* program registers according to modulation */
- for (i = 0; i < ARRAY_SIZE(reg_mod_vals_tab); i++) {
- ret = hd29l2_wr_reg(priv, reg_mod_vals_tab[i].reg,
- reg_mod_vals_tab[i].val[modulation]);
- if (ret)
- goto err;
- }
-
- /* read guard interval */
- ret = hd29l2_rd_reg_mask(priv, 0x81, &guard_interval, 0x03);
- if (ret)
- goto err;
-
- /* read carrier mode */
- ret = hd29l2_rd_reg_mask(priv, 0x81, &carrier, 0x04);
- if (ret)
- goto err;
-
- dev_dbg(&priv->i2c->dev,
- "%s: modulation=%d guard_interval=%d carrier=%d\n",
- __func__, modulation, guard_interval, carrier);
-
- if ((carrier == HD29L2_CARRIER_MULTI) && (modulation == HD29L2_QAM64) &&
- (guard_interval == HD29L2_PN945)) {
- dev_dbg(&priv->i2c->dev, "%s: C=3780 && QAM64 && PN945\n",
- __func__);
-
- ret = hd29l2_wr_reg(priv, 0x42, 0x33);
- if (ret)
- goto err;
-
- ret = hd29l2_wr_reg(priv, 0xdd, 0x01);
- if (ret)
- goto err;
- }
-
- usleep_range(10000, 20000);
-
- /* soft reset */
- ret = hd29l2_soft_reset(priv);
- if (ret)
- goto err;
-
- /* wait demod lock */
- for (i = 30; i; i--) {
- msleep(100);
-
- /* read lock bit */
- ret = hd29l2_rd_reg_mask(priv, 0x05, &tmp, 0x01);
- if (ret)
- goto err;
-
- if (tmp)
- break;
- }
-
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
-
- if (i == 0)
- return DVBFE_ALGO_SEARCH_AGAIN;
-
- return DVBFE_ALGO_SEARCH_SUCCESS;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return DVBFE_ALGO_SEARCH_ERROR;
-}
-
-static int hd29l2_get_frontend_algo(struct dvb_frontend *fe)
-{
- return DVBFE_ALGO_CUSTOM;
-}
-
-static int hd29l2_get_frontend(struct dvb_frontend *fe,
- struct dtv_frontend_properties *c)
-{
- int ret;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 buf[3];
- u32 if_ctl;
- char *str_constellation, *str_code_rate, *str_constellation_code_rate,
- *str_guard_interval, *str_carrier, *str_guard_interval_carrier,
- *str_interleave, *str_interleave_;
-
- ret = hd29l2_rd_reg(priv, 0x7d, &buf[0]);
- if (ret)
- goto err;
-
- ret = hd29l2_rd_regs(priv, 0x81, &buf[1], 2);
- if (ret)
- goto err;
-
- /* constellation, 0x7d[2:0] */
- switch ((buf[0] >> 0) & 0x07) {
- case 0: /* QAM4NR */
- str_constellation = "QAM4NR";
- c->modulation = QAM_AUTO; /* FIXME */
- break;
- case 1: /* QAM4 */
- str_constellation = "QAM4";
- c->modulation = QPSK; /* FIXME */
- break;
- case 2:
- str_constellation = "QAM16";
- c->modulation = QAM_16;
- break;
- case 3:
- str_constellation = "QAM32";
- c->modulation = QAM_32;
- break;
- case 4:
- str_constellation = "QAM64";
- c->modulation = QAM_64;
- break;
- default:
- str_constellation = "?";
- }
-
- /* LDPC code rate, 0x7d[4:3] */
- switch ((buf[0] >> 3) & 0x03) {
- case 0: /* 0.4 */
- str_code_rate = "0.4";
- c->fec_inner = FEC_AUTO; /* FIXME */
- break;
- case 1: /* 0.6 */
- str_code_rate = "0.6";
- c->fec_inner = FEC_3_5;
- break;
- case 2: /* 0.8 */
- str_code_rate = "0.8";
- c->fec_inner = FEC_4_5;
- break;
- default:
- str_code_rate = "?";
- }
-
- /* constellation & code rate set, 0x7d[6] */
- switch ((buf[0] >> 6) & 0x01) {
- case 0:
- str_constellation_code_rate = "manual";
- break;
- case 1:
- str_constellation_code_rate = "auto";
- break;
- default:
- str_constellation_code_rate = "?";
- }
-
- /* frame header, 0x81[1:0] */
- switch ((buf[1] >> 0) & 0x03) {
- case 0: /* PN945 */
- str_guard_interval = "PN945";
- c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
- break;
- case 1: /* PN595 */
- str_guard_interval = "PN595";
- c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
- break;
- case 2: /* PN420 */
- str_guard_interval = "PN420";
- c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
- break;
- default:
- str_guard_interval = "?";
- }
-
- /* carrier, 0x81[2] */
- switch ((buf[1] >> 2) & 0x01) {
- case 0:
- str_carrier = "C=1";
- break;
- case 1:
- str_carrier = "C=3780";
- break;
- default:
- str_carrier = "?";
- }
-
- /* frame header & carrier set, 0x81[3] */
- switch ((buf[1] >> 3) & 0x01) {
- case 0:
- str_guard_interval_carrier = "manual";
- break;
- case 1:
- str_guard_interval_carrier = "auto";
- break;
- default:
- str_guard_interval_carrier = "?";
- }
-
- /* interleave, 0x82[0] */
- switch ((buf[2] >> 0) & 0x01) {
- case 0:
- str_interleave = "M=720";
- break;
- case 1:
- str_interleave = "M=240";
- break;
- default:
- str_interleave = "?";
- }
-
- /* interleave set, 0x82[1] */
- switch ((buf[2] >> 1) & 0x01) {
- case 0:
- str_interleave_ = "manual";
- break;
- case 1:
- str_interleave_ = "auto";
- break;
- default:
- str_interleave_ = "?";
- }
-
- /*
- * We can read out current detected NCO and use that value next
- * time instead of calculating new value from targed IF.
- * I think it will not effect receiver sensitivity but gaining lock
- * after tune could be easier...
- */
- ret = hd29l2_rd_regs(priv, 0xb1, &buf[0], 3);
- if (ret)
- goto err;
-
- if_ctl = (buf[0] << 16) | ((buf[1] - 7) << 8) | buf[2];
-
- dev_dbg(&priv->i2c->dev, "%s: %s %s %s | %s %s %s | %s %s | NCO=%06x\n",
- __func__, str_constellation, str_code_rate,
- str_constellation_code_rate, str_guard_interval,
- str_carrier, str_guard_interval_carrier, str_interleave,
- str_interleave_, if_ctl);
- return 0;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static int hd29l2_init(struct dvb_frontend *fe)
-{
- int ret, i;
- struct hd29l2_priv *priv = fe->demodulator_priv;
- u8 tmp;
- static const struct reg_val tab[] = {
- { 0x3a, 0x06 },
- { 0x3b, 0x03 },
- { 0x3c, 0x04 },
- { 0xaf, 0x06 },
- { 0xb0, 0x1b },
- { 0x80, 0x64 },
- { 0x10, 0x38 },
- };
-
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
-
- /* reset demod */
- /* it is recommended to HW reset chip using RST_N pin */
- if (fe->callback) {
- ret = fe->callback(fe, DVB_FRONTEND_COMPONENT_DEMOD, 0, 0);
- if (ret)
- goto err;
-
- /* reprogramming needed because HW reset clears registers */
- priv->tuner_i2c_addr_programmed = false;
- }
-
- /* init */
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = hd29l2_wr_reg(priv, tab[i].reg, tab[i].val);
- if (ret)
- goto err;
- }
-
- /* TS params */
- ret = hd29l2_rd_reg(priv, 0x36, &tmp);
- if (ret)
- goto err;
-
- tmp &= 0x1b;
- tmp |= priv->cfg.ts_mode;
- ret = hd29l2_wr_reg(priv, 0x36, tmp);
- if (ret)
- goto err;
-
- ret = hd29l2_rd_reg(priv, 0x31, &tmp);
- tmp &= 0xef;
-
- if (!(priv->cfg.ts_mode >> 7))
- /* set b4 for serial TS */
- tmp |= 0x10;
-
- ret = hd29l2_wr_reg(priv, 0x31, tmp);
- if (ret)
- goto err;
-
- return ret;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static void hd29l2_release(struct dvb_frontend *fe)
-{
- struct hd29l2_priv *priv = fe->demodulator_priv;
- kfree(priv);
-}
-
-static const struct dvb_frontend_ops hd29l2_ops;
-
-struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
- struct i2c_adapter *i2c)
-{
- int ret;
- struct hd29l2_priv *priv = NULL;
- u8 tmp;
-
- /* allocate memory for the internal state */
- priv = kzalloc(sizeof(struct hd29l2_priv), GFP_KERNEL);
- if (priv == NULL)
- goto err;
-
- /* setup the state */
- priv->i2c = i2c;
- memcpy(&priv->cfg, config, sizeof(struct hd29l2_config));
-
-
- /* check if the demod is there */
- ret = hd29l2_rd_reg(priv, 0x00, &tmp);
- if (ret)
- goto err;
-
- /* create dvb_frontend */
- memcpy(&priv->fe.ops, &hd29l2_ops, sizeof(struct dvb_frontend_ops));
- priv->fe.demodulator_priv = priv;
-
- return &priv->fe;
-err:
- kfree(priv);
- return NULL;
-}
-EXPORT_SYMBOL(hd29l2_attach);
-
-static const struct dvb_frontend_ops hd29l2_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "HDIC HD29L2 DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
- .caps = FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_32 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_BANDWIDTH_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_RECOVER
- },
-
- .release = hd29l2_release,
-
- .init = hd29l2_init,
-
- .get_frontend_algo = hd29l2_get_frontend_algo,
- .search = hd29l2_search,
- .get_frontend = hd29l2_get_frontend,
-
- .read_status = hd29l2_read_status,
- .read_snr = hd29l2_read_snr,
- .read_signal_strength = hd29l2_read_signal_strength,
- .read_ber = hd29l2_read_ber,
- .read_ucblocks = hd29l2_read_ucblocks,
-
- .i2c_gate_ctrl = hd29l2_i2c_gate_ctrl,
-};
-
-MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_DESCRIPTION("HDIC HD29L2 DMB-TH demodulator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/hd29l2.h b/drivers/media/dvb-frontends/hd29l2.h
deleted file mode 100644
index a14d6f36dbf6..000000000000
--- a/drivers/media/dvb-frontends/hd29l2.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * HDIC HD29L2 DMB-TH demodulator driver
- *
- * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
- *
- * Author: Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef HD29L2_H
-#define HD29L2_H
-
-#include <linux/dvb/frontend.h>
-
-struct hd29l2_config {
- /*
- * demodulator I2C address
- */
- u8 i2c_addr;
-
- /*
- * tuner I2C address
- * only needed when tuner is behind demod I2C-gate
- */
- u8 tuner_i2c_addr;
-
- /*
- * TS settings
- */
-#define HD29L2_TS_SERIAL 0x00
-#define HD29L2_TS_PARALLEL 0x80
-#define HD29L2_TS_CLK_NORMAL 0x40
-#define HD29L2_TS_CLK_INVERTED 0x00
-#define HD29L2_TS_CLK_GATED 0x20
-#define HD29L2_TS_CLK_FREE 0x00
- u8 ts_mode;
-};
-
-
-#if IS_REACHABLE(CONFIG_DVB_HD29L2)
-extern struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
- struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend *hd29l2_attach(
-const struct hd29l2_config *config, struct i2c_adapter *i2c)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
-
-#endif /* HD29L2_H */
diff --git a/drivers/media/dvb-frontends/hd29l2_priv.h b/drivers/media/dvb-frontends/hd29l2_priv.h
deleted file mode 100644
index 6dc225c4bc91..000000000000
--- a/drivers/media/dvb-frontends/hd29l2_priv.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * HDIC HD29L2 DMB-TH demodulator driver
- *
- * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
- *
- * Author: Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef HD29L2_PRIV
-#define HD29L2_PRIV
-
-#include <linux/dvb/version.h>
-#include "dvb_frontend.h"
-#include "dvb_math.h"
-#include "hd29l2.h"
-
-#define HD29L2_XTAL 30400000 /* Hz */
-
-
-#define HD29L2_QAM4NR 0x00
-#define HD29L2_QAM4 0x01
-#define HD29L2_QAM16 0x02
-#define HD29L2_QAM32 0x03
-#define HD29L2_QAM64 0x04
-
-#define HD29L2_CODE_RATE_04 0x00
-#define HD29L2_CODE_RATE_06 0x08
-#define HD29L2_CODE_RATE_08 0x10
-
-#define HD29L2_PN945 0x00
-#define HD29L2_PN595 0x01
-#define HD29L2_PN420 0x02
-
-#define HD29L2_CARRIER_SINGLE 0x00
-#define HD29L2_CARRIER_MULTI 0x01
-
-#define HD29L2_INTERLEAVER_720 0x00
-#define HD29L2_INTERLEAVER_420 0x01
-
-struct reg_val {
- u8 reg;
- u8 val;
-};
-
-struct reg_mod_vals {
- u8 reg;
- u8 val[5];
-};
-
-struct hd29l2_priv {
- struct i2c_adapter *i2c;
- struct dvb_frontend fe;
- struct hd29l2_config cfg;
- u8 tuner_i2c_addr_programmed:1;
-
- enum fe_status fe_status;
-};
-
-static const struct reg_mod_vals reg_mod_vals_tab[] = {
- /* REG, QAM4NR, QAM4,QAM16,QAM32,QAM64 */
- { 0x01, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
- { 0x02, { 0x07, 0x07, 0x07, 0x07, 0x07 } },
- { 0x03, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
- { 0x04, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x05, { 0x61, 0x60, 0x60, 0x61, 0x60 } },
- { 0x06, { 0xff, 0xff, 0xff, 0xff, 0xff } },
- { 0x07, { 0xff, 0xff, 0xff, 0xff, 0xff } },
- { 0x08, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x09, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x0a, { 0x15, 0x15, 0x03, 0x03, 0x03 } },
- { 0x0d, { 0x78, 0x78, 0x88, 0x78, 0x78 } },
- { 0x0e, { 0xa0, 0x90, 0xa0, 0xa0, 0xa0 } },
- { 0x0f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x10, { 0xa0, 0xa0, 0x58, 0x38, 0x38 } },
- { 0x11, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x12, { 0x5a, 0x5a, 0x5a, 0x5a, 0x5a } },
- { 0x13, { 0xa2, 0xa2, 0xa2, 0xa2, 0xa2 } },
- { 0x17, { 0x40, 0x40, 0x40, 0x40, 0x40 } },
- { 0x18, { 0x21, 0x21, 0x42, 0x52, 0x42 } },
- { 0x19, { 0x21, 0x21, 0x62, 0x72, 0x62 } },
- { 0x1a, { 0x32, 0x43, 0xa9, 0xb9, 0xa9 } },
- { 0x1b, { 0x32, 0x43, 0xb9, 0xd8, 0xb9 } },
- { 0x1c, { 0x02, 0x02, 0x03, 0x02, 0x03 } },
- { 0x1d, { 0x0c, 0x0c, 0x01, 0x02, 0x02 } },
- { 0x1e, { 0x02, 0x02, 0x02, 0x01, 0x02 } },
- { 0x1f, { 0x02, 0x02, 0x01, 0x02, 0x04 } },
- { 0x20, { 0x01, 0x02, 0x01, 0x01, 0x01 } },
- { 0x21, { 0x08, 0x08, 0x0a, 0x0a, 0x0a } },
- { 0x22, { 0x06, 0x06, 0x04, 0x05, 0x05 } },
- { 0x23, { 0x06, 0x06, 0x05, 0x03, 0x05 } },
- { 0x24, { 0x08, 0x08, 0x05, 0x07, 0x07 } },
- { 0x25, { 0x16, 0x10, 0x10, 0x0a, 0x10 } },
- { 0x26, { 0x14, 0x14, 0x04, 0x04, 0x04 } },
- { 0x27, { 0x58, 0x58, 0x58, 0x5c, 0x58 } },
- { 0x28, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0x29, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0x2a, { 0x08, 0x0a, 0x08, 0x08, 0x08 } },
- { 0x2b, { 0x08, 0x08, 0x08, 0x08, 0x08 } },
- { 0x2c, { 0x06, 0x06, 0x06, 0x06, 0x06 } },
- { 0x2d, { 0x05, 0x06, 0x06, 0x06, 0x06 } },
- { 0x2e, { 0x21, 0x21, 0x21, 0x21, 0x21 } },
- { 0x2f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x30, { 0x14, 0x14, 0x14, 0x14, 0x14 } },
- { 0x33, { 0xb7, 0xb7, 0xb7, 0xb7, 0xb7 } },
- { 0x34, { 0x81, 0x81, 0x81, 0x81, 0x81 } },
- { 0x35, { 0x80, 0x80, 0x80, 0x80, 0x80 } },
- { 0x37, { 0x70, 0x70, 0x70, 0x70, 0x70 } },
- { 0x38, { 0x04, 0x04, 0x02, 0x02, 0x02 } },
- { 0x39, { 0x07, 0x07, 0x05, 0x05, 0x05 } },
- { 0x3a, { 0x06, 0x06, 0x06, 0x06, 0x06 } },
- { 0x3b, { 0x03, 0x03, 0x03, 0x03, 0x03 } },
- { 0x3c, { 0x07, 0x06, 0x04, 0x04, 0x04 } },
- { 0x3d, { 0xf0, 0xf0, 0xf0, 0xf0, 0x80 } },
- { 0x3e, { 0x60, 0x60, 0x60, 0x60, 0xff } },
- { 0x3f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x40, { 0x5b, 0x5b, 0x5b, 0x57, 0x50 } },
- { 0x41, { 0x30, 0x30, 0x30, 0x30, 0x18 } },
- { 0x42, { 0x20, 0x20, 0x20, 0x00, 0x30 } },
- { 0x43, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x44, { 0x3f, 0x3f, 0x3f, 0x3f, 0x3f } },
- { 0x45, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x46, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0x47, { 0x00, 0x00, 0x95, 0x00, 0x95 } },
- { 0x48, { 0xc0, 0xc0, 0xc0, 0xc0, 0xc0 } },
- { 0x49, { 0xc0, 0xc0, 0xc0, 0xc0, 0xc0 } },
- { 0x4a, { 0x40, 0x40, 0x33, 0x11, 0x11 } },
- { 0x4b, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
- { 0x4c, { 0x40, 0x40, 0x99, 0x11, 0x11 } },
- { 0x4d, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
- { 0x4e, { 0x40, 0x40, 0x66, 0x77, 0x77 } },
- { 0x4f, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
- { 0x50, { 0x40, 0x40, 0x88, 0x33, 0x11 } },
- { 0x51, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
- { 0x52, { 0x40, 0x40, 0x88, 0x02, 0x02 } },
- { 0x53, { 0x40, 0x40, 0x00, 0x02, 0x02 } },
- { 0x54, { 0x00, 0x00, 0x88, 0x33, 0x33 } },
- { 0x55, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
- { 0x56, { 0x00, 0x00, 0x00, 0x0b, 0x00 } },
- { 0x57, { 0x40, 0x40, 0x0a, 0x0b, 0x0a } },
- { 0x58, { 0xaa, 0x00, 0x00, 0x00, 0x00 } },
- { 0x59, { 0x7a, 0x40, 0x02, 0x02, 0x02 } },
- { 0x5a, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
- { 0x5b, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
- { 0x5c, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
- { 0x5d, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
- { 0x5e, { 0xc0, 0xc0, 0xc0, 0xff, 0xc0 } },
- { 0x5f, { 0xc0, 0xc0, 0xc0, 0xff, 0xc0 } },
- { 0x60, { 0x40, 0x40, 0x00, 0x30, 0x30 } },
- { 0x61, { 0x40, 0x40, 0x10, 0x30, 0x30 } },
- { 0x62, { 0x40, 0x40, 0x00, 0x30, 0x30 } },
- { 0x63, { 0x40, 0x40, 0x05, 0x30, 0x30 } },
- { 0x64, { 0x40, 0x40, 0x06, 0x00, 0x30 } },
- { 0x65, { 0x40, 0x40, 0x06, 0x08, 0x30 } },
- { 0x66, { 0x40, 0x40, 0x00, 0x00, 0x20 } },
- { 0x67, { 0x40, 0x40, 0x01, 0x04, 0x20 } },
- { 0x68, { 0x00, 0x00, 0x30, 0x00, 0x20 } },
- { 0x69, { 0xa0, 0xa0, 0x00, 0x08, 0x20 } },
- { 0x6a, { 0x00, 0x00, 0x30, 0x00, 0x25 } },
- { 0x6b, { 0xa0, 0xa0, 0x00, 0x06, 0x25 } },
- { 0x6c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x6d, { 0xa0, 0x60, 0x0c, 0x03, 0x0c } },
- { 0x6e, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x6f, { 0xa0, 0x60, 0x04, 0x01, 0x04 } },
- { 0x70, { 0x58, 0x58, 0xaa, 0xaa, 0xaa } },
- { 0x71, { 0x58, 0x58, 0xaa, 0xaa, 0xaa } },
- { 0x72, { 0x58, 0x58, 0xff, 0xff, 0xff } },
- { 0x73, { 0x58, 0x58, 0xff, 0xff, 0xff } },
- { 0x74, { 0x06, 0x06, 0x09, 0x05, 0x05 } },
- { 0x75, { 0x06, 0x06, 0x0a, 0x10, 0x10 } },
- { 0x76, { 0x10, 0x10, 0x06, 0x0a, 0x0a } },
- { 0x77, { 0x12, 0x18, 0x28, 0x10, 0x28 } },
- { 0x78, { 0xf8, 0xf8, 0xf8, 0xf8, 0xf8 } },
- { 0x79, { 0x15, 0x15, 0x03, 0x03, 0x03 } },
- { 0x7a, { 0x02, 0x02, 0x01, 0x04, 0x03 } },
- { 0x7b, { 0x01, 0x02, 0x03, 0x03, 0x03 } },
- { 0x7c, { 0x28, 0x28, 0x28, 0x28, 0x28 } },
- { 0x7f, { 0x25, 0x92, 0x5f, 0x17, 0x2d } },
- { 0x80, { 0x64, 0x64, 0x64, 0x74, 0x64 } },
- { 0x83, { 0x06, 0x03, 0x04, 0x04, 0x04 } },
- { 0x84, { 0xff, 0xff, 0xff, 0xff, 0xff } },
- { 0x85, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
- { 0x86, { 0x00, 0x00, 0x11, 0x11, 0x11 } },
- { 0x87, { 0x03, 0x03, 0x03, 0x03, 0x03 } },
- { 0x88, { 0x09, 0x09, 0x09, 0x09, 0x09 } },
- { 0x89, { 0x20, 0x20, 0x30, 0x20, 0x20 } },
- { 0x8a, { 0x03, 0x03, 0x02, 0x03, 0x02 } },
- { 0x8b, { 0x00, 0x07, 0x09, 0x00, 0x09 } },
- { 0x8c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x8d, { 0x4f, 0x4f, 0x4f, 0x3f, 0x4f } },
- { 0x8e, { 0xf0, 0xf0, 0x60, 0xf0, 0xa0 } },
- { 0x8f, { 0xe8, 0xe8, 0xe8, 0xe8, 0xe8 } },
- { 0x90, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
- { 0x91, { 0x40, 0x40, 0x70, 0x70, 0x10 } },
- { 0x92, { 0x00, 0x00, 0x00, 0x00, 0x04 } },
- { 0x93, { 0x60, 0x60, 0x60, 0x60, 0x60 } },
- { 0x94, { 0x00, 0x00, 0x00, 0x00, 0x03 } },
- { 0x95, { 0x09, 0x09, 0x47, 0x47, 0x47 } },
- { 0x96, { 0x80, 0xa0, 0xa0, 0x40, 0xa0 } },
- { 0x97, { 0x60, 0x60, 0x60, 0x60, 0x60 } },
- { 0x98, { 0x50, 0x50, 0x50, 0x30, 0x50 } },
- { 0x99, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
- { 0x9a, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0x9b, { 0x40, 0x40, 0x40, 0x30, 0x40 } },
- { 0x9c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa0, { 0xf0, 0xf0, 0xf0, 0xf0, 0xf0 } },
- { 0xa1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa2, { 0x30, 0x30, 0x00, 0x30, 0x00 } },
- { 0xa3, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa4, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa5, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xa8, { 0x77, 0x77, 0x77, 0x77, 0x77 } },
- { 0xa9, { 0x02, 0x02, 0x02, 0x02, 0x02 } },
- { 0xaa, { 0x40, 0x40, 0x40, 0x40, 0x40 } },
- { 0xac, { 0x1f, 0x1f, 0x1f, 0x1f, 0x1f } },
- { 0xad, { 0x14, 0x14, 0x14, 0x14, 0x14 } },
- { 0xae, { 0x78, 0x78, 0x78, 0x78, 0x78 } },
- { 0xaf, { 0x06, 0x06, 0x06, 0x06, 0x07 } },
- { 0xb0, { 0x1b, 0x1b, 0x1b, 0x19, 0x1b } },
- { 0xb1, { 0x18, 0x17, 0x17, 0x18, 0x17 } },
- { 0xb2, { 0x35, 0x82, 0x82, 0x38, 0x82 } },
- { 0xb3, { 0xb6, 0xce, 0xc7, 0x5c, 0xb0 } },
- { 0xb4, { 0x3f, 0x3e, 0x3e, 0x3f, 0x3e } },
- { 0xb5, { 0x70, 0x58, 0x50, 0x68, 0x50 } },
- { 0xb6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xb7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xb8, { 0x03, 0x03, 0x01, 0x01, 0x01 } },
- { 0xb9, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xba, { 0x06, 0x06, 0x0a, 0x05, 0x0a } },
- { 0xbb, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xbc, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xbd, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xbe, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xbf, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc3, { 0x00, 0x00, 0x88, 0x66, 0x88 } },
- { 0xc4, { 0x10, 0x10, 0x00, 0x00, 0x00 } },
- { 0xc5, { 0x00, 0x00, 0x44, 0x60, 0x44 } },
- { 0xc6, { 0x10, 0x0a, 0x00, 0x00, 0x00 } },
- { 0xc7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc8, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xc9, { 0x90, 0x04, 0x00, 0x00, 0x00 } },
- { 0xca, { 0x90, 0x08, 0x01, 0x01, 0x01 } },
- { 0xcb, { 0xa0, 0x04, 0x00, 0x44, 0x00 } },
- { 0xcc, { 0xa0, 0x10, 0x03, 0x00, 0x03 } },
- { 0xcd, { 0x06, 0x06, 0x06, 0x05, 0x06 } },
- { 0xce, { 0x05, 0x05, 0x01, 0x01, 0x01 } },
- { 0xcf, { 0x40, 0x20, 0x18, 0x18, 0x18 } },
- { 0xd0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xd1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xd2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xd3, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xd4, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
- { 0xd5, { 0x05, 0x05, 0x05, 0x03, 0x05 } },
- { 0xd6, { 0xac, 0x22, 0xca, 0x8f, 0xca } },
- { 0xd7, { 0x20, 0x20, 0x20, 0x20, 0x20 } },
- { 0xd8, { 0x01, 0x01, 0x01, 0x01, 0x01 } },
- { 0xd9, { 0x00, 0x00, 0x0f, 0x00, 0x0f } },
- { 0xda, { 0x00, 0xff, 0xff, 0x0e, 0xff } },
- { 0xdb, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0xdc, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0xdd, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
- { 0xde, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0xdf, { 0x42, 0x42, 0x44, 0x44, 0x04 } },
- { 0xe0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xe1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xe2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xe3, { 0x00, 0x00, 0x26, 0x06, 0x26 } },
- { 0xe4, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xe5, { 0x01, 0x0a, 0x01, 0x01, 0x01 } },
- { 0xe6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xe7, { 0x08, 0x08, 0x08, 0x08, 0x08 } },
- { 0xe8, { 0x63, 0x63, 0x63, 0x63, 0x63 } },
- { 0xe9, { 0x59, 0x59, 0x59, 0x59, 0x59 } },
- { 0xea, { 0x80, 0x80, 0x20, 0x80, 0x80 } },
- { 0xeb, { 0x37, 0x37, 0x78, 0x37, 0x77 } },
- { 0xec, { 0x1f, 0x1f, 0x25, 0x25, 0x25 } },
- { 0xed, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
- { 0xee, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
- { 0xef, { 0x70, 0x70, 0x58, 0x38, 0x58 } },
- { 0xf0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
-};
-
-#endif /* HD29L2_PRIV */
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
index 6913cd687b4d..2fc8d3c72c11 100644
--- a/drivers/media/dvb-frontends/isl6405.c
+++ b/drivers/media/dvb-frontends/isl6405.c
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/isl6405.h b/drivers/media/dvb-frontends/isl6405.h
index 4a23d3bdf3e6..18fe714f9999 100644
--- a/drivers/media/dvb-frontends/isl6405.h
+++ b/drivers/media/dvb-frontends/isl6405.h
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 0b6d3837d5de..838b42771a05 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/isl6421.h b/drivers/media/dvb-frontends/isl6421.h
index 00f9874ca5a2..4deeddec5140 100644
--- a/drivers/media/dvb-frontends/isl6421.h
+++ b/drivers/media/dvb-frontends/isl6421.h
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 475525134327..5bb1e73a10b4 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#include <linux/module.h>
diff --git a/drivers/media/dvb-frontends/itd1000.h b/drivers/media/dvb-frontends/itd1000.h
index a691bb6f26de..f8a2256a0b36 100644
--- a/drivers/media/dvb-frontends/itd1000.h
+++ b/drivers/media/dvb-frontends/itd1000.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef ITD1000_H
diff --git a/drivers/media/dvb-frontends/itd1000_priv.h b/drivers/media/dvb-frontends/itd1000_priv.h
index 08ca851223c9..6c99d95d1056 100644
--- a/drivers/media/dvb-frontends/itd1000_priv.h
+++ b/drivers/media/dvb-frontends/itd1000_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef ITD1000_PRIV_H
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index ca371680a69f..534b24fa2b95 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index 5eab39744b23..0b0a431c74f6 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DVB_IX2505V_H
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 3b31e5f20f46..5798079add10 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/jiffies.h>
diff --git a/drivers/media/dvb-frontends/lg2160.h b/drivers/media/dvb-frontends/lg2160.h
index 8c74ddc6b88a..ba99125deac0 100644
--- a/drivers/media/dvb-frontends/lg2160.h
+++ b/drivers/media/dvb-frontends/lg2160.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _LG2160_H_
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 9f5d9380bf5f..0af4d9104761 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <asm/div64.h>
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h
index e7dceb60e572..2fb60d91f7b4 100644
--- a/drivers/media/dvb-frontends/lgdt3305.h
+++ b/drivers/media/dvb-frontends/lgdt3305.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _LGDT3305_H_
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 19dca46b1171..c9b1eb38444e 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -22,6 +22,7 @@
#include <linux/dvb/frontend.h>
#include "dvb_math.h"
#include "lgdt3306a.h"
+#include <linux/i2c-mux.h>
static int debug;
@@ -65,6 +66,8 @@ struct lgdt3306a_state {
enum fe_modulation current_modulation;
u32 current_frequency;
u32 snr;
+
+ struct i2c_mux_core *muxc;
};
/*
@@ -2131,6 +2134,111 @@ static const struct dvb_frontend_ops lgdt3306a_ops = {
.search = lgdt3306a_search,
};
+static int lgdt3306a_select(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct i2c_client *client = i2c_mux_priv(muxc);
+ struct lgdt3306a_state *state = i2c_get_clientdata(client);
+
+ return lgdt3306a_i2c_gate_ctrl(&state->frontend, 1);
+}
+
+static int lgdt3306a_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct i2c_client *client = i2c_mux_priv(muxc);
+ struct lgdt3306a_state *state = i2c_get_clientdata(client);
+
+ return lgdt3306a_i2c_gate_ctrl(&state->frontend, 0);
+}
+
+static int lgdt3306a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lgdt3306a_config *config;
+ struct lgdt3306a_state *state;
+ struct dvb_frontend *fe;
+ int ret;
+
+ config = kzalloc(sizeof(struct lgdt3306a_config), GFP_KERNEL);
+ if (config == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(config, client->dev.platform_data,
+ sizeof(struct lgdt3306a_config));
+
+ config->i2c_addr = client->addr;
+ fe = lgdt3306a_attach(config, client->adapter);
+ if (fe == NULL) {
+ ret = -ENODEV;
+ goto err_fe;
+ }
+
+ i2c_set_clientdata(client, fe->demodulator_priv);
+ state = fe->demodulator_priv;
+
+ /* create mux i2c adapter for tuner */
+ state->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED,
+ lgdt3306a_select, lgdt3306a_deselect);
+ if (!state->muxc) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+ state->muxc->priv = client;
+ ret = i2c_mux_add_adapter(state->muxc, 0, 0, 0);
+ if (ret)
+ goto err_kfree;
+
+ /* create dvb_frontend */
+ fe->ops.i2c_gate_ctrl = NULL;
+ *config->i2c_adapter = state->muxc->adapter[0];
+ *config->fe = fe;
+
+ return 0;
+
+err_kfree:
+ kfree(state);
+err_fe:
+ kfree(config);
+fail:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int lgdt3306a_remove(struct i2c_client *client)
+{
+ struct lgdt3306a_state *state = i2c_get_clientdata(client);
+
+ i2c_mux_del_adapters(state->muxc);
+
+ state->frontend.ops.release = NULL;
+ state->frontend.demodulator_priv = NULL;
+
+ kfree(state->cfg);
+ kfree(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id lgdt3306a_id_table[] = {
+ {"lgdt3306a", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lgdt3306a_id_table);
+
+static struct i2c_driver lgdt3306a_driver = {
+ .driver = {
+ .name = "lgdt3306a",
+ .suppress_bind_attrs = true,
+ },
+ .probe = lgdt3306a_probe,
+ .remove = lgdt3306a_remove,
+ .id_table = lgdt3306a_id_table,
+};
+
+module_i2c_driver(lgdt3306a_driver);
+
MODULE_DESCRIPTION("LG Electronics LGDT3306A ATSC/QAM-B Demodulator Driver");
MODULE_AUTHOR("Fred Richter <frichter@hauppauge.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/lgdt3306a.h b/drivers/media/dvb-frontends/lgdt3306a.h
index 9dbb2dced1fe..6ce337ec5272 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.h
+++ b/drivers/media/dvb-frontends/lgdt3306a.h
@@ -56,6 +56,10 @@ struct lgdt3306a_config {
/* demod clock freq in MHz; 24 or 25 supported */
int xtalMHz;
+
+ /* returned by driver if using i2c bus multiplexing */
+ struct dvb_frontend **fe;
+ struct i2c_adapter **i2c_adapter;
};
#if IS_REACHABLE(CONFIG_DVB_LGDT3306A)
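The two new pointers in lgdt3306a_config are filled in by lgdt3306a_probe() above, so a bridge driver that binds the demod through the new I2C driver interface gets the registered frontend and the muxed (gate-controlled) tuner bus back through its own config. A minimal sketch, assuming a demod at address 0x59 and illustrative IF/crystal values (none of this is taken from the patch itself):

	struct dvb_frontend *fe;
	struct i2c_adapter *tuner_adapter;
	struct i2c_client *client;

	struct lgdt3306a_config cfg = {
		.qam_if_khz  = 4000,
		.vsb_if_khz  = 3250,
		.xtalMHz     = 25,
		.fe          = &fe,		/* filled in by lgdt3306a_probe() */
		.i2c_adapter = &tuner_adapter,	/* muxed adapter for the tuner */
	};

	struct i2c_board_info board_info = {
		.type          = "lgdt3306a",
		.addr          = 0x59,
		.platform_data = &cfg,
	};

	client = i2c_new_device(demod_i2c, &board_info);

The tuner is then attached on tuner_adapter; lgdt3306a_select()/lgdt3306a_deselect() open and close the demod I2C gate around each transfer automatically.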
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 2f4a0316f89c..06f47dc8cd3d 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
/*
diff --git a/drivers/media/dvb-frontends/lgdt330x.h b/drivers/media/dvb-frontends/lgdt330x.h
index c73eeb45e330..61434cbecd2c 100644
--- a/drivers/media/dvb-frontends/lgdt330x.h
+++ b/drivers/media/dvb-frontends/lgdt330x.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef LGDT330X_H
diff --git a/drivers/media/dvb-frontends/lgdt330x_priv.h b/drivers/media/dvb-frontends/lgdt330x_priv.h
index 1922f09a02d0..dcb9a317eddc 100644
--- a/drivers/media/dvb-frontends/lgdt330x_priv.h
+++ b/drivers/media/dvb-frontends/lgdt330x_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _LGDT330X_PRIV_
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 6d2e62469d58..e6bf60e1138c 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <asm/div64.h>
diff --git a/drivers/media/dvb-frontends/lgs8gxx.h b/drivers/media/dvb-frontends/lgs8gxx.h
index 7519c0210399..aa83ea46807b 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.h
+++ b/drivers/media/dvb-frontends/lgs8gxx.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LGS8GXX_H__
diff --git a/drivers/media/dvb-frontends/lgs8gxx_priv.h b/drivers/media/dvb-frontends/lgs8gxx_priv.h
index 8ef376f1414d..42ecbbd14c90 100644
--- a/drivers/media/dvb-frontends/lgs8gxx_priv.h
+++ b/drivers/media/dvb-frontends/lgs8gxx_priv.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef LGS8913_PRIV_H
diff --git a/drivers/media/dvb-frontends/lnbh24.h b/drivers/media/dvb-frontends/lnbh24.h
index 24431dfdce1f..332d639025ba 100644
--- a/drivers/media/dvb-frontends/lnbh24.h
+++ b/drivers/media/dvb-frontends/lnbh24.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LNBH24_H
diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
index 6261460d93a7..392d7be93774 100644
--- a/drivers/media/dvb-frontends/lnbp21.c
+++ b/drivers/media/dvb-frontends/lnbp21.c
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h
index 4bb6439068ec..ee9d050ddc04 100644
--- a/drivers/media/dvb-frontends/lnbp21.h
+++ b/drivers/media/dvb-frontends/lnbp21.h
@@ -14,11 +14,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
index 5c5fd04fd4a7..39326a2ebab2 100644
--- a/drivers/media/dvb-frontends/lnbp22.c
+++ b/drivers/media/dvb-frontends/lnbp22.c
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h
index 0cb72126c498..f4c59ff7b7ca 100644
--- a/drivers/media/dvb-frontends/lnbp22.h
+++ b/drivers/media/dvb-frontends/lnbp22.h
@@ -15,11 +15,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index c221c7d2ac3e..15874244fd8b 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -223,6 +223,13 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
+ /* PLP */
+ if (c->delivery_system == SYS_DVBT2) {
+ ret = regmap_write(dev->regmap[2], 0x36, c->stream_id);
+ if (ret)
+ goto err;
+ }
+
/* Reset FSM */
ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
if (ret)
@@ -592,7 +599,8 @@ static const struct dvb_frontend_ops mn88473_ops = {
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
},
.get_tune_settings = mn88473_get_tune_settings,
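The PLP index written to register 0x36 comes straight from the DVB API stream_id property, so with this change an application can pick a particular DVB-T2 PLP at tune time. A minimal userspace sketch (frequency, bandwidth and PLP id are illustrative; fd is an already opened frontend device):

	#include <sys/ioctl.h>
	#include <linux/dvb/frontend.h>

	struct dtv_property props[] = {
		{ .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT2 },
		{ .cmd = DTV_FREQUENCY,       .u.data = 506000000 },
		{ .cmd = DTV_BANDWIDTH_HZ,    .u.data = 8000000 },
		{ .cmd = DTV_STREAM_ID,       .u.data = 1 },	/* ends up in c->stream_id above */
		{ .cmd = DTV_TUNE,            .u.data = 0 },
	};
	struct dtv_properties cmdseq = { .num = 5, .props = props };

	ioctl(fd, FE_SET_PROPERTY, &cmdseq);

The FE_CAN_MULTISTREAM bit added to the capability mask tells applications that the stream_id property is honoured.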
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 48ea0408f02a..e127090f2d22 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -24,10 +24,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/mt352.h b/drivers/media/dvb-frontends/mt352.h
index 5873263bd1af..b4c03b7405fb 100644
--- a/drivers/media/dvb-frontends/mt352.h
+++ b/drivers/media/dvb-frontends/mt352.h
@@ -24,10 +24,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef MT352_H
diff --git a/drivers/media/dvb-frontends/mt352_priv.h b/drivers/media/dvb-frontends/mt352_priv.h
index 44ad0d4c8f12..79bbb894b287 100644
--- a/drivers/media/dvb-frontends/mt352_priv.h
+++ b/drivers/media/dvb-frontends/mt352_priv.h
@@ -24,10 +24,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef _MT352_PRIV_
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 2fe40372ca07..bf6e5cd572c5 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
/*
diff --git a/drivers/media/dvb-frontends/nxt200x.h b/drivers/media/dvb-frontends/nxt200x.h
index 825b928ef542..360320645913 100644
--- a/drivers/media/dvb-frontends/nxt200x.h
+++ b/drivers/media/dvb-frontends/nxt200x.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef NXT200X_H
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 17bdadd7d0e1..4b67d7e0116d 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
/*
diff --git a/drivers/media/dvb-frontends/or51132.h b/drivers/media/dvb-frontends/or51132.h
index 9acf8dc87413..96b70e78e30a 100644
--- a/drivers/media/dvb-frontends/or51132.h
+++ b/drivers/media/dvb-frontends/or51132.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef OR51132_H
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index 27eb73aa4f62..d14fa9736ae5 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
diff --git a/drivers/media/dvb-frontends/or51211.h b/drivers/media/dvb-frontends/or51211.h
index cc6adab63249..03b476982ad0 100644
--- a/drivers/media/dvb-frontends/or51211.h
+++ b/drivers/media/dvb-frontends/or51211.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef OR51211_H
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index e038e886731b..c6e78d870ccd 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -956,7 +956,7 @@ static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
mutex_unlock(&dev->v4l2_lock);
}
-static struct vb2_ops rtl2832_sdr_vb2_ops = {
+static const struct vb2_ops rtl2832_sdr_vb2_ops = {
.queue_setup = rtl2832_sdr_queue_setup,
.buf_prepare = rtl2832_sdr_buf_prepare,
.buf_queue = rtl2832_sdr_buf_queue,
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index f9a18fe94d88..cba9bff05b12 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/s5h1420.h b/drivers/media/dvb-frontends/s5h1420.h
index 142d93e7d02b..43d0de6f3a55 100644
--- a/drivers/media/dvb-frontends/s5h1420.h
+++ b/drivers/media/dvb-frontends/s5h1420.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef S5H1420_H
#define S5H1420_H
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index a32fd9bc51a9..4de50fe0c638 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/s5h1432.h b/drivers/media/dvb-frontends/s5h1432.h
index b81c9bd4e422..af3a157b5e77 100644
--- a/drivers/media/dvb-frontends/s5h1432.h
+++ b/drivers/media/dvb-frontends/s5h1432.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __S5H1432_H__
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 20b4a659e2e4..680ba06c29fb 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -85,7 +85,8 @@ static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = fe->demodulator_priv;
struct si2168_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
+ int ret, i;
+ unsigned int utmp, utmp1, utmp2;
struct si2168_cmd cmd;
*status = 0;
@@ -144,6 +145,61 @@ static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
dev_dbg(&client->dev, "status=%02x args=%*ph\n",
*status, cmd.rlen, cmd.args);
+ /* BER */
+ if (*status & FE_HAS_VITERBI) {
+ memcpy(cmd.args, "\x82\x00", 2);
+ cmd.wlen = 2;
+ cmd.rlen = 3;
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /*
+ * Firmware returns [0, 255] mantissa and [0, 8] exponent.
+ * Convert to DVB API: mantissa * 10^(8 - exponent) / 10^8
+ */
+ utmp = clamp(8 - cmd.args[1], 0, 8);
+ for (i = 0, utmp1 = 1; i < utmp; i++)
+ utmp1 = utmp1 * 10;
+
+ utmp1 = cmd.args[2] * utmp1;
+ utmp2 = 100000000; /* 10^8 */
+
+ dev_dbg(&client->dev,
+ "post_bit_error=%u post_bit_count=%u ber=%u*10^-%u\n",
+ utmp1, utmp2, cmd.args[2], cmd.args[1]);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += utmp1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += utmp2;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* UCB */
+ if (*status & FE_HAS_SYNC) {
+ memcpy(cmd.args, "\x84\x01", 2);
+ cmd.wlen = 2;
+ cmd.rlen = 3;
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ utmp1 = cmd.args[2] << 8 | cmd.args[1] << 0;
+ dev_dbg(&client->dev, "block_error=%u\n", utmp1);
+
+ /* Sometimes firmware returns bogus value */
+ if (utmp1 == 0xffff)
+ utmp1 = 0;
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue += utmp1;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -355,6 +411,7 @@ static int si2168_init(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
struct si2168_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, len, remaining;
const struct firmware *fw;
struct si2168_cmd cmd;
@@ -493,10 +550,19 @@ static int si2168_init(struct dvb_frontend *fe)
dev->warm = true;
warm:
+ /* Init stats here to indicate which stats are supported */
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
dev->active = true;
return 0;
-
err_release_firmware:
release_firmware(fw);
err:
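A worked example of the BER conversion added above, with illustrative firmware values: exponent 5 (cmd.args[1]) and mantissa 52 (cmd.args[2]) correspond to a bit error rate of 52 * 10^-5.

	/* Sketch of the arithmetic only; the values are made up for illustration */
	unsigned int exponent = 5, mantissa = 52;
	unsigned int scale = 1, i;

	for (i = 0; i < 8 - exponent; i++)	/* 10^(8 - exponent) = 10^3 */
		scale *= 10;

	/* accumulate 52 000 bit errors against a window of 10^8 bits */
	unsigned int post_bit_error = mantissa * scale;	/* 52000 */
	unsigned int post_bit_count = 100000000;	/* 10^8 */
	/* post_bit_error / post_bit_count = 5.2 * 10^-4 = 52 * 10^-5, matching the firmware report */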
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 7843ccb448a0..2fecac6231ff 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -21,6 +21,7 @@
#include "dvb_frontend.h"
#include <linux/firmware.h>
#include <linux/i2c-mux.h>
+#include <linux/kernel.h>
#define SI2168_A20_FIRMWARE "dvb-demod-si2168-a20-01.fw"
#define SI2168_A30_FIRMWARE "dvb-demod-si2168-a30-01.fw"
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 4ac1ce2831ba..fd49c436a36d 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h
index b88166a9716f..26c38a0503c8 100644
--- a/drivers/media/dvb-frontends/stv0367.h
+++ b/drivers/media/dvb-frontends/stv0367.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0367_H
diff --git a/drivers/media/dvb-frontends/stv0367_priv.h b/drivers/media/dvb-frontends/stv0367_priv.h
index 89bf6f64b078..8abc451dd524 100644
--- a/drivers/media/dvb-frontends/stv0367_priv.h
+++ b/drivers/media/dvb-frontends/stv0367_priv.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Common driver error constants */
diff --git a/drivers/media/dvb-frontends/stv0367_regs.h b/drivers/media/dvb-frontends/stv0367_regs.h
index a96fbdc7e25e..1d1586221239 100644
--- a/drivers/media/dvb-frontends/stv0367_regs.h
+++ b/drivers/media/dvb-frontends/stv0367_regs.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0367_REGS_H
diff --git a/drivers/media/dvb-frontends/stv0900.h b/drivers/media/dvb-frontends/stv0900.h
index 9ca2da90c7d7..1571a465e05c 100644
--- a/drivers/media/dvb-frontends/stv0900.h
+++ b/drivers/media/dvb-frontends/stv0900.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0900_H
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 43a0f69b4b14..0b739725e3c0 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv0900_init.h b/drivers/media/dvb-frontends/stv0900_init.h
index b684df9995d8..411941442086 100644
--- a/drivers/media/dvb-frontends/stv0900_init.h
+++ b/drivers/media/dvb-frontends/stv0900_init.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0900_INIT_H
diff --git a/drivers/media/dvb-frontends/stv0900_priv.h b/drivers/media/dvb-frontends/stv0900_priv.h
index e0ea74c8e093..7a95f955627b 100644
--- a/drivers/media/dvb-frontends/stv0900_priv.h
+++ b/drivers/media/dvb-frontends/stv0900_priv.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0900_PRIV_H
diff --git a/drivers/media/dvb-frontends/stv0900_reg.h b/drivers/media/dvb-frontends/stv0900_reg.h
index 511ed2a2d987..59f264c2f8f5 100644
--- a/drivers/media/dvb-frontends/stv0900_reg.h
+++ b/drivers/media/dvb-frontends/stv0900_reg.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STV0900_REG_H
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index bded82774f4b..c97a39120ea5 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "stv0900.h"
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 6a72d0be2ec5..e4fd9c1b0560 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/stv6110.h b/drivers/media/dvb-frontends/stv6110.h
index 4604f793d954..ab73124c0dec 100644
--- a/drivers/media/dvb-frontends/stv6110.h
+++ b/drivers/media/dvb-frontends/stv6110.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __DVB_STV6110_H__
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index 6859fa5d5a85..2d2778be2d2f 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -14,12 +14,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/tdhd1.h b/drivers/media/dvb-frontends/tdhd1.h
index 2b9e8732c802..68358c0d869f 100644
--- a/drivers/media/dvb-frontends/tdhd1.h
+++ b/drivers/media/dvb-frontends/tdhd1.h
@@ -13,11 +13,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* The project's page is at https://linuxtv.org
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 05ee16d29851..18e6d4c5be21 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index 52919e04e258..9f15cbdfdeca 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __DVB_TUA6100_H__
diff --git a/drivers/media/dvb-frontends/zd1301_demod.c b/drivers/media/dvb-frontends/zd1301_demod.c
new file mode 100644
index 000000000000..fcf5f69de0c5
--- /dev/null
+++ b/drivers/media/dvb-frontends/zd1301_demod.c
@@ -0,0 +1,551 @@
+/*
+ * ZyDAS ZD1301 driver (demodulator)
+ *
+ * Copyright (C) 2015 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "zd1301_demod.h"
+
+static u8 zd1301_demod_gain = 0x38;
+module_param_named(gain, zd1301_demod_gain, byte, 0644);
+MODULE_PARM_DESC(gain, "gain (value: 0x00 - 0x70, default: 0x38)");
+
+struct zd1301_demod_dev {
+ struct platform_device *pdev;
+ struct dvb_frontend frontend;
+ struct i2c_adapter adapter;
+ u8 gain;
+};
+
+static int zd1301_demod_wreg(struct zd1301_demod_dev *dev, u16 reg, u8 val)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct zd1301_demod_platform_data *pdata = pdev->dev.platform_data;
+
+ return pdata->reg_write(pdata->reg_priv, reg, val);
+}
+
+static int zd1301_demod_rreg(struct zd1301_demod_dev *dev, u16 reg, u8 *val)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct zd1301_demod_platform_data *pdata = pdev->dev.platform_data;
+
+ return pdata->reg_read(pdata->reg_priv, reg, val);
+}
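/*
 * Sketch (assumption, not part of this patch): the reg_read()/reg_write()
 * callbacks used above are supplied by the parent USB driver through
 * platform data, so the demod never touches the bus itself. The helper
 * names and the "zd1301_demod" device name below are illustrative.
 */
struct zd1301_demod_platform_data demod_pdata = {
	.reg_priv  = d,			/* opaque pointer handed back to the callbacks */
	.reg_read  = zd1301_usb_rreg,	/* hypothetical parent-driver register helpers */
	.reg_write = zd1301_usb_wreg,
};

pdev = platform_device_register_data(&intf->dev, "zd1301_demod",
				     PLATFORM_DEVID_AUTO,
				     &demod_pdata, sizeof(demod_pdata));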
+
+static int zd1301_demod_set_frontend(struct dvb_frontend *fe)
+{
+ struct zd1301_demod_dev *dev = fe->demodulator_priv;
+ struct platform_device *pdev = dev->pdev;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u32 if_frequency;
+ u8 r6a50_val;
+
+ dev_dbg(&pdev->dev, "frequency=%u bandwidth_hz=%u\n",
+ c->frequency, c->bandwidth_hz);
+
+ /* Program tuner */
+ if (fe->ops.tuner_ops.set_params &&
+ fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ if (ret)
+ goto err;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev_dbg(&pdev->dev, "if_frequency=%u\n", if_frequency);
+ if (if_frequency != 36150000) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ r6a50_val = 0x78;
+ break;
+ case 7000000:
+ r6a50_val = 0x68;
+ break;
+ case 8000000:
+ r6a50_val = 0x58;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = zd1301_demod_wreg(dev, 0x6a60, 0x11);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a47, 0x46);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a48, 0x46);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a4a, 0x15);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a4b, 0x63);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a5b, 0x99);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a3b, 0x10);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6806, 0x01);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a41, 0x08);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a42, 0x46);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a44, 0x14);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a45, 0x67);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a38, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a4c, 0x52);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a49, 0x2a);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6840, 0x2e);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a50, r6a50_val);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a38, 0x07);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_demod_sleep(struct dvb_frontend *fe)
+{
+ struct zd1301_demod_dev *dev = fe->demodulator_priv;
+ struct platform_device *pdev = dev->pdev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "\n");
+
+ ret = zd1301_demod_wreg(dev, 0x6a43, 0x70);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x684e, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6849, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x68e2, 0xd7);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x68e0, 0x39);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6840, 0x21);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_demod_init(struct dvb_frontend *fe)
+{
+ struct zd1301_demod_dev *dev = fe->demodulator_priv;
+ struct platform_device *pdev = dev->pdev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "\n");
+
+ ret = zd1301_demod_wreg(dev, 0x6840, 0x26);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x68e0, 0xff);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x68e2, 0xd8);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6849, 0x4e);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x684e, 0x01);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6a43, zd1301_demod_gain);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_demod_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *settings)
+{
+ struct zd1301_demod_dev *dev = fe->demodulator_priv;
+ struct platform_device *pdev = dev->pdev;
+
+ dev_dbg(&pdev->dev, "\n");
+
+ /* ~180ms seems to be enough in practice; use 400ms to leave some margin */
+ settings->min_delay_ms = 400;
+
+ return 0;
+}
+
+static int zd1301_demod_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct zd1301_demod_dev *dev = fe->demodulator_priv;
+ struct platform_device *pdev = dev->pdev;
+ int ret;
+ u8 u8tmp;
+
+ ret = zd1301_demod_rreg(dev, 0x6a24, &u8tmp);
+ if (ret)
+ goto err;
+ if (u8tmp > 0x00 && u8tmp < 0x20)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_LOCK;
+ else
+ *status = 0;
+
+ dev_dbg(&pdev->dev, "lock byte=%02x\n", u8tmp);
+
+ /*
+ * Interesting registers here are:
+ * 0x6a05: read some gain value
+ * 0x6a06: read roughly the same gain value as written to 0x6a43
+ * 0x6a07: read some gain value
+ * 0x6a43: gain value set by the driver
+ * 0x6a24: demod lock bits (FSM stage?)
+ *
+ * The driver should implement some kind of algorithm to calculate a
+ * suitable value for register 0x6a43, most likely based on the values
+ * read from registers 0x6a05 and 0x6a07. The gain register 0x6a43
+ * appears to accept values in the range 0x00 - 0x70.
+ */
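+ /*
+ * A minimal sketch of such an algorithm (illustrative only, not part of
+ * this driver; the target level and the simple up/down stepping are
+ * assumptions):
+ *
+ *     ret = zd1301_demod_rreg(dev, 0x6a05, &agc);
+ *     if (!ret) {
+ *             if (agc > target && gain > 0x00)
+ *                     gain--;
+ *             else if (agc < target && gain < 0x70)
+ *                     gain++;
+ *             ret = zd1301_demod_wreg(dev, 0x6a43, gain);
+ *     }
+ */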
+
+ if (dev->gain != zd1301_demod_gain) {
+ dev->gain = zd1301_demod_gain;
+
+ ret = zd1301_demod_wreg(dev, 0x6a43, dev->gain);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static const struct dvb_frontend_ops zd1301_demod_ops = {
+ .delsys = {SYS_DVBT},
+ .info = {
+ .name = "ZyDAS ZD1301",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS
+ },
+
+ .sleep = zd1301_demod_sleep,
+ .init = zd1301_demod_init,
+ .set_frontend = zd1301_demod_set_frontend,
+ .get_tune_settings = zd1301_demod_get_tune_settings,
+ .read_status = zd1301_demod_read_status,
+};
+
+struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev)
+{
+ struct zd1301_demod_dev *dev = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "\n");
+
+ return &dev->frontend;
+}
+EXPORT_SYMBOL(zd1301_demod_get_dvb_frontend);
+
+static int zd1301_demod_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct zd1301_demod_dev *dev = i2c_get_adapdata(adapter);
+ struct platform_device *pdev = dev->pdev;
+ int ret, i;
+ unsigned long timeout;
+ u8 u8tmp;
+
+ #define I2C_XFER_TIMEOUT 5
+ #define ZD1301_IS_I2C_XFER_WRITE_READ(_msg, _num) \
+ (_num == 2 && !(_msg[0].flags & I2C_M_RD) && (_msg[1].flags & I2C_M_RD))
+ #define ZD1301_IS_I2C_XFER_WRITE(_msg, _num) \
+ (_num == 1 && !(_msg[0].flags & I2C_M_RD))
+ #define ZD1301_IS_I2C_XFER_READ(_msg, _num) \
+ (_num == 1 && (_msg[0].flags & I2C_M_RD))
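+ /*
+ * For reference, a register read from the attached tuner typically shows
+ * up here as a write+read pair along these lines (illustrative only; the
+ * 0x60 tuner address is a hypothetical placeholder):
+ *
+ *     struct i2c_msg msg[] = {
+ *             { .addr = 0x60, .flags = 0, .len = 1, .buf = &reg },
+ *             { .addr = 0x60, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ *     };
+ *
+ * which matches ZD1301_IS_I2C_XFER_WRITE_READ() below.
+ */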
+ if (ZD1301_IS_I2C_XFER_WRITE_READ(msg, num)) {
+ dev_dbg(&pdev->dev, "write&read msg[0].len=%u msg[1].len=%u\n",
+ msg[0].len, msg[1].len);
+ if (msg[0].len > 1 || msg[1].len > 8) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ret = zd1301_demod_wreg(dev, 0x6811, 0x80);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6812, 0x05);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6813, msg[1].addr << 1);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6801, msg[0].buf[0]);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6802, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6803, 0x06);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6805, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6804, msg[1].len);
+ if (ret)
+ goto err;
+
+ /* Poll xfer ready */
+ timeout = jiffies + msecs_to_jiffies(I2C_XFER_TIMEOUT);
+ for (u8tmp = 1; !time_after(jiffies, timeout) && u8tmp;) {
+ usleep_range(500, 800);
+
+ ret = zd1301_demod_rreg(dev, 0x6804, &u8tmp);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < msg[1].len; i++) {
+ ret = zd1301_demod_rreg(dev, 0x0600 + i, &msg[1].buf[i]);
+ if (ret)
+ goto err;
+ }
+ } else if (ZD1301_IS_I2C_XFER_WRITE(msg, num)) {
+ dev_dbg(&pdev->dev, "write msg[0].len=%u\n", msg[0].len);
+ if (msg[0].len > 1 + 8) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ret = zd1301_demod_wreg(dev, 0x6811, 0x80);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6812, 0x01);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6813, msg[0].addr << 1);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6800, msg[0].buf[0]);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6802, 0x00);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6803, 0x06);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < msg[0].len - 1; i++) {
+ ret = zd1301_demod_wreg(dev, 0x0600 + i, msg[0].buf[1 + i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = zd1301_demod_wreg(dev, 0x6805, 0x80);
+ if (ret)
+ goto err;
+ ret = zd1301_demod_wreg(dev, 0x6804, msg[0].len - 1);
+ if (ret)
+ goto err;
+
+ /* Poll xfer ready */
+ timeout = jiffies + msecs_to_jiffies(I2C_XFER_TIMEOUT);
+ for (u8tmp = 1; !time_after(jiffies, timeout) && u8tmp;) {
+ usleep_range(500, 800);
+
+ ret = zd1301_demod_rreg(dev, 0x6804, &u8tmp);
+ if (ret)
+ goto err;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "unknown msg[0].len=%u\n", msg[0].len);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ return num;
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static u32 zd1301_demod_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm zd1301_demod_i2c_algorithm = {
+ .master_xfer = zd1301_demod_i2c_master_xfer,
+ .functionality = zd1301_demod_i2c_functionality,
+};
+
+struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev)
+{
+ struct zd1301_demod_dev *dev = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "\n");
+
+ return &dev->adapter;
+}
+EXPORT_SYMBOL(zd1301_demod_get_i2c_adapter);
+
+/* Platform driver interface */
+static int zd1301_demod_probe(struct platform_device *pdev)
+{
+ struct zd1301_demod_dev *dev;
+ struct zd1301_demod_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ dev_dbg(&pdev->dev, "\n");
+
+ if (!pdata) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "cannot proceed without platform data\n");
+ goto err;
+ }
+ if (!pdev->dev.parent->driver) {
+ ret = -EINVAL;
+ dev_dbg(&pdev->dev, "parent device has no driver\n");
+ goto err;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Setup the state */
+ dev->pdev = pdev;
+ dev->gain = zd1301_demod_gain;
+
+ /* Sleep */
+ ret = zd1301_demod_wreg(dev, 0x6840, 0x21);
+ if (ret)
+ goto err_kfree;
+ ret = zd1301_demod_wreg(dev, 0x6a38, 0x07);
+ if (ret)
+ goto err_kfree;
+
+ /* Create I2C adapter */
+ strlcpy(dev->adapter.name, "ZyDAS ZD1301 demod", sizeof(dev->adapter.name));
+ dev->adapter.algo = &zd1301_demod_i2c_algorithm;
+ dev->adapter.algo_data = NULL;
+ dev->adapter.dev.parent = pdev->dev.parent;
+ i2c_set_adapdata(&dev->adapter, dev);
+ ret = i2c_add_adapter(&dev->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "I2C adapter add failed %d\n", ret);
+ goto err_kfree;
+ }
+
+ /* Create dvb frontend */
+ memcpy(&dev->frontend.ops, &zd1301_demod_ops, sizeof(dev->frontend.ops));
+ dev->frontend.demodulator_priv = dev;
+ platform_set_drvdata(pdev, dev);
+ dev_info(&pdev->dev, "ZyDAS ZD1301 demod attached\n");
+
+ return 0;
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&pdev->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_demod_remove(struct platform_device *pdev)
+{
+ struct zd1301_demod_dev *dev = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "\n");
+
+ i2c_del_adapter(&dev->adapter);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver zd1301_demod_driver = {
+ .driver = {
+ .name = "zd1301_demod",
+ .suppress_bind_attrs = true,
+ },
+ .probe = zd1301_demod_probe,
+ .remove = zd1301_demod_remove,
+};
+module_platform_driver(zd1301_demod_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("ZyDAS ZD1301 demodulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/zd1301_demod.h b/drivers/media/dvb-frontends/zd1301_demod.h
new file mode 100644
index 000000000000..ceb2e05e873c
--- /dev/null
+++ b/drivers/media/dvb-frontends/zd1301_demod.h
@@ -0,0 +1,73 @@
+/*
+ * ZyDAS ZD1301 driver (demodulator)
+ *
+ * Copyright (C) 2015 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ZD1301_DEMOD_H
+#define ZD1301_DEMOD_H
+
+#include <linux/platform_device.h>
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+
+/**
+ * struct zd1301_demod_platform_data - Platform data for the zd1301_demod driver
+ * @reg_priv: First argument of reg_read and reg_write callbacks.
+ * @reg_read: Register read callback.
+ * @reg_write: Register write callback.
+ */
+struct zd1301_demod_platform_data {
+ void *reg_priv;
+ int (*reg_read)(void *, u16, u8 *);
+ int (*reg_write)(void *, u16, u8);
+};
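+/*
+ * A user of this demod (the owning parent driver) would typically fill this
+ * in roughly as follows; the callback names are hypothetical placeholders:
+ *
+ *     struct zd1301_demod_platform_data pdata = {
+ *             .reg_priv = priv,
+ *             .reg_read = my_reg_read,
+ *             .reg_write = my_reg_write,
+ *     };
+ */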
+
+#if IS_REACHABLE(CONFIG_DVB_ZD1301_DEMOD)
+/**
+ * zd1301_demod_get_dvb_frontend() - Get pointer to DVB frontend
+ * @pdev: Pointer to platform device
+ *
+ * Return: Pointer to DVB frontend which given platform device owns.
+ */
+struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev);
+
+/**
+ * zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter
+ * @pdev: Pointer to platform device
+ *
+ * Return: Pointer to I2C adapter which given platform device owns.
+ */
+struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
+
+#else
+
+static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+
+ return NULL;
+}
+static inline struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *dev)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+
+ return NULL;
+}
+
+#endif
+
+#endif /* ZD1301_DEMOD_H */
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index a6d020fe9b8b..062282739ce5 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
**
* The data sheet for this tuner can be found at:
* http://www.mcmilk.de/projects/dvb-card/datasheets/ZL10036.pdf
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index c568d8d59de3..88751adfecf7 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DVB_ZL10036_H
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index 60a2954f8ff8..623355fc2666 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 4f3ff3e853ac..47c0549eb7b2 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/zl10353.h b/drivers/media/dvb-frontends/zl10353.h
index 37aa6e8f454a..cb6248c00089 100644
--- a/drivers/media/dvb-frontends/zl10353.h
+++ b/drivers/media/dvb-frontends/zl10353.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef ZL10353_H
diff --git a/drivers/media/dvb-frontends/zl10353_priv.h b/drivers/media/dvb-frontends/zl10353_priv.h
index e0dd1d3e09dd..a1d902b2d47a 100644
--- a/drivers/media/dvb-frontends/zl10353_priv.h
+++ b/drivers/media/dvb-frontends/zl10353_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZL10353_PRIV_
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6fae009..cee1dae6e014 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select CRC32
---help---
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
@@ -667,6 +668,7 @@ config VIDEO_S5K5BAF
camera sensor with an embedded SoC image signal processor.
source "drivers/media/i2c/smiapp/Kconfig"
+source "drivers/media/i2c/et8ek8/Kconfig"
config VIDEO_S5C73M3
tristate "Samsung S5C73M3 sensor support"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 92773b2e6225..5bc7bbeb5499 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -2,6 +2,7 @@ msp3400-objs := msp3400-driver.o msp3400-kthreads.o
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
obj-$(CONFIG_VIDEO_SMIAPP) += smiapp/
+obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
obj-$(CONFIG_VIDEO_CX25840) += cx25840/
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-y += soc_camera/
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index e191e295c951..ba1ec4ab9eba 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
* TODO:
* - fault interrupt handling
* - hardware strobe
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index fc9ec0f3679c..739331473429 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -302,7 +298,6 @@ static int adv7170_set_fmt(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *mf = &format->format;
u8 val = adv7170_read(sd, 0x7);
- int ret = 0;
if (format->pad)
return -EINVAL;
@@ -323,9 +318,9 @@ static int adv7170_set_fmt(struct v4l2_subdev *sd,
}
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- ret = adv7170_write(sd, 0x7, val);
+ return adv7170_write(sd, 0x7, val);
- return ret;
+ return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 72139bdae1ca..e31e8d909bb9 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index cbed2bc29325..bdbbf8cf27e4 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 04eecda74d66..8b00dc854cf8 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h
index b253d400e817..843d4998435e 100644
--- a/drivers/media/i2c/adv7183_regs.h
+++ b/drivers/media/i2c/adv7183_regs.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ADV7183_REGS_H_
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d0375cac6a05..d8bf435db86d 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3133,6 +3133,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.blank_data = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
+ state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
return 0;
}
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 3a795dcb7d8e..16682c8477d1 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -205,14 +205,14 @@ static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ak881x_g_register,
.s_register = ak881x_s_register,
#endif
};
-static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
.s_std_output = ak881x_s_std_output,
.s_stream = ak881x_s_stream,
};
@@ -224,7 +224,7 @@ static const struct v4l2_subdev_pad_ops ak881x_subdev_pad_ops = {
.get_fmt = ak881x_fill_fmt,
};
-static struct v4l2_subdev_ops ak881x_subdev_ops = {
+static const struct v4l2_subdev_ops ak881x_subdev_ops = {
.core = &ak881x_subdev_core_ops,
.video = &ak881x_subdev_video_ops,
.pad = &ak881x_subdev_pad_ops,
diff --git a/drivers/media/i2c/aptina-pll.c b/drivers/media/i2c/aptina-pll.c
index 8153a449846b..224ae4e4cf8b 100644
--- a/drivers/media/i2c/aptina-pll.c
+++ b/drivers/media/i2c/aptina-pll.c
@@ -11,11 +11,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/device.h>
diff --git a/drivers/media/i2c/aptina-pll.h b/drivers/media/i2c/aptina-pll.h
index b370e341e75d..1632f864c44f 100644
--- a/drivers/media/i2c/aptina-pll.h
+++ b/drivers/media/i2c/aptina-pll.h
@@ -11,11 +11,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __APTINA_PLL_H
diff --git a/drivers/media/i2c/as3645a.c b/drivers/media/i2c/as3645a.c
index 2e90e4094b79..b6aeceea9850 100644
--- a/drivers/media/i2c/as3645a.c
+++ b/drivers/media/i2c/as3645a.c
@@ -15,11 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
* TODO:
* - Check hardware FSTROBE control when sensor driver add support for this
*
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 7907bcfbaed3..472e37637c8d 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 54c627859c8e..2c039ae7d0b2 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index c7de9790d4f3..03e80278dc10 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 59c1a98c5a90..fd70fe2130a1 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/i2c/cx25840/cx25840-audio.c b/drivers/media/i2c/cx25840/cx25840-audio.c
index baf3d9c8710e..dfe94b84f1fb 100644
--- a/drivers/media/i2c/cx25840/cx25840-audio.c
+++ b/drivers/media/i2c/cx25840/cx25840-audio.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 0dcf450052ac..b8d3c070bfc1 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -30,10 +30,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index 254ef45ce41a..55432ed42714 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _CX25840_CORE_H_
diff --git a/drivers/media/i2c/cx25840/cx25840-firmware.c b/drivers/media/i2c/cx25840/cx25840-firmware.c
index 37e052923a87..a7819c463674 100644
--- a/drivers/media/i2c/cx25840/cx25840-firmware.c
+++ b/drivers/media/i2c/cx25840/cx25840-firmware.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 15fbd9607cee..9b65c7d2fa84 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/i2c/cx25840/cx25840-vbi.c b/drivers/media/i2c/cx25840/cx25840-vbi.c
index 0470bb6128e1..8c99a79fb726 100644
--- a/drivers/media/i2c/cx25840/cx25840-vbi.c
+++ b/drivers/media/i2c/cx25840/cx25840-vbi.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
new file mode 100644
index 000000000000..14399365ad7f
--- /dev/null
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -0,0 +1,6 @@
+config VIDEO_ET8EK8
+ tristate "ET8EK8 camera sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
+ It is used, for example, in the Nokia N900 (RX-51).
diff --git a/drivers/media/i2c/et8ek8/Makefile b/drivers/media/i2c/et8ek8/Makefile
new file mode 100644
index 000000000000..66d1b7d44946
--- /dev/null
+++ b/drivers/media/i2c/et8ek8/Makefile
@@ -0,0 +1,2 @@
+et8ek8-objs += et8ek8_mode.o et8ek8_driver.o
+obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8.o
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
new file mode 100644
index 000000000000..bec4a563a09c
--- /dev/null
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -0,0 +1,1514 @@
+/*
+ * et8ek8_driver.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ * Pavel Machek <pavel@ucw.cz>
+ *
+ * Based on code from Toni Leinonen <toni.leinonen@offcode.fi>.
+ *
+ * This driver is based on the Micron MT9T012 camera imager driver
+ * (C) Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/v4l2-mediabus.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "et8ek8_reg.h"
+
+#define ET8EK8_NAME "et8ek8"
+#define ET8EK8_PRIV_MEM_SIZE 128
+#define ET8EK8_MAX_MSG 48
+
+struct et8ek8_sensor {
+ struct v4l2_subdev subdev;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct gpio_desc *reset;
+ struct regulator *vana;
+ struct clk *ext_clk;
+ u32 xclk_freq;
+
+ u16 version;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *pixel_rate;
+ struct et8ek8_reglist *current_reglist;
+
+ u8 priv_mem[ET8EK8_PRIV_MEM_SIZE];
+
+ struct mutex power_lock;
+ int power_count;
+};
+
+#define to_et8ek8_sensor(sd) container_of(sd, struct et8ek8_sensor, subdev)
+
+enum et8ek8_versions {
+ ET8EK8_REV_1 = 0x0001,
+ ET8EK8_REV_2,
+};
+
+/*
+ * This table describes what should be written to the sensor register
+ * for each gain value. The gain (the index into the table) is in terms of
+ * 0.1 EV, i.e. 10 indexes in the table give 2 times more gain; [0] is the
+ * analog gain, [1] the digital gain.
+ *
+ * Analog gain [dB] = 20*log10(regvalue/32); 0x20..0x100
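+ *
+ * For example, index 10 has analog = 64, i.e. 20*log10(64/32) ~= 6.0 dB,
+ * the expected doubling ten 0.1 EV steps above index 0 (analog = 32, 0 dB).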
+ */
+static struct et8ek8_gain {
+ u16 analog;
+ u16 digital;
+} const et8ek8_gain_table[] = {
+ { 32, 0}, /* x1 */
+ { 34, 0},
+ { 37, 0},
+ { 39, 0},
+ { 42, 0},
+ { 45, 0},
+ { 49, 0},
+ { 52, 0},
+ { 56, 0},
+ { 60, 0},
+ { 64, 0}, /* x2 */
+ { 69, 0},
+ { 74, 0},
+ { 79, 0},
+ { 84, 0},
+ { 91, 0},
+ { 97, 0},
+ {104, 0},
+ {111, 0},
+ {119, 0},
+ {128, 0}, /* x4 */
+ {137, 0},
+ {147, 0},
+ {158, 0},
+ {169, 0},
+ {181, 0},
+ {194, 0},
+ {208, 0},
+ {223, 0},
+ {239, 0},
+ {256, 0}, /* x8 */
+ {256, 73},
+ {256, 152},
+ {256, 236},
+ {256, 327},
+ {256, 424},
+ {256, 528},
+ {256, 639},
+ {256, 758},
+ {256, 886},
+ {256, 1023}, /* x16 */
+};
+
+/* Register definitions */
+#define REG_REVISION_NUMBER_L 0x1200
+#define REG_REVISION_NUMBER_H 0x1201
+
+#define PRIV_MEM_START_REG 0x0008
+#define PRIV_MEM_WIN_SIZE 8
+
+#define ET8EK8_I2C_DELAY 3 /* msec delay b/w accesses */
+
+#define USE_CRC 1
+
+/*
+ * Register access helpers
+ *
+ * Read an 8/16-bit i2c register. The value is returned in 'val'.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int et8ek8_i2c_read_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u32 *val)
+{
+ int r;
+ struct i2c_msg msg;
+ unsigned char data[4];
+
+ if (!client->adapter)
+ return -ENODEV;
+ if (data_length != ET8EK8_REG_8BIT && data_length != ET8EK8_REG_16BIT)
+ return -EINVAL;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (reg >> 8);
+ data[1] = (u8) (reg & 0xff);
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r < 0)
+ goto err;
+
+ msg.len = data_length;
+ msg.flags = I2C_M_RD;
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r < 0)
+ goto err;
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == ET8EK8_REG_8BIT)
+ *val = data[0];
+ else
+ *val = (data[1] << 8) + data[0];
+
+ return 0;
+
+err:
+ dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r);
+
+ return r;
+}
+
+static void et8ek8_i2c_create_msg(struct i2c_client *client, u16 len, u16 reg,
+ u32 val, struct i2c_msg *msg,
+ unsigned char *buf)
+{
+ msg->addr = client->addr;
+ msg->flags = 0; /* Write */
+ msg->len = 2 + len;
+ msg->buf = buf;
+
+ /* high byte goes out first */
+ buf[0] = (u8) (reg >> 8);
+ buf[1] = (u8) (reg & 0xff);
+
+ switch (len) {
+ case ET8EK8_REG_8BIT:
+ buf[2] = (u8) (val) & 0xff;
+ break;
+ case ET8EK8_REG_16BIT:
+ buf[2] = (u8) (val) & 0xff;
+ buf[3] = (u8) (val >> 8) & 0xff;
+ break;
+ default:
+ WARN_ONCE(1, ET8EK8_NAME ": %s: invalid message length.\n",
+ __func__);
+ }
+}
+
+/*
+ * A buffered write method that puts the wanted register write
+ * commands in a message list and passes the list to the i2c framework
+ */
+static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client,
+ const struct et8ek8_reg *wnext,
+ int cnt)
+{
+ struct i2c_msg msg[ET8EK8_MAX_MSG];
+ unsigned char data[ET8EK8_MAX_MSG][6];
+ int wcnt = 0;
+ u16 reg, data_length;
+ u32 val;
+
+ if (WARN_ONCE(cnt > ET8EK8_MAX_MSG,
+ ET8EK8_NAME ": %s: too many messages.\n", __func__)) {
+ return -EINVAL;
+ }
+
+ /* Create new write messages for all writes */
+ while (wcnt < cnt) {
+ data_length = wnext->type;
+ reg = wnext->reg;
+ val = wnext->val;
+ wnext++;
+
+ et8ek8_i2c_create_msg(client, data_length, reg,
+ val, &msg[wcnt], &data[wcnt][0]);
+
+ /* Update write count */
+ wcnt++;
+ }
+
+ /* Now we send everything ... */
+ return i2c_transfer(client->adapter, msg, wcnt);
+}
+
+/*
+ * Write a list of registers to i2c device.
+ *
+ * The list of registers is terminated by ET8EK8_REG_TERM.
+ * Returns zero if successful, or non-zero otherwise.
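+ *
+ * For illustration, such a list could look roughly like this (the 8-bit
+ * write is borrowed from the stream-on helper below; the 5 ms delay is a
+ * made-up placeholder):
+ *
+ *     { .type = ET8EK8_REG_8BIT, .reg = 0x1252, .val = 0xb0 },
+ *     { .type = ET8EK8_REG_DELAY, .val = 5 },
+ *     { .type = ET8EK8_REG_TERM },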
+ */
+static int et8ek8_i2c_write_regs(struct i2c_client *client,
+ const struct et8ek8_reg *regs)
+{
+ int r, cnt = 0;
+ const struct et8ek8_reg *next;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (!regs)
+ return -EINVAL;
+
+ /* Initialize list pointers to the start of the list */
+ next = regs;
+
+ do {
+ /*
+ * We have to go through the list to figure out how
+ * many regular writes we have in a row
+ */
+ while (next->type != ET8EK8_REG_TERM &&
+ next->type != ET8EK8_REG_DELAY) {
+ /*
+ * Here we check that the actual length fields
+ * are valid
+ */
+ if (WARN(next->type != ET8EK8_REG_8BIT &&
+ next->type != ET8EK8_REG_16BIT,
+ "Invalid type = %d", next->type)) {
+ return -EINVAL;
+ }
+ /*
+ * Increment count of successive writes and
+ * read pointer
+ */
+ cnt++;
+ next++;
+ }
+
+ /* Now we start writing ... */
+ r = et8ek8_i2c_buffered_write_regs(client, regs, cnt);
+
+ /* ... and then check that everything was OK */
+ if (r < 0) {
+ dev_err(&client->dev, "i2c transfer error!\n");
+ return r;
+ }
+
+ /*
+ * If we ran into a sleep statement when going through
+ * the list, this is where we snooze for the required time
+ */
+ if (next->type == ET8EK8_REG_DELAY) {
+ msleep(next->val);
+ /*
+ * ZZZ ...
+ * Update list pointers and cnt and start over ...
+ */
+ next++;
+ regs = next;
+ cnt = 0;
+ }
+ } while (next->type != ET8EK8_REG_TERM);
+
+ return 0;
+}
+
+/*
+ * Write to a 8/16-bit register.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int et8ek8_i2c_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u32 val)
+{
+ int r;
+ struct i2c_msg msg;
+ unsigned char data[6];
+
+ if (!client->adapter)
+ return -ENODEV;
+ if (data_length != ET8EK8_REG_8BIT && data_length != ET8EK8_REG_16BIT)
+ return -EINVAL;
+
+ et8ek8_i2c_create_msg(client, data_length, reg, val, &msg, data);
+
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r < 0) {
+ dev_err(&client->dev,
+ "wrote 0x%x to offset 0x%x error %d\n", val, reg, r);
+ return r;
+ }
+
+ return 0;
+}
+
+static struct et8ek8_reglist *et8ek8_reglist_find_type(
+ struct et8ek8_meta_reglist *meta,
+ u16 type)
+{
+ struct et8ek8_reglist **next = &meta->reglist[0].ptr;
+
+ while (*next) {
+ if ((*next)->type == type)
+ return *next;
+
+ next++;
+ }
+
+ return NULL;
+}
+
+static int et8ek8_i2c_reglist_find_write(struct i2c_client *client,
+ struct et8ek8_meta_reglist *meta,
+ u16 type)
+{
+ struct et8ek8_reglist *reglist;
+
+ reglist = et8ek8_reglist_find_type(meta, type);
+ if (!reglist)
+ return -EINVAL;
+
+ return et8ek8_i2c_write_regs(client, reglist->regs);
+}
+
+static struct et8ek8_reglist **et8ek8_reglist_first(
+ struct et8ek8_meta_reglist *meta)
+{
+ return &meta->reglist[0].ptr;
+}
+
+static void et8ek8_reglist_to_mbus(const struct et8ek8_reglist *reglist,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = reglist->mode.window_width;
+ fmt->height = reglist->mode.window_height;
+ fmt->code = reglist->mode.bus_format;
+}
+
+static struct et8ek8_reglist *et8ek8_reglist_find_mode_fmt(
+ struct et8ek8_meta_reglist *meta,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct et8ek8_reglist **list = et8ek8_reglist_first(meta);
+ struct et8ek8_reglist *best_match = NULL;
+ struct et8ek8_reglist *best_other = NULL;
+ struct v4l2_mbus_framefmt format;
+ unsigned int max_dist_match = (unsigned int)-1;
+ unsigned int max_dist_other = (unsigned int)-1;
+
+ /*
+ * Find the mode with the closest image size. The distance between
+ * image sizes is the size in pixels of the non-overlapping regions
+ * between the requested size and the frame-specified size.
+ *
+ * Store both the closest mode that matches the requested format, and
+ * the closest mode for all other formats. The best match is returned
+ * if found, otherwise the best mode with a non-matching format is
+ * returned.
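+ *
+ * For example, a requested 640x480 format compared against an 800x600
+ * mode gives dist = 800*600 + 640*480 - 2*(640*480) = 172800 pixels of
+ * non-overlapping area (the sizes here are illustrative only).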
+ */
+ for (; *list; list++) {
+ unsigned int dist;
+
+ if ((*list)->type != ET8EK8_REGLIST_MODE)
+ continue;
+
+ et8ek8_reglist_to_mbus(*list, &format);
+
+ dist = min(fmt->width, format.width)
+ * min(fmt->height, format.height);
+ dist = format.width * format.height
+ + fmt->width * fmt->height - 2 * dist;
+
+ if (fmt->code == format.code) {
+ if (dist < max_dist_match || !best_match) {
+ best_match = *list;
+ max_dist_match = dist;
+ }
+ } else {
+ if (dist < max_dist_other || !best_other) {
+ best_other = *list;
+ max_dist_other = dist;
+ }
+ }
+ }
+
+ return best_match ? best_match : best_other;
+}
+
+#define TIMEPERFRAME_AVG_FPS(t) \
+ (((t).denominator + ((t).numerator >> 1)) / (t).numerator)
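+/*
+ * For example, a 1/30 s frame interval gives (30 + 0) / 1 = 30 fps, while a
+ * 2/15 s interval gives (15 + 1) / 2 = 8, i.e. 7.5 fps rounded to nearest.
+ */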
+
+static struct et8ek8_reglist *et8ek8_reglist_find_mode_ival(
+ struct et8ek8_meta_reglist *meta,
+ struct et8ek8_reglist *current_reglist,
+ struct v4l2_fract *timeperframe)
+{
+ int fps = TIMEPERFRAME_AVG_FPS(*timeperframe);
+ struct et8ek8_reglist **list = et8ek8_reglist_first(meta);
+ struct et8ek8_mode *current_mode = &current_reglist->mode;
+
+ for (; *list; list++) {
+ struct et8ek8_mode *mode = &(*list)->mode;
+
+ if ((*list)->type != ET8EK8_REGLIST_MODE)
+ continue;
+
+ if (mode->window_width != current_mode->window_width ||
+ mode->window_height != current_mode->window_height)
+ continue;
+
+ if (TIMEPERFRAME_AVG_FPS(mode->timeperframe) == fps)
+ return *list;
+ }
+
+ return NULL;
+}
+
+static int et8ek8_reglist_cmp(const void *a, const void *b)
+{
+ const struct et8ek8_reglist **list1 = (const struct et8ek8_reglist **)a,
+ **list2 = (const struct et8ek8_reglist **)b;
+
+ /* Put real modes in the beginning. */
+ if ((*list1)->type == ET8EK8_REGLIST_MODE &&
+ (*list2)->type != ET8EK8_REGLIST_MODE)
+ return -1;
+ if ((*list1)->type != ET8EK8_REGLIST_MODE &&
+ (*list2)->type == ET8EK8_REGLIST_MODE)
+ return 1;
+
+ /* Descending width. */
+ if ((*list1)->mode.window_width > (*list2)->mode.window_width)
+ return -1;
+ if ((*list1)->mode.window_width < (*list2)->mode.window_width)
+ return 1;
+
+ if ((*list1)->mode.window_height > (*list2)->mode.window_height)
+ return -1;
+ if ((*list1)->mode.window_height < (*list2)->mode.window_height)
+ return 1;
+
+ return 0;
+}
+
+static int et8ek8_reglist_import(struct i2c_client *client,
+ struct et8ek8_meta_reglist *meta)
+{
+ int nlists = 0, i;
+
+ dev_info(&client->dev, "meta_reglist version %s\n", meta->version);
+
+ while (meta->reglist[nlists].ptr)
+ nlists++;
+
+ if (!nlists)
+ return -EINVAL;
+
+ sort(&meta->reglist[0].ptr, nlists, sizeof(meta->reglist[0].ptr),
+ et8ek8_reglist_cmp, NULL);
+
+ i = nlists;
+ nlists = 0;
+
+ while (i--) {
+ struct et8ek8_reglist *list;
+
+ list = meta->reglist[nlists].ptr;
+
+ dev_dbg(&client->dev,
+ "%s: type %d\tw %d\th %d\tfmt %x\tival %d/%d\tptr %p\n",
+ __func__,
+ list->type,
+ list->mode.window_width, list->mode.window_height,
+ list->mode.bus_format,
+ list->mode.timeperframe.numerator,
+ list->mode.timeperframe.denominator,
+ (void *)meta->reglist[nlists].ptr);
+
+ nlists++;
+ }
+
+ return 0;
+}
+
+/*
+ * Called to change the V4L2 gain control value. Writes the corresponding
+ * analog and digital gain registers from et8ek8_gain_table.
+ * gain is in 0.1 EV (exposure value) units and is used as the table index.
+ */
+static int et8ek8_set_gain(struct et8ek8_sensor *sensor, s32 gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
+ struct et8ek8_gain new;
+ int r;
+
+ new = et8ek8_gain_table[gain];
+
+ /* FIXME: optimise I2C writes! */
+ r = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT,
+ 0x124a, new.analog >> 8);
+ if (r)
+ return r;
+ r = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT,
+ 0x1249, new.analog & 0xff);
+ if (r)
+ return r;
+
+ r = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT,
+ 0x124d, new.digital >> 8);
+ if (r)
+ return r;
+ r = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT,
+ 0x124c, new.digital & 0xff);
+
+ return r;
+}
+
+static int et8ek8_set_test_pattern(struct et8ek8_sensor *sensor, s32 mode)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
+ int cbh_mode, cbv_mode, tp_mode, din_sw, r1420, rval;
+
+ /* Values for normal mode */
+ cbh_mode = 0;
+ cbv_mode = 0;
+ tp_mode = 0;
+ din_sw = 0x00;
+ r1420 = 0xF0;
+
+ if (mode) {
+ /* Test pattern mode */
+ if (mode < 5) {
+ cbh_mode = 1;
+ cbv_mode = 1;
+ tp_mode = mode + 3;
+ } else {
+ cbh_mode = 0;
+ cbv_mode = 0;
+ tp_mode = mode - 4 + 3;
+ }
+
+ din_sw = 0x01;
+ r1420 = 0xE0;
+ }
+
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x111B,
+ tp_mode << 4);
+ if (rval)
+ return rval;
+
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1121,
+ cbh_mode << 7);
+ if (rval)
+ return rval;
+
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1124,
+ cbv_mode << 7);
+ if (rval)
+ return rval;
+
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x112C, din_sw);
+ if (rval)
+ return rval;
+
+ return et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1420, r1420);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int et8ek8_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct et8ek8_sensor *sensor =
+ container_of(ctrl->handler, struct et8ek8_sensor, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ return et8ek8_set_gain(sensor, ctrl->val);
+
+ case V4L2_CID_EXPOSURE:
+ {
+ struct i2c_client *client =
+ v4l2_get_subdevdata(&sensor->subdev);
+
+ return et8ek8_i2c_write_reg(client, ET8EK8_REG_16BIT, 0x1243,
+ ctrl->val);
+ }
+
+ case V4L2_CID_TEST_PATTERN:
+ return et8ek8_set_test_pattern(sensor, ctrl->val);
+
+ case V4L2_CID_PIXEL_RATE:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops et8ek8_ctrl_ops = {
+ .s_ctrl = et8ek8_set_ctrl,
+};
+
+static const char * const et8ek8_test_pattern_menu[] = {
+ "Normal",
+ "Vertical colorbar",
+ "Horizontal colorbar",
+ "Scale",
+ "Ramp",
+ "Small vertical colorbar",
+ "Small horizontal colorbar",
+ "Small scale",
+ "Small ramp",
+};
+
+static int et8ek8_init_controls(struct et8ek8_sensor *sensor)
+{
+ s32 max_rows;
+
+ v4l2_ctrl_handler_init(&sensor->ctrl_handler, 4);
+
+ /* V4L2_CID_GAIN */
+ v4l2_ctrl_new_std(&sensor->ctrl_handler, &et8ek8_ctrl_ops,
+ V4L2_CID_GAIN, 0, ARRAY_SIZE(et8ek8_gain_table) - 1,
+ 1, 0);
+
+ max_rows = sensor->current_reglist->mode.max_exp;
+ {
+ u32 min = 1, max = max_rows;
+
+ sensor->exposure =
+ v4l2_ctrl_new_std(&sensor->ctrl_handler,
+ &et8ek8_ctrl_ops, V4L2_CID_EXPOSURE,
+ min, max, min, max);
+ }
+
+ /* V4L2_CID_PIXEL_RATE */
+ sensor->pixel_rate =
+ v4l2_ctrl_new_std(&sensor->ctrl_handler, &et8ek8_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
+
+ /* V4L2_CID_TEST_PATTERN */
+ v4l2_ctrl_new_std_menu_items(&sensor->ctrl_handler,
+ &et8ek8_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(et8ek8_test_pattern_menu) - 1,
+ 0, 0, et8ek8_test_pattern_menu);
+
+ if (sensor->ctrl_handler.error)
+ return sensor->ctrl_handler.error;
+
+ sensor->subdev.ctrl_handler = &sensor->ctrl_handler;
+
+ return 0;
+}
+
+static void et8ek8_update_controls(struct et8ek8_sensor *sensor)
+{
+ struct v4l2_ctrl *ctrl;
+ struct et8ek8_mode *mode = &sensor->current_reglist->mode;
+
+ u32 min, max, pixel_rate;
+ static const int S = 8;
+
+ ctrl = sensor->exposure;
+
+ min = 1;
+ max = mode->max_exp;
+
+ /*
+ * Calculate average pixel clock per line. Assume buffers can spread
+ * the data over horizontal blanking time. Rounding upwards.
+ * Formula taken from stock Nokia N900 kernel.
+ */
+ pixel_rate = ((mode->pixel_clock + (1 << S) - 1) >> S) + mode->width;
+ pixel_rate = mode->window_width * (pixel_rate - 1) / mode->width;
+
+ __v4l2_ctrl_modify_range(ctrl, min, max, min, max);
+ __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate, pixel_rate << S);
+}
+
+static int et8ek8_configure(struct et8ek8_sensor *sensor)
+{
+ struct v4l2_subdev *subdev = &sensor->subdev;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int rval;
+
+ rval = et8ek8_i2c_write_regs(client, sensor->current_reglist->regs);
+ if (rval)
+ goto fail;
+
+ /* Controls set while the power to the sensor is turned off are saved
+ * but not applied to the hardware. Now that we're about to start
+ * streaming, apply all the current values to the hardware.
+ */
+ rval = v4l2_ctrl_handler_setup(&sensor->ctrl_handler);
+ if (rval)
+ goto fail;
+
+ return 0;
+
+fail:
+ dev_err(&client->dev, "sensor configuration failed\n");
+
+ return rval;
+}
+
+static int et8ek8_stream_on(struct et8ek8_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
+
+ return et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1252, 0xb0);
+}
+
+static int et8ek8_stream_off(struct et8ek8_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
+
+ return et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1252, 0x30);
+}
+
+static int et8ek8_s_stream(struct v4l2_subdev *subdev, int streaming)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ int ret;
+
+ if (!streaming)
+ return et8ek8_stream_off(sensor);
+
+ ret = et8ek8_configure(sensor);
+ if (ret < 0)
+ return ret;
+
+ return et8ek8_stream_on(sensor);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static int et8ek8_power_off(struct et8ek8_sensor *sensor)
+{
+ gpiod_set_value(sensor->reset, 0);
+ udelay(1);
+
+ clk_disable_unprepare(sensor->ext_clk);
+
+ return regulator_disable(sensor->vana);
+}
+
+static int et8ek8_power_on(struct et8ek8_sensor *sensor)
+{
+ struct v4l2_subdev *subdev = &sensor->subdev;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ unsigned int xclk_freq;
+ int val, rval;
+
+ rval = regulator_enable(sensor->vana);
+ if (rval) {
+ dev_err(&client->dev, "failed to enable vana regulator\n");
+ return rval;
+ }
+
+ if (sensor->current_reglist)
+ xclk_freq = sensor->current_reglist->mode.ext_clock;
+ else
+ xclk_freq = sensor->xclk_freq;
+
+ rval = clk_set_rate(sensor->ext_clk, xclk_freq);
+ if (rval < 0) {
+ dev_err(&client->dev, "unable to set extclk clock freq to %u\n",
+ xclk_freq);
+ goto out;
+ }
+ rval = clk_prepare_enable(sensor->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev, "failed to enable extclk\n");
+ goto out;
+ }
+
+ udelay(10); /* Hopefully this is a good value */
+
+ gpiod_set_value(sensor->reset, 1);
+
+ msleep(5000 * 1000 / xclk_freq + 1); /* Wait 5000 cycles */
+
+ rval = et8ek8_i2c_reglist_find_write(client, &meta_reglist,
+ ET8EK8_REGLIST_POWERON);
+ if (rval)
+ goto out;
+
+#ifdef USE_CRC
+ rval = et8ek8_i2c_read_reg(client, ET8EK8_REG_8BIT, 0x1263, &val);
+ if (rval)
+ goto out;
+#if USE_CRC /* TODO get crc setting from DT */
+ val |= BIT(4);
+#else
+ val &= ~BIT(4);
+#endif
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x1263, val);
+ if (rval)
+ goto out;
+#endif
+
+out:
+ if (rval)
+ et8ek8_power_off(sensor);
+
+ return rval;
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+#define MAX_FMTS 4
+static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct et8ek8_reglist **list =
+ et8ek8_reglist_first(&meta_reglist);
+ u32 pixelformat[MAX_FMTS];
+ int npixelformat = 0;
+
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ for (; *list; list++) {
+ struct et8ek8_mode *mode = &(*list)->mode;
+ int i;
+
+ if ((*list)->type != ET8EK8_REGLIST_MODE)
+ continue;
+
+ for (i = 0; i < npixelformat; i++) {
+ if (pixelformat[i] == mode->bus_format)
+ break;
+ }
+ if (i != npixelformat)
+ continue;
+
+ if (code->index == npixelformat) {
+ code->code = mode->bus_format;
+ return 0;
+ }
+
+ pixelformat[npixelformat] = mode->bus_format;
+ npixelformat++;
+ }
+
+ return -EINVAL;
+}
+
+static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct et8ek8_reglist **list =
+ et8ek8_reglist_first(&meta_reglist);
+ struct v4l2_mbus_framefmt format;
+ int cmp_width = INT_MAX;
+ int cmp_height = INT_MAX;
+ int index = fse->index;
+
+ for (; *list; list++) {
+ if ((*list)->type != ET8EK8_REGLIST_MODE)
+ continue;
+
+ et8ek8_reglist_to_mbus(*list, &format);
+ if (fse->code != format.code)
+ continue;
+
+ /* Assume that the modes are grouped by frame size. */
+ if (format.width == cmp_width && format.height == cmp_height)
+ continue;
+
+ cmp_width = format.width;
+ cmp_height = format.height;
+
+ if (index-- == 0) {
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct et8ek8_reglist **list =
+ et8ek8_reglist_first(&meta_reglist);
+ struct v4l2_mbus_framefmt format;
+ int index = fie->index;
+
+ for (; *list; list++) {
+ struct et8ek8_mode *mode = &(*list)->mode;
+
+ if ((*list)->type != ET8EK8_REGLIST_MODE)
+ continue;
+
+ et8ek8_reglist_to_mbus(*list, &format);
+ if (fie->code != format.code)
+ continue;
+
+ if (fie->width != format.width || fie->height != format.height)
+ continue;
+
+ if (index-- == 0) {
+ fie->interval = mode->timeperframe;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct v4l2_mbus_framefmt *
+__et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &sensor->format;
+ default:
+ return NULL;
+ }
+}
+
+static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int et8ek8_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct et8ek8_reglist *reglist;
+
+ format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ reglist = et8ek8_reglist_find_mode_fmt(&meta_reglist, &fmt->format);
+ et8ek8_reglist_to_mbus(reglist, &fmt->format);
+ *format = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sensor->current_reglist = reglist;
+ et8ek8_update_controls(sensor);
+ }
+
+ return 0;
+}
+
+static int et8ek8_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+
+ memset(fi, 0, sizeof(*fi));
+ fi->interval = sensor->current_reglist->mode.timeperframe;
+
+ return 0;
+}
+
+static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct et8ek8_reglist *reglist;
+
+ reglist = et8ek8_reglist_find_mode_ival(&meta_reglist,
+ sensor->current_reglist,
+ &fi->interval);
+
+ if (!reglist)
+ return -EINVAL;
+
+ if (sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock)
+ return -EINVAL;
+
+ sensor->current_reglist = reglist;
+ et8ek8_update_controls(sensor);
+
+ return 0;
+}
+
+static int et8ek8_g_priv_mem(struct v4l2_subdev *subdev)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ unsigned int length = ET8EK8_PRIV_MEM_SIZE;
+ unsigned int offset = 0;
+ u8 *ptr = sensor->priv_mem;
+ int rval = 0;
+
+ /* Read the EEPROM window-by-window, each window 8 bytes */
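+ /*
+ * For example, offset 21 selects window 0xe0 | (21 >> 3) = 0xe2 and
+ * reading starts at intra-window offset 21 & 0x07 = 5, so at most
+ * 8 - 5 = 3 bytes are copied before moving on to the next window.
+ */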
+ do {
+ u8 buffer[PRIV_MEM_WIN_SIZE];
+ struct i2c_msg msg;
+ int bytes, i;
+ int ofs;
+
+ /* Set the current window */
+ rval = et8ek8_i2c_write_reg(client, ET8EK8_REG_8BIT, 0x0001,
+ 0xe0 | (offset >> 3));
+ if (rval < 0)
+ return rval;
+
+ /* Wait for status bit */
+ for (i = 0; i < 1000; ++i) {
+ u32 status;
+
+ rval = et8ek8_i2c_read_reg(client, ET8EK8_REG_8BIT,
+ 0x0003, &status);
+ if (rval < 0)
+ return rval;
+ if (!(status & 0x08))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (i == 1000)
+ return -EIO;
+
+ /* Read window, 8 bytes at once, and copy to user space */
+ ofs = offset & 0x07; /* Offset within this window */
+		bytes = length + ofs > 8 ? 8 - ofs : length;
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buffer;
+ ofs += PRIV_MEM_START_REG;
+ buffer[0] = (u8)(ofs >> 8);
+ buffer[1] = (u8)(ofs & 0xFF);
+
+ rval = i2c_transfer(client->adapter, &msg, 1);
+ if (rval < 0)
+ return rval;
+
+ mdelay(ET8EK8_I2C_DELAY);
+ msg.addr = client->addr;
+ msg.len = bytes;
+ msg.flags = I2C_M_RD;
+ msg.buf = buffer;
+ memset(buffer, 0, sizeof(buffer));
+
+ rval = i2c_transfer(client->adapter, &msg, 1);
+ if (rval < 0)
+ return rval;
+
+ rval = 0;
+ memcpy(ptr, buffer, bytes);
+
+ length -= bytes;
+ offset += bytes;
+ ptr += bytes;
+ } while (length > 0);
+
+ return rval;
+}
+
+static int et8ek8_dev_init(struct v4l2_subdev *subdev)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int rval, rev_l, rev_h;
+
+ rval = et8ek8_power_on(sensor);
+ if (rval) {
+ dev_err(&client->dev, "could not power on\n");
+ return rval;
+ }
+
+ rval = et8ek8_i2c_read_reg(client, ET8EK8_REG_8BIT,
+ REG_REVISION_NUMBER_L, &rev_l);
+ if (!rval)
+ rval = et8ek8_i2c_read_reg(client, ET8EK8_REG_8BIT,
+ REG_REVISION_NUMBER_H, &rev_h);
+ if (rval) {
+ dev_err(&client->dev, "no et8ek8 sensor detected\n");
+ goto out_poweroff;
+ }
+
+ sensor->version = (rev_h << 8) + rev_l;
+ if (sensor->version != ET8EK8_REV_1 && sensor->version != ET8EK8_REV_2)
+ dev_info(&client->dev,
+ "unknown version 0x%x detected, continuing anyway\n",
+ sensor->version);
+
+ rval = et8ek8_reglist_import(client, &meta_reglist);
+ if (rval) {
+ dev_err(&client->dev,
+ "invalid register list %s, import failed\n",
+ ET8EK8_NAME);
+ goto out_poweroff;
+ }
+
+ sensor->current_reglist = et8ek8_reglist_find_type(&meta_reglist,
+ ET8EK8_REGLIST_MODE);
+ if (!sensor->current_reglist) {
+ dev_err(&client->dev,
+ "invalid register list %s, no mode found\n",
+ ET8EK8_NAME);
+ rval = -ENODEV;
+ goto out_poweroff;
+ }
+
+ et8ek8_reglist_to_mbus(sensor->current_reglist, &sensor->format);
+
+ rval = et8ek8_i2c_reglist_find_write(client, &meta_reglist,
+ ET8EK8_REGLIST_POWERON);
+ if (rval) {
+ dev_err(&client->dev,
+ "invalid register list %s, no POWERON mode found\n",
+ ET8EK8_NAME);
+ goto out_poweroff;
+ }
+ rval = et8ek8_stream_on(sensor); /* Needed to be able to read EEPROM */
+ if (rval)
+ goto out_poweroff;
+ rval = et8ek8_g_priv_mem(subdev);
+ if (rval)
+ dev_warn(&client->dev,
+			 "cannot read OTP (EEPROM) memory from sensor\n");
+ rval = et8ek8_stream_off(sensor);
+ if (rval)
+ goto out_poweroff;
+
+ rval = et8ek8_power_off(sensor);
+ if (rval)
+ goto out_poweroff;
+
+ return 0;
+
+out_poweroff:
+ et8ek8_power_off(sensor);
+
+ return rval;
+}
+
+/* --------------------------------------------------------------------------
+ * sysfs attributes
+ */
+static ssize_t
+et8ek8_priv_mem_read(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+
+#if PAGE_SIZE < ET8EK8_PRIV_MEM_SIZE
+#error PAGE_SIZE too small!
+#endif
+
+ memcpy(buf, sensor->priv_mem, ET8EK8_PRIV_MEM_SIZE);
+
+ return ET8EK8_PRIV_MEM_SIZE;
+}
+static DEVICE_ATTR(priv_mem, 0444, et8ek8_priv_mem_read, NULL);
+
+/* --------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+static int
+et8ek8_registered(struct v4l2_subdev *subdev)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int rval;
+
+	dev_dbg(&client->dev, "registered!\n");
+
+ rval = device_create_file(&client->dev, &dev_attr_priv_mem);
+ if (rval) {
+ dev_err(&client->dev, "could not register sysfs entry\n");
+ return rval;
+ }
+
+ rval = et8ek8_dev_init(subdev);
+ if (rval)
+ goto err_file;
+
+ rval = et8ek8_init_controls(sensor);
+ if (rval) {
+ dev_err(&client->dev, "controls initialization failed\n");
+ goto err_file;
+ }
+
+ __et8ek8_get_pad_format(sensor, NULL, 0, V4L2_SUBDEV_FORMAT_ACTIVE);
+
+ return 0;
+
+err_file:
+ device_remove_file(&client->dev, &dev_attr_priv_mem);
+
+ return rval;
+}
+
+static int __et8ek8_set_power(struct et8ek8_sensor *sensor, bool on)
+{
+ return on ? et8ek8_power_on(sensor) : et8ek8_power_off(sensor);
+}
+
+static int et8ek8_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ int ret = 0;
+
+ mutex_lock(&sensor->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (sensor->power_count == !on) {
+ ret = __et8ek8_set_power(sensor, !!on);
+ if (ret < 0)
+ goto done;
+ }
+
+ /* Update the power count. */
+ sensor->power_count += on ? 1 : -1;
+ WARN_ON(sensor->power_count < 0);
+
+done:
+ mutex_unlock(&sensor->power_lock);
+
+ return ret;
+}
+
+static int et8ek8_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct et8ek8_reglist *reglist;
+
+ reglist = et8ek8_reglist_find_type(&meta_reglist, ET8EK8_REGLIST_MODE);
+ format = __et8ek8_get_pad_format(sensor, fh->pad, 0,
+ V4L2_SUBDEV_FORMAT_TRY);
+ et8ek8_reglist_to_mbus(reglist, format);
+
+ return et8ek8_set_power(sd, true);
+}
+
+static int et8ek8_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return et8ek8_set_power(sd, false);
+}
+
+static const struct v4l2_subdev_video_ops et8ek8_video_ops = {
+ .s_stream = et8ek8_s_stream,
+ .g_frame_interval = et8ek8_get_frame_interval,
+ .s_frame_interval = et8ek8_set_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops et8ek8_core_ops = {
+ .s_power = et8ek8_set_power,
+};
+
+static const struct v4l2_subdev_pad_ops et8ek8_pad_ops = {
+ .enum_mbus_code = et8ek8_enum_mbus_code,
+ .enum_frame_size = et8ek8_enum_frame_size,
+ .enum_frame_interval = et8ek8_enum_frame_ival,
+ .get_fmt = et8ek8_get_pad_format,
+ .set_fmt = et8ek8_set_pad_format,
+};
+
+static const struct v4l2_subdev_ops et8ek8_ops = {
+ .core = &et8ek8_core_ops,
+ .video = &et8ek8_video_ops,
+ .pad = &et8ek8_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops et8ek8_internal_ops = {
+ .registered = et8ek8_registered,
+ .open = et8ek8_open,
+ .close = et8ek8_close,
+};
+
+/* --------------------------------------------------------------------------
+ * I2C driver
+ */
+static int __maybe_unused et8ek8_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+
+ if (!sensor->power_count)
+ return 0;
+
+ return __et8ek8_set_power(sensor, false);
+}
+
+static int __maybe_unused et8ek8_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+
+ if (!sensor->power_count)
+ return 0;
+
+ return __et8ek8_set_power(sensor, true);
+}
+
+static int et8ek8_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct et8ek8_sensor *sensor;
+ struct device *dev = &client->dev;
+ int ret;
+
+ sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->reset)) {
+ dev_dbg(&client->dev, "could not request reset gpio\n");
+ return PTR_ERR(sensor->reset);
+ }
+
+ sensor->vana = devm_regulator_get(dev, "vana");
+ if (IS_ERR(sensor->vana)) {
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return PTR_ERR(sensor->vana);
+ }
+
+ sensor->ext_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock\n");
+ return PTR_ERR(sensor->ext_clk);
+ }
+
+ ret = of_property_read_u32(dev->of_node, "clock-frequency",
+ &sensor->xclk_freq);
+ if (ret) {
+ dev_warn(dev, "can't get clock-frequency\n");
+ return ret;
+ }
+
+ mutex_init(&sensor->power_lock);
+
+ v4l2_i2c_subdev_init(&sensor->subdev, client, &et8ek8_ops);
+ sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->subdev.internal_ops = &et8ek8_internal_ops;
+
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
+ if (ret < 0) {
+ dev_err(&client->dev, "media entity init failed!\n");
+ goto err_mutex;
+ }
+
+ ret = v4l2_async_register_subdev(&sensor->subdev);
+ if (ret < 0)
+ goto err_entity;
+
+ dev_dbg(dev, "initialized!\n");
+
+ return 0;
+
+err_entity:
+ media_entity_cleanup(&sensor->subdev.entity);
+err_mutex:
+ mutex_destroy(&sensor->power_lock);
+ return ret;
+}
+
+static int __exit et8ek8_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+
+ if (sensor->power_count) {
+ WARN_ON(1);
+ et8ek8_power_off(sensor);
+ sensor->power_count = 0;
+ }
+
+ v4l2_device_unregister_subdev(&sensor->subdev);
+ device_remove_file(&client->dev, &dev_attr_priv_mem);
+ v4l2_ctrl_handler_free(&sensor->ctrl_handler);
+ v4l2_async_unregister_subdev(&sensor->subdev);
+ media_entity_cleanup(&sensor->subdev.entity);
+ mutex_destroy(&sensor->power_lock);
+
+ return 0;
+}
+
+static const struct of_device_id et8ek8_of_table[] = {
+ { .compatible = "toshiba,et8ek8" },
+ { },
+};
+
+static const struct i2c_device_id et8ek8_id_table[] = {
+ { ET8EK8_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
+
+static const struct dev_pm_ops et8ek8_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume)
+};
+
+static struct i2c_driver et8ek8_i2c_driver = {
+ .driver = {
+ .name = ET8EK8_NAME,
+ .pm = &et8ek8_pm_ops,
+ .of_match_table = et8ek8_of_table,
+ },
+ .probe = et8ek8_probe,
+ .remove = __exit_p(et8ek8_remove),
+ .id_table = et8ek8_id_table,
+};
+
+module_i2c_driver(et8ek8_i2c_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>, Pavel Machek <pavel@ucw.cz");
+MODULE_DESCRIPTION("Toshiba ET8EK8 camera sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c
new file mode 100644
index 000000000000..a79882a83885
--- /dev/null
+++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c
@@ -0,0 +1,587 @@
+/*
+ * et8ek8_mode.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "et8ek8_reg.h"
+
+/*
+ * Stingray sensor mode settings for Scooby
+ */
+
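+/*
+ * For reference: the nominal frame rate of each mode below follows from the
+ * full frame size and the pixel clock, i.e. pixel_clock / (width * height);
+ * e.g. 80 MHz / (3288 * 2016) = ~12.07 fps for the power-on mode, matching
+ * its .timeperframe of 100/1207.
+ */
+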
+/* Mode1_poweron_Mode2_16VGA_2592x1968_12.07fps */
+static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = {
+/* (without the +1)
+ * SPCK = 80 MHz
+ * CCP2 = 640 MHz
+ * VCO = 640 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 137 (3288)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 200
+ * VCO_DIV = 0
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_POWERON,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3288,
+ .height = 2016,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 2592,
+ .window_height = 1968,
+ .pixel_clock = 80000000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 1207
+ },
+ .max_exp = 2012,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+		/* Needs to be set first */
+ { ET8EK8_REG_8BIT, 0x126C, 0xCC },
+ /* Strobe and Data of CCP2 delay are minimized. */
+ { ET8EK8_REG_8BIT, 0x1269, 0x00 },
+ /* Refined value of Min H_COUNT */
+ { ET8EK8_REG_8BIT, 0x1220, 0x89 },
+ /* Frequency of SPCK setting (SPCK=MRCK) */
+ { ET8EK8_REG_8BIT, 0x123A, 0x07 },
+ { ET8EK8_REG_8BIT, 0x1241, 0x94 },
+ { ET8EK8_REG_8BIT, 0x1242, 0x02 },
+ { ET8EK8_REG_8BIT, 0x124B, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1255, 0xFF },
+ { ET8EK8_REG_8BIT, 0x1256, 0x9F },
+ { ET8EK8_REG_8BIT, 0x1258, 0x00 },
+ /* From parallel out to serial out */
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 },
+		/* From w/ embedded data to w/o embedded data */
+ { ET8EK8_REG_8BIT, 0x125E, 0xC0 },
+ /* CCP2 out is from STOP to ACTIVE */
+ { ET8EK8_REG_8BIT, 0x1263, 0x98 },
+ { ET8EK8_REG_8BIT, 0x1268, 0xC6 },
+ { ET8EK8_REG_8BIT, 0x1434, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1163, 0x44 },
+ { ET8EK8_REG_8BIT, 0x1166, 0x29 },
+ { ET8EK8_REG_8BIT, 0x1140, 0x02 },
+ { ET8EK8_REG_8BIT, 0x1011, 0x24 },
+ { ET8EK8_REG_8BIT, 0x1151, 0x80 },
+ { ET8EK8_REG_8BIT, 0x1152, 0x23 },
+ /* Initial setting for improvement2 of lower frequency noise */
+ { ET8EK8_REG_8BIT, 0x1014, 0x05 },
+ { ET8EK8_REG_8BIT, 0x1033, 0x06 },
+ { ET8EK8_REG_8BIT, 0x1034, 0x79 },
+ { ET8EK8_REG_8BIT, 0x1423, 0x3F },
+ { ET8EK8_REG_8BIT, 0x1424, 0x3F },
+ { ET8EK8_REG_8BIT, 0x1426, 0x00 },
+ /* Switch of Preset-White-balance (0d:disable / 1d:enable) */
+ { ET8EK8_REG_8BIT, 0x1439, 0x00 },
+ /* Switch of blemish correction (0d:disable / 1d:enable) */
+ { ET8EK8_REG_8BIT, 0x161F, 0x60 },
+ /* Switch of auto noise correction (0d:disable / 1d:enable) */
+ { ET8EK8_REG_8BIT, 0x1634, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1646, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1648, 0x00 },
+ { ET8EK8_REG_8BIT, 0x113E, 0x01 },
+ { ET8EK8_REG_8BIT, 0x113F, 0x22 },
+ { ET8EK8_REG_8BIT, 0x1239, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x07 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x64 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x89 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode1_16VGA_2592x1968_13.12fps_DPCM10-8 */
+static struct et8ek8_reglist mode1_16vga_2592x1968_13_12fps_dpcm10_8 = {
+/* (without the +1)
+ * SPCK = 80 MHz
+ * CCP2 = 560 MHz
+ * VCO = 560 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 128 (3072)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 175
+ * VCO_DIV = 0
+ * SPCK_DIV = 6
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3072,
+ .height = 2016,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 2592,
+ .window_height = 1968,
+ .pixel_clock = 80000000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 1292
+ },
+ .max_exp = 2012,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x57 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x82 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x06 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x64 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x80 }, /* <-changed to v14 7E->80 */
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode3_4VGA_1296x984_29.99fps_DPCM10-8 */
+static struct et8ek8_reglist mode3_4vga_1296x984_29_99fps_dpcm10_8 = {
+/* (without the +1)
+ * SPCK = 96.5333333333333 MHz
+ * CCP2 = 579.2 MHz
+ * VCO = 579.2 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 133 (3192)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 181
+ * VCO_DIV = 0
+ * SPCK_DIV = 5
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3192,
+ .height = 1008,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 1296,
+ .window_height = 984,
+ .pixel_clock = 96533333,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 3000
+ },
+ .max_exp = 1004,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x5A },
+ { ET8EK8_REG_8BIT, 0x1238, 0x82 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x05 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x63 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x85 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x63 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x83 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode4_SVGA_864x656_29.88fps */
+static struct et8ek8_reglist mode4_svga_864x656_29_88fps = {
+/* (without the +1)
+ * SPCK = 80 MHz
+ * CCP2 = 320 MHz
+ * VCO = 640 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 166 (3984)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 200
+ * VCO_DIV = 0
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 1
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3984,
+ .height = 672,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 864,
+ .window_height = 656,
+ .pixel_clock = 80000000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 2988
+ },
+ .max_exp = 668,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x71 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x07 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x62 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x62 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0xA6 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode5_VGA_648x492_29.93fps */
+static struct et8ek8_reglist mode5_vga_648x492_29_93fps = {
+/* (without the +1)
+ * SPCK = 80 MHz
+ * CCP2 = 320 MHz
+ * VCO = 640 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 221 (5304)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 200
+ * VCO_DIV = 0
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 1
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 5304,
+ .height = 504,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 648,
+ .window_height = 492,
+ .pixel_clock = 80000000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 2993
+ },
+ .max_exp = 500,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x71 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x07 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x61 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x61 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0xDD },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode2_16VGA_2592x1968_3.99fps */
+static struct et8ek8_reglist mode2_16vga_2592x1968_3_99fps = {
+/* (without the +1)
+ * SPCK = 80 MHz
+ * CCP2 = 640 MHz
+ * VCO = 640 MHz
+ * VCOUNT = 254 (6096)
+ * HCOUNT = 137 (3288)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 200
+ * VCO_DIV = 0
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3288,
+ .height = 6096,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 2592,
+ .window_height = 1968,
+ .pixel_clock = 80000000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 399
+ },
+ .max_exp = 6092,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x07 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x64 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x89 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0xFE },
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode_648x492_5fps */
+static struct et8ek8_reglist mode_648x492_5fps = {
+/* (without the +1)
+ * SPCK = 13.3333333333333 MHz
+ * CCP2 = 53.3333333333333 MHz
+ * VCO = 640 MHz
+ * VCOUNT = 84 (2016)
+ * HCOUNT = 221 (5304)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 200
+ * VCO_DIV = 5
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 1
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 5304,
+ .height = 504,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 648,
+ .window_height = 492,
+ .pixel_clock = 13333333,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 499
+ },
+ .max_exp = 500,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x64 },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x71 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x57 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x61 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x61 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0xDD },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x54 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode3_4VGA_1296x984_5fps */
+static struct et8ek8_reglist mode3_4vga_1296x984_5fps = {
+/* (without the +1)
+ * SPCK = 49.4 MHz
+ * CCP2 = 395.2 MHz
+ * VCO = 790.4 MHz
+ * VCOUNT = 250 (6000)
+ * HCOUNT = 137 (3288)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 247
+ * VCO_DIV = 1
+ * SPCK_DIV = 7
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3288,
+ .height = 3000,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 1296,
+ .window_height = 984,
+ .pixel_clock = 49400000,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 501
+ },
+ .max_exp = 2996,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x7B },
+ { ET8EK8_REG_8BIT, 0x1238, 0x82 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x17 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x63 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x63 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x89 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0xFA },
+ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, /* CCP_LVDS_MODE/ */
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+/* Mode_4VGA_1296x984_25fps_DPCM10-8 */
+static struct et8ek8_reglist mode_4vga_1296x984_25fps_dpcm10_8 = {
+/* (without the +1)
+ * SPCK = 84.2666666666667 MHz
+ * CCP2 = 505.6 MHz
+ * VCO = 505.6 MHz
+ * VCOUNT = 88 (2112)
+ * HCOUNT = 133 (3192)
+ * CKREF_DIV = 2
+ * CKVAR_DIV = 158
+ * VCO_DIV = 0
+ * SPCK_DIV = 5
+ * MRCK_DIV = 7
+ * LVDSCK_DIV = 0
+ */
+ .type = ET8EK8_REGLIST_MODE,
+ .mode = {
+ .sensor_width = 2592,
+ .sensor_height = 1968,
+ .sensor_window_origin_x = 0,
+ .sensor_window_origin_y = 0,
+ .sensor_window_width = 2592,
+ .sensor_window_height = 1968,
+ .width = 3192,
+ .height = 1056,
+ .window_origin_x = 0,
+ .window_origin_y = 0,
+ .window_width = 1296,
+ .window_height = 984,
+ .pixel_clock = 84266667,
+ .ext_clock = 9600000,
+ .timeperframe = {
+ .numerator = 100,
+ .denominator = 2500
+ },
+ .max_exp = 1052,
+ /* .max_gain = 0, */
+ .bus_format = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .sensitivity = 65536
+ },
+ .regs = {
+ { ET8EK8_REG_8BIT, 0x1239, 0x4F },
+ { ET8EK8_REG_8BIT, 0x1238, 0x02 },
+ { ET8EK8_REG_8BIT, 0x123B, 0x70 },
+ { ET8EK8_REG_8BIT, 0x123A, 0x05 },
+ { ET8EK8_REG_8BIT, 0x121B, 0x63 },
+ { ET8EK8_REG_8BIT, 0x1220, 0x85 },
+ { ET8EK8_REG_8BIT, 0x1221, 0x00 },
+ { ET8EK8_REG_8BIT, 0x1222, 0x58 },
+ { ET8EK8_REG_8BIT, 0x1223, 0x00 },
+ { ET8EK8_REG_8BIT, 0x121D, 0x63 },
+ { ET8EK8_REG_8BIT, 0x125D, 0x83 },
+ { ET8EK8_REG_TERM, 0, 0}
+ }
+};
+
+struct et8ek8_meta_reglist meta_reglist = {
+ .version = "V14 03-June-2008",
+ .reglist = {
+ { .ptr = &mode1_poweron_mode2_16vga_2592x1968_12_07fps },
+ { .ptr = &mode1_16vga_2592x1968_13_12fps_dpcm10_8 },
+ { .ptr = &mode3_4vga_1296x984_29_99fps_dpcm10_8 },
+ { .ptr = &mode4_svga_864x656_29_88fps },
+ { .ptr = &mode5_vga_648x492_29_93fps },
+ { .ptr = &mode2_16vga_2592x1968_3_99fps },
+ { .ptr = &mode_648x492_5fps },
+ { .ptr = &mode3_4vga_1296x984_5fps },
+ { .ptr = &mode_4vga_1296x984_25fps_dpcm10_8 },
+ { .ptr = NULL }
+ }
+};
diff --git a/drivers/media/i2c/et8ek8/et8ek8_reg.h b/drivers/media/i2c/et8ek8/et8ek8_reg.h
new file mode 100644
index 000000000000..07f1873a9c3d
--- /dev/null
+++ b/drivers/media/i2c/et8ek8/et8ek8_reg.h
@@ -0,0 +1,96 @@
+/*
+ * et8ek8_reg.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef ET8EK8REGS_H
+#define ET8EK8REGS_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-subdev.h>
+
+struct v4l2_mbus_framefmt;
+struct v4l2_subdev_pad_mbus_code_enum;
+
+struct et8ek8_mode {
+ /* Physical sensor resolution and current image window */
+ u16 sensor_width;
+ u16 sensor_height;
+ u16 sensor_window_origin_x;
+ u16 sensor_window_origin_y;
+ u16 sensor_window_width;
+ u16 sensor_window_height;
+
+ /* Image data coming from sensor (after scaling) */
+ u16 width;
+ u16 height;
+ u16 window_origin_x;
+ u16 window_origin_y;
+ u16 window_width;
+ u16 window_height;
+
+ u32 pixel_clock; /* in Hz */
+ u32 ext_clock; /* in Hz */
+ struct v4l2_fract timeperframe;
+ u32 max_exp; /* Maximum exposure value */
+ u32 bus_format; /* MEDIA_BUS_FMT_ */
+ u32 sensitivity; /* 16.16 fixed point */
+};
+
+#define ET8EK8_REG_8BIT 1
+#define ET8EK8_REG_16BIT 2
+#define ET8EK8_REG_DELAY 100
+#define ET8EK8_REG_TERM 0xff
+struct et8ek8_reg {
+ u16 type;
+ u16 reg; /* 16-bit offset */
+ u32 val; /* 8/16/32-bit value */
+};
+
+/* Possible struct et8ek8_reglist types. */
+#define ET8EK8_REGLIST_STANDBY 0
+#define ET8EK8_REGLIST_POWERON 1
+#define ET8EK8_REGLIST_RESUME 2
+#define ET8EK8_REGLIST_STREAMON 3
+#define ET8EK8_REGLIST_STREAMOFF 4
+#define ET8EK8_REGLIST_DISABLED 5
+
+#define ET8EK8_REGLIST_MODE 10
+
+#define ET8EK8_REGLIST_LSC_ENABLE 100
+#define ET8EK8_REGLIST_LSC_DISABLE 101
+#define ET8EK8_REGLIST_ANR_ENABLE 102
+#define ET8EK8_REGLIST_ANR_DISABLE 103
+
+struct et8ek8_reglist {
+ u32 type;
+ struct et8ek8_mode mode;
+ struct et8ek8_reg regs[];
+};
+
+#define ET8EK8_MAX_LEN 32
+struct et8ek8_meta_reglist {
+ char version[ET8EK8_MAX_LEN];
+ union {
+ struct et8ek8_reglist *ptr;
+ } reglist[];
+};
+
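+/* The reglist[] array is terminated by an entry with .ptr == NULL. */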
+extern struct et8ek8_meta_reglist meta_reglist;
+
+#endif /* ET8EK8REGS_H */
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index cede3975d04b..cee7fd9cf08b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -29,10 +29,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <asm/unaligned.h>
@@ -428,7 +424,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
* If platform_data doesn't specify rc_dev, initialize it
* internally
*/
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc)
return -ENOMEM;
}
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 77551baab068..ab536c4a7115 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*****************************************************************************
*
* Modified and extended by
diff --git a/drivers/media/i2c/ks0127.h b/drivers/media/i2c/ks0127.h
index cb8abd5403b3..636b70a984f7 100644
--- a/drivers/media/i2c/ks0127.h
+++ b/drivers/media/i2c/ks0127.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef KS0127_H
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index 89c28c36c5bf..a7a8f9a4e45c 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index acb804bceccb..9ccb5ee55fa9 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -168,7 +168,7 @@ static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
msg[1].buf = rbuf;
/* minimum stabilization time */
- usleep_range(200, 200);
+ usleep_range(200, 300);
ret = i2c_transfer(client->adapter, msg, 2);
@@ -268,7 +268,8 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
*buf = m5mols_swap_byte((u8 *)&val, size);
- usleep_range(200, 200);
+ /* minimum stabilization time */
+ usleep_range(200, 300);
ret = i2c_transfer(client->adapter, msg, 1);
if (ret == 1)
@@ -651,7 +652,7 @@ static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
+static const struct v4l2_subdev_pad_ops m5mols_pad_ops = {
.enum_mbus_code = m5mols_enum_mbus_code,
.get_fmt = m5mols_get_fmt,
.set_fmt = m5mols_set_fmt,
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 38a20fe181ee..57ef901edb06 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -290,7 +290,7 @@ static const struct v4l2_ctrl_ops ml86v7667_ctrl_ops = {
.s_ctrl = ml86v7667_s_ctrl,
};
-static struct v4l2_subdev_video_ops ml86v7667_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops ml86v7667_subdev_video_ops = {
.g_std = ml86v7667_g_std,
.s_std = ml86v7667_s_std,
.querystd = ml86v7667_querystd,
@@ -304,14 +304,14 @@ static const struct v4l2_subdev_pad_ops ml86v7667_subdev_pad_ops = {
.set_fmt = ml86v7667_fill_fmt,
};
-static struct v4l2_subdev_core_ops ml86v7667_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops ml86v7667_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ml86v7667_g_register,
.s_register = ml86v7667_s_register,
#endif
};
-static struct v4l2_subdev_ops ml86v7667_subdev_ops = {
+static const struct v4l2_subdev_ops ml86v7667_subdev_ops = {
.core = &ml86v7667_subdev_core_ops,
.video = &ml86v7667_subdev_video_ops,
.pad = &ml86v7667_subdev_pad_ops,
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 201a9800ea52..3db966db83eb 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -39,11 +39,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index eec7aa4c6f98..11fc593ed908 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index da076796999e..6a9e068462fd 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -13,11 +13,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/delay.h>
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 237737fec09c..91d822fc4443 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -972,15 +972,15 @@ static int mt9p031_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
return mt9p031_set_power(subdev, 0);
}
-static struct v4l2_subdev_core_ops mt9p031_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9p031_subdev_core_ops = {
.s_power = mt9p031_set_power,
};
-static struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
.s_stream = mt9p031_s_stream,
};
-static struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
+static const struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
.enum_mbus_code = mt9p031_enum_mbus_code,
.enum_frame_size = mt9p031_enum_frame_size,
.get_fmt = mt9p031_get_format,
@@ -989,7 +989,7 @@ static struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
.set_selection = mt9p031_set_selection,
};
-static struct v4l2_subdev_ops mt9p031_subdev_ops = {
+static const struct v4l2_subdev_ops mt9p031_subdev_ops = {
.core = &mt9p031_subdev_core_ops,
.video = &mt9p031_subdev_video_ops,
.pad = &mt9p031_subdev_pad_ops,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 58eb62f1ba21..2e7a6e62a358 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -266,8 +266,7 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
struct regmap *map = mt9v032->regmap;
int ret;
- if (mt9v032->reset_gpio)
- gpiod_set_value_cansleep(mt9v032->reset_gpio, 1);
+ gpiod_set_value_cansleep(mt9v032->reset_gpio, 1);
ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk);
if (ret < 0)
@@ -936,15 +935,15 @@ static int mt9v032_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
return mt9v032_set_power(subdev, 0);
}
-static struct v4l2_subdev_core_ops mt9v032_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9v032_subdev_core_ops = {
.s_power = mt9v032_set_power,
};
-static struct v4l2_subdev_video_ops mt9v032_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9v032_subdev_video_ops = {
.s_stream = mt9v032_s_stream,
};
-static struct v4l2_subdev_pad_ops mt9v032_subdev_pad_ops = {
+static const struct v4l2_subdev_pad_ops mt9v032_subdev_pad_ops = {
.enum_mbus_code = mt9v032_enum_mbus_code,
.enum_frame_size = mt9v032_enum_frame_size,
.get_fmt = mt9v032_get_format,
@@ -953,7 +952,7 @@ static struct v4l2_subdev_pad_ops mt9v032_subdev_pad_ops = {
.set_selection = mt9v032_set_selection,
};
-static struct v4l2_subdev_ops mt9v032_subdev_ops = {
+static const struct v4l2_subdev_ops mt9v032_subdev_ops = {
.core = &mt9v032_subdev_core_ops,
.video = &mt9v032_subdev_video_ops,
.pad = &mt9v032_subdev_pad_ops,
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 30cb90b88d75..88c498ad45df 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -664,13 +664,13 @@ static const struct v4l2_subdev_core_ops noon010_core_ops = {
.log_status = noon010_log_status,
};
-static struct v4l2_subdev_pad_ops noon010_pad_ops = {
+static const struct v4l2_subdev_pad_ops noon010_pad_ops = {
.enum_mbus_code = noon010_enum_mbus_code,
.get_fmt = noon010_get_fmt,
.set_fmt = noon010_set_fmt,
};
-static struct v4l2_subdev_video_ops noon010_video_ops = {
+static const struct v4l2_subdev_video_ops noon010_video_ops = {
.s_stream = noon010_s_stream,
};
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 1f999e9c0118..6e6367214d40 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1121,7 +1121,6 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
return -EINVAL;
mf->colorspace = V4L2_COLORSPACE_SRGB;
- mf->code = ov2659_formats[index].code;
mf->field = V4L2_FIELD_NONE;
mutex_lock(&ov2659->lock);
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index b8961df5af33..a03b41a3639e 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/init.h>
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 502c72238a4a..2de2fbb13b85 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -522,7 +522,7 @@ static void __ov965x_set_power(struct ov965x *ov965x, int on)
if (on) {
ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 0);
ov965x_gpio_set(ov965x->gpios[GPIO_RST], 0);
- usleep_range(25000, 26000);
+ msleep(25);
} else {
ov965x_gpio_set(ov965x->gpios[GPIO_RST], 1);
ov965x_gpio_set(ov965x->gpios[GPIO_PWDN], 1);
@@ -1438,7 +1438,7 @@ static int ov965x_detect_sensor(struct v4l2_subdev *sd)
mutex_lock(&ov965x->lock);
__ov965x_set_power(ov965x, 1);
- usleep_range(25000, 26000);
+ msleep(25);
/* Check sensor revision */
ret = ov965x_read(client, REG_PID, &pid);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
index 0a060339e516..2e7185030741 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
}
if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
- ret = s5c73m3_af_run(state, ~af_lock);
+ ret = s5c73m3_af_run(state, !af_lock);
return ret;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 769964057881..67dcca76f981 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -165,7 +165,7 @@ static int s5k6a3_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
+static const struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
.enum_mbus_code = s5k6a3_enum_mbus_code,
.get_fmt = s5k6a3_get_fmt,
.set_fmt = s5k6a3_set_fmt,
@@ -266,11 +266,11 @@ static int s5k6a3_s_power(struct v4l2_subdev *sd, int on)
return ret;
}
-static struct v4l2_subdev_core_ops s5k6a3_core_ops = {
+static const struct v4l2_subdev_core_ops s5k6a3_core_ops = {
.s_power = s5k6a3_s_power,
};
-static struct v4l2_subdev_ops s5k6a3_subdev_ops = {
+static const struct v4l2_subdev_ops s5k6a3_subdev_ops = {
.core = &s5k6a3_core_ops,
.pad = &s5k6a3_pad_ops,
};
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index ad456ce051f9..63fe521752a1 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 58062b41c923..d863b04aa2a8 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -31,10 +31,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "saa711x_regs.h"
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 8d94dcbf4366..99c303002e90 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -41,10 +41,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 1baca37f3eb6..e1f6bc219c64 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -24,10 +24,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index 119050e1197a..0e27fafaef2d 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b31f832..f4e92bdfe192 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
* I2C Driver
*/
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
return 0;
}
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
return rval;
}
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume NULL
-
-#endif /* CONFIG_PM */
-
static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
if (IS_ERR(sensor->xshutdown))
return PTR_ERR(sensor->xshutdown);
- pm_runtime_enable(&client->dev);
-
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
+ rval = smiapp_power_on(&client->dev);
+ if (rval < 0)
+ return rval;
rval = smiapp_identify_module(sensor);
if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
smiapp_cleanup(sensor);
out_power_off:
- pm_runtime_put(&client->dev);
- pm_runtime_disable(&client->dev);
+ smiapp_power_off(&client->dev);
return rval;
}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
- pm_runtime_suspend(&client->dev);
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ smiapp_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 8c93c57af71c..65085a235128 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -233,7 +233,7 @@ static int ov9640_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 unset)
if (ret) {
dev_err(&client->dev,
"[Read]-Modify-Write of register %02x failed!\n", reg);
- return val;
+ return ret;
}
val |= set;
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index 6b1a04ffad32..a9c067bcc0ac 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 1e3a0dd2238c..f569a05fe105 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -96,6 +96,7 @@ struct tc358743_state {
struct v4l2_dv_timings timings;
u32 mbus_fmt_code;
+ u8 csi_lanes_in_use;
struct gpio_desc *reset_gpio;
};
@@ -287,11 +288,6 @@ static int get_audio_sampling_rate(struct v4l2_subdev *sd)
return code_to_rate[i2c_rd8(sd, FS_SET) & MASK_FS];
}
-static unsigned tc358743_num_csi_lanes_in_use(struct v4l2_subdev *sd)
-{
- return ((i2c_rd32(sd, CSI_CONTROL) & MASK_NOL) >> 1) + 1;
-}
-
/* --------------- TIMINGS --------------- */
static inline unsigned fps(const struct v4l2_bt_timings *t)
@@ -372,29 +368,21 @@ static void tc358743_set_hdmi_hdcp(struct v4l2_subdev *sd, bool enable)
v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ?
"enable" : "disable");
- i2c_wr8_and_or(sd, HDCP_REG1,
- ~(MASK_AUTH_UNAUTH_SEL | MASK_AUTH_UNAUTH),
- MASK_AUTH_UNAUTH_SEL_16_FRAMES | MASK_AUTH_UNAUTH_AUTO);
+ if (enable) {
+ i2c_wr8_and_or(sd, HDCP_REG3, ~KEY_RD_CMD, KEY_RD_CMD);
- i2c_wr8_and_or(sd, HDCP_REG2, ~MASK_AUTO_P3_RESET,
- SET_AUTO_P3_RESET_FRAMES(0x0f));
+ i2c_wr8_and_or(sd, HDCP_MODE, ~MASK_MANUAL_AUTHENTICATION, 0);
- /* HDCP is disabled by configuring the receiver as HDCP repeater. The
- * repeater mode require software support to work, so HDCP
- * authentication will fail.
- */
- i2c_wr8_and_or(sd, HDCP_REG3, ~KEY_RD_CMD, enable ? KEY_RD_CMD : 0);
- i2c_wr8_and_or(sd, HDCP_MODE, ~(MASK_AUTO_CLR | MASK_MODE_RST_TN),
- enable ? (MASK_AUTO_CLR | MASK_MODE_RST_TN) : 0);
+ i2c_wr8_and_or(sd, HDCP_REG1, 0xff,
+ MASK_AUTH_UNAUTH_SEL_16_FRAMES |
+ MASK_AUTH_UNAUTH_AUTO);
- /* Apple MacBook Pro gen.8 has a bug that makes it freeze every fifth
- * second when HDCP is disabled, but the MAX_EXCED bit is handled
- * correctly and HDCP is disabled on the HDMI output.
- */
- i2c_wr8_and_or(sd, BSTATUS1, ~MASK_MAX_EXCED,
- enable ? 0 : MASK_MAX_EXCED);
- i2c_wr8_and_or(sd, BCAPS, ~(MASK_REPEATER | MASK_READY),
- enable ? 0 : MASK_REPEATER | MASK_READY);
+ i2c_wr8_and_or(sd, HDCP_REG2, ~MASK_AUTO_P3_RESET,
+ SET_AUTO_P3_RESET_FRAMES(0x0f));
+ } else {
+ i2c_wr8_and_or(sd, HDCP_MODE, ~MASK_MANUAL_AUTHENTICATION,
+ MASK_MANUAL_AUTHENTICATION);
+ }
}
static void tc358743_disable_edid(struct v4l2_subdev *sd)
@@ -416,6 +404,7 @@ static void tc358743_enable_edid(struct v4l2_subdev *sd)
if (state->edid_blocks_written == 0) {
v4l2_dbg(2, debug, sd, "%s: no EDID -> no hotplug\n", __func__);
+ tc358743_s_ctrl_detect_tx_5v(sd);
return;
}
@@ -683,6 +672,8 @@ static void tc358743_set_csi(struct v4l2_subdev *sd)
v4l2_dbg(3, debug, sd, "%s:\n", __func__);
+ state->csi_lanes_in_use = lanes;
+
tc358743_reset(sd, MASK_CTXRST);
if (lanes < 1)
@@ -1155,7 +1146,7 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Lanes needed: %d\n",
tc358743_num_csi_lanes_needed(sd));
v4l2_info(sd, "Lanes in use: %d\n",
- tc358743_num_csi_lanes_in_use(sd));
+ state->csi_lanes_in_use);
v4l2_info(sd, "Waiting for particular sync signal: %s\n",
(i2c_rd16(sd, CSI_STATUS) & MASK_S_WSYNC) ?
"yes" : "no");
@@ -1438,12 +1429,14 @@ static int tc358743_dv_timings_cap(struct v4l2_subdev *sd,
static int tc358743_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
+ struct tc358743_state *state = to_state(sd);
+
cfg->type = V4L2_MBUS_CSI2;
/* Support for non-continuous CSI-2 clock is missing in the driver */
cfg->flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
- switch (tc358743_num_csi_lanes_in_use(sd)) {
+ switch (state->csi_lanes_in_use) {
case 1:
cfg->flags |= V4L2_MBUS_CSI2_1_LANE;
break;
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 81f1db558e7c..657ef50f215f 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -420,6 +420,7 @@
#define MASK_MODE_RST_TN 0x20
#define MASK_LINE_REKEY 0x10
#define MASK_AUTO_CLR 0x04
+#define MASK_MANUAL_AUTHENTICATION 0x02 /* Not in REF_01 */
#define HDCP_REG1 0x8563 /* Not in REF_01 */
#define MASK_AUTH_UNAUTH_SEL 0x70
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index cc6104da34ef..6ac26986f6a2 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 0c62899c3667..07853d2252aa 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -23,10 +23,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/i2c.h>
diff --git a/drivers/media/i2c/tvp514x_regs.h b/drivers/media/i2c/tvp514x_regs.h
index d23aa2fbb9b1..1e6c0857590e 100644
--- a/drivers/media/i2c/tvp514x_regs.h
+++ b/drivers/media/i2c/tvp514x_regs.h
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _TVP514X_REGS_H
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8cc64e9..48646a7f3fb0 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
- /* Svideo should enable YCrCb output and disable GPCL output
- * For Composite and TV, it should be the reverse
+ /*
+ * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+ * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+ * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+ * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+ * INTREQ/GPCL/VBLK to logic 1.
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
}
if (decoder->input == TVP5150_SVIDEO)
- val = (val & ~0x40) | 0x10;
+ val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
else
- val = (val & ~0x10) | 0x40;
+ val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
tvp5150_write(sd, TVP5150_MISC_CTL, val);
};
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
},{ /* Automatic offset and AGC enabled */
TVP5150_ANAL_CHL_CTL, 0x15
},{ /* Activate YCrCb output 0x9 or 0xd ? */
- TVP5150_MISC_CTL, 0x6f
+ TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+ TVP5150_MISC_CTL_INTREQ_OE |
+ TVP5150_MISC_CTL_YCBCR_OE |
+ TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_VBLANK |
+ TVP5150_MISC_CTL_CLOCK_OE,
},{ /* Activates video std autodetection for all standards */
TVP5150_AUTOSW_MSK, 0x0
},{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
- tvp5150_reset(sd, 0);
-
f->width = decoder->rect.width;
f->height = decoder->rect.height / 2;
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
- int val = 0x09;
-
- /* Output format: 8-bit 4:2:2 YUV with discrete sync */
- if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
- val = 0x0d;
+ int val;
- /* Initializes TVP5150 to its default values */
- /* # set PCLK (27MHz) */
- tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+ /* Enable or disable the video output signals. */
+ val = tvp5150_read(sd, TVP5150_MISC_CTL);
+ if (val < 0)
+ return val;
+
+ val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_CLOCK_OE);
+
+ if (enable) {
+ /*
+ * Enable the YCbCr and clock outputs. In discrete sync mode
+		 * (non-BT.656) additionally enable the sync outputs.
+ */
+ val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+ if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+ val |= TVP5150_MISC_CTL_SYNC_OE;
+ }
- if (enable)
- tvp5150_write(sd, TVP5150_MISC_CTL, val);
- else
- tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+ tvp5150_write(sd, TVP5150_MISC_CTL, val);
return 0;
}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
res = core->hdl.error;
goto err;
}
- v4l2_ctrl_handler_setup(&core->hdl);
/* Default is no cropping */
core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
core->rect.left = 0;
core->rect.width = TVP5150_H_MAX;
+ tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
+
res = v4l2_async_register_subdev(sd);
if (res < 0)
goto err;
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a994944918..30a48c28d05a 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
+#define TVP5150_MISC_CTL_GPCL BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
+#define TVP5150_MISC_CTL_HVLK BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
+#define TVP5150_MISC_CTL_VBLANK BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
+
#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
/* Reserved 05h */
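As a cross-check on the tvp5150 hunks above, the new TVP5150_MISC_CTL_* names decompose the magic values the driver previously wrote directly (0x6f in the init table, 0x09/0x0d in s_stream, 0x40/0x10 in selmux). A minimal compile-time sketch, using a local stand-in for the kernel's BIT() macro:

/* Illustrative only: decode of the old magic values into the new bit names. */
#define BIT(n)				(1U << (n))	/* stand-in for the kernel's BIT() */

#define TVP5150_MISC_CTL_GPCL		BIT(6)
#define TVP5150_MISC_CTL_INTREQ_OE	BIT(5)
#define TVP5150_MISC_CTL_HVLK		BIT(4)
#define TVP5150_MISC_CTL_YCBCR_OE	BIT(3)
#define TVP5150_MISC_CTL_SYNC_OE	BIT(2)
#define TVP5150_MISC_CTL_VBLANK		BIT(1)
#define TVP5150_MISC_CTL_CLOCK_OE	BIT(0)

/* 0x6f: the tvp5150_init_enable[] entry for TVP5150_MISC_CTL */
_Static_assert((TVP5150_MISC_CTL_GPCL | TVP5150_MISC_CTL_INTREQ_OE |
		TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
		TVP5150_MISC_CTL_VBLANK | TVP5150_MISC_CTL_CLOCK_OE) == 0x6f,
	       "init table value");
/* 0x09: old tvp5150_s_stream() value for BT.656 output (embedded syncs) */
_Static_assert((TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE) == 0x09,
	       "BT.656 output");
/* 0x0d: old tvp5150_s_stream() value for parallel output (discrete syncs) */
_Static_assert((TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
		TVP5150_MISC_CTL_CLOCK_OE) == 0x0d, "discrete sync output");
/* 0x40 / 0x10: the values toggled in tvp5150_selmux() */
_Static_assert(TVP5150_MISC_CTL_GPCL == 0x40 && TVP5150_MISC_CTL_HVLK == 0x10,
	       "selmux bits");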
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 3dc3341c4896..4c1190127c85 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/media/i2c/tvp7002_reg.h b/drivers/media/i2c/tvp7002_reg.h
index 0e34ca9bccf3..933673561fa2 100644
--- a/drivers/media/i2c/tvp7002_reg.h
+++ b/drivers/media/i2c/tvp7002_reg.h
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Naming conventions
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index 7347480c0b0c..bc8a3ecebffb 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index bef79cf74364..af32db3d7408 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 316a3113ef27..5081307b2cdb 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index 8e17a83920d4..eb0084ebe35e 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index c03567e993cd..7ad5d51dfbc3 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 77f122f2e3c9..c7fdd7c163cb 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index ef0d8b8e3df7..c6611a3f2b3d 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index ce9f09370e22..67de79b2d550 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index be4cb7a8bdeb..f0741ab338df 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
diff --git a/drivers/media/i2c/vs6624_regs.h b/drivers/media/i2c/vs6624_regs.h
index 6ba2ee25827e..f78e7d1087a4 100644
--- a/drivers/media/i2c/vs6624_regs.h
+++ b/drivers/media/i2c/vs6624_regs.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _VS6624_REGS_H_
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index c885def54b15..23464d0641fe 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 45039d756753..704bccf0d4b2 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 8756275e9fc4..760e3e424e23 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* We need to access legacy defines from linux/media.h */
@@ -130,7 +126,7 @@ static long media_device_enum_entities(struct media_device *mdev,
* old range.
*/
if (ent->function < MEDIA_ENT_F_OLD_BASE ||
- ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
+ ent->function > MEDIA_ENT_F_TUNER) {
if (is_media_entity_v4l2_subdev(ent))
entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
else if (ent->function != MEDIA_ENT_F_IO_V4L)
@@ -601,19 +597,19 @@ int __must_check media_device_register_entity(struct media_device *mdev,
if (mdev->entity_internal_idx_max
>= mdev->pm_count_walk.ent_enum.idx_max) {
- struct media_entity_graph new = { .top = 0 };
+ struct media_graph new = { .top = 0 };
/*
* Initialise the new graph walk before cleaning up
* the old one in order not to spoil the graph walk
* object of the media device if graph walk init fails.
*/
- ret = media_entity_graph_walk_init(&new, mdev);
+ ret = media_graph_walk_init(&new, mdev);
if (ret) {
mutex_unlock(&mdev->graph_mutex);
return ret;
}
- media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
+ media_graph_walk_cleanup(&mdev->pm_count_walk);
mdev->pm_count_walk = new;
}
mutex_unlock(&mdev->graph_mutex);
@@ -695,7 +691,7 @@ void media_device_cleanup(struct media_device *mdev)
{
ida_destroy(&mdev->entity_internal_idx);
mdev->entity_internal_idx_max = 0;
- media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
+ media_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index f2772ba6f611..ae46753c90cb 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* --
*
* Generic media device node infrastructure to register and unregister
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index f9f723f5e4f0..5640ca29da8c 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/bitmap.h>
@@ -258,7 +254,7 @@ media_entity_other(struct media_entity *entity, struct media_link *link)
}
/* push an entity to traversal stack */
-static void stack_push(struct media_entity_graph *graph,
+static void stack_push(struct media_graph *graph,
struct media_entity *entity)
{
if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) {
@@ -270,7 +266,7 @@ static void stack_push(struct media_entity_graph *graph,
graph->stack[graph->top].entity = entity;
}
-static struct media_entity *stack_pop(struct media_entity_graph *graph)
+static struct media_entity *stack_pop(struct media_graph *graph)
{
struct media_entity *entity;
@@ -289,35 +285,35 @@ static struct media_entity *stack_pop(struct media_entity_graph *graph)
#define MEDIA_ENTITY_MAX_PADS 512
/**
- * media_entity_graph_walk_init - Allocate resources for graph walk
+ * media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Media device
*
* Reserve resources for graph walk in media device's current
* state. The memory must be released using
- * media_entity_graph_walk_free().
+ * media_graph_walk_free().
*
* Returns error on failure, zero on success.
*/
-__must_check int media_entity_graph_walk_init(
- struct media_entity_graph *graph, struct media_device *mdev)
+__must_check int media_graph_walk_init(
+ struct media_graph *graph, struct media_device *mdev)
{
return media_entity_enum_init(&graph->ent_enum, mdev);
}
-EXPORT_SYMBOL_GPL(media_entity_graph_walk_init);
+EXPORT_SYMBOL_GPL(media_graph_walk_init);
/**
- * media_entity_graph_walk_cleanup - Release resources related to graph walking
+ * media_graph_walk_cleanup - Release resources related to graph walking
* @graph: Media graph structure that was used to walk the graph
*/
-void media_entity_graph_walk_cleanup(struct media_entity_graph *graph)
+void media_graph_walk_cleanup(struct media_graph *graph)
{
media_entity_enum_cleanup(&graph->ent_enum);
}
-EXPORT_SYMBOL_GPL(media_entity_graph_walk_cleanup);
+EXPORT_SYMBOL_GPL(media_graph_walk_cleanup);
-void media_entity_graph_walk_start(struct media_entity_graph *graph,
- struct media_entity *entity)
+void media_graph_walk_start(struct media_graph *graph,
+ struct media_entity *entity)
{
media_entity_enum_zero(&graph->ent_enum);
media_entity_enum_set(&graph->ent_enum, entity);
@@ -325,12 +321,52 @@ void media_entity_graph_walk_start(struct media_entity_graph *graph,
graph->top = 0;
graph->stack[graph->top].entity = NULL;
stack_push(graph, entity);
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "begin graph walk at '%s'\n", entity->name);
}
-EXPORT_SYMBOL_GPL(media_entity_graph_walk_start);
+EXPORT_SYMBOL_GPL(media_graph_walk_start);
-struct media_entity *
-media_entity_graph_walk_next(struct media_entity_graph *graph)
+static void media_graph_walk_iter(struct media_graph *graph)
{
+ struct media_entity *entity = stack_top(graph);
+ struct media_link *link;
+ struct media_entity *next;
+
+ link = list_entry(link_top(graph), typeof(*link), list);
+
+ /* The link is not enabled so we do not follow. */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ link_top(graph) = link_top(graph)->next;
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: skipping disabled link '%s':%u -> '%s':%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+ return;
+ }
+
+	/* Get the entity at the other end of the link. */
+ next = media_entity_other(entity, link);
+
+ /* Has the entity already been visited? */
+ if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {
+ link_top(graph) = link_top(graph)->next;
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: skipping entity '%s' (already seen)\n",
+ next->name);
+ return;
+ }
+
+ /* Push the new entity to stack and start over. */
+ link_top(graph) = link_top(graph)->next;
+ stack_push(graph, next);
+ dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n",
+ next->name);
+}
+
+struct media_entity *media_graph_walk_next(struct media_graph *graph)
+{
+ struct media_entity *entity;
+
if (stack_top(graph) == NULL)
return NULL;
@@ -339,59 +375,39 @@ media_entity_graph_walk_next(struct media_entity_graph *graph)
* top of the stack until no more entities on the level can be
* found.
*/
- while (link_top(graph) != &stack_top(graph)->links) {
- struct media_entity *entity = stack_top(graph);
- struct media_link *link;
- struct media_entity *next;
-
- link = list_entry(link_top(graph), typeof(*link), list);
-
- /* The link is not enabled so we do not follow. */
- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
- link_top(graph) = link_top(graph)->next;
- continue;
- }
+ while (link_top(graph) != &stack_top(graph)->links)
+ media_graph_walk_iter(graph);
- /* Get the entity in the other end of the link . */
- next = media_entity_other(entity, link);
+ entity = stack_pop(graph);
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: returning entity '%s'\n", entity->name);
- /* Has the entity already been visited? */
- if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {
- link_top(graph) = link_top(graph)->next;
- continue;
- }
-
- /* Push the new entity to stack and start over. */
- link_top(graph) = link_top(graph)->next;
- stack_push(graph, next);
- }
-
- return stack_pop(graph);
+ return entity;
}
-EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
+EXPORT_SYMBOL_GPL(media_graph_walk_next);
/* -----------------------------------------------------------------------------
* Pipeline management
*/
-__must_check int __media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int __media_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->graph_obj.mdev;
- struct media_entity_graph *graph = &pipe->graph;
+ struct media_graph *graph = &pipe->graph;
struct media_entity *entity_err = entity;
struct media_link *link;
int ret;
if (!pipe->streaming_count++) {
- ret = media_entity_graph_walk_init(&pipe->graph, mdev);
+ ret = media_graph_walk_init(&pipe->graph, mdev);
if (ret)
goto error_graph_walk_start;
}
- media_entity_graph_walk_start(&pipe->graph, entity);
+ media_graph_walk_start(&pipe->graph, entity);
- while ((entity = media_entity_graph_walk_next(graph))) {
+ while ((entity = media_graph_walk_next(graph))) {
DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);
@@ -441,7 +457,7 @@ __must_check int __media_entity_pipeline_start(struct media_entity *entity,
ret = entity->ops->link_validate(link);
if (ret < 0 && ret != -ENOIOCTLCMD) {
dev_dbg(entity->graph_obj.mdev->dev,
- "link validation failed for \"%s\":%u -> \"%s\":%u, error %d\n",
+ "link validation failed for '%s':%u -> '%s':%u, error %d\n",
link->source->entity->name,
link->source->index,
entity->name, link->sink->index, ret);
@@ -455,7 +471,7 @@ __must_check int __media_entity_pipeline_start(struct media_entity *entity,
if (!bitmap_full(active, entity->num_pads)) {
ret = -ENOLINK;
dev_dbg(entity->graph_obj.mdev->dev,
- "\"%s\":%u must be connected by an enabled link\n",
+ "'%s':%u must be connected by an enabled link\n",
entity->name,
(unsigned)find_first_zero_bit(
active, entity->num_pads));
@@ -470,11 +486,11 @@ error:
* Link validation on graph failed. We revert what we did and
* return the error.
*/
- media_entity_graph_walk_start(graph, entity_err);
+ media_graph_walk_start(graph, entity_err);
- while ((entity_err = media_entity_graph_walk_next(graph))) {
- /* don't let the stream_count go negative */
- if (entity->stream_count > 0) {
+ while ((entity_err = media_graph_walk_next(graph))) {
+ /* Sanity check for negative stream_count */
+ if (!WARN_ON_ONCE(entity_err->stream_count <= 0)) {
entity_err->stream_count--;
if (entity_err->stream_count == 0)
entity_err->pipe = NULL;
@@ -490,37 +506,37 @@ error:
error_graph_walk_start:
if (!--pipe->streaming_count)
- media_entity_graph_walk_cleanup(graph);
+ media_graph_walk_cleanup(graph);
return ret;
}
-EXPORT_SYMBOL_GPL(__media_entity_pipeline_start);
+EXPORT_SYMBOL_GPL(__media_pipeline_start);
-__must_check int media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int media_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->graph_obj.mdev;
int ret;
mutex_lock(&mdev->graph_mutex);
- ret = __media_entity_pipeline_start(entity, pipe);
+ ret = __media_pipeline_start(entity, pipe);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(media_entity_pipeline_start);
+EXPORT_SYMBOL_GPL(media_pipeline_start);
-void __media_entity_pipeline_stop(struct media_entity *entity)
+void __media_pipeline_stop(struct media_entity *entity)
{
- struct media_entity_graph *graph = &entity->pipe->graph;
+ struct media_graph *graph = &entity->pipe->graph;
struct media_pipeline *pipe = entity->pipe;
WARN_ON(!pipe->streaming_count);
- media_entity_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, entity);
- while ((entity = media_entity_graph_walk_next(graph))) {
- /* don't let the stream_count go negative */
- if (entity->stream_count > 0) {
+ while ((entity = media_graph_walk_next(graph))) {
+ /* Sanity check for negative stream_count */
+ if (!WARN_ON_ONCE(entity->stream_count <= 0)) {
entity->stream_count--;
if (entity->stream_count == 0)
entity->pipe = NULL;
@@ -528,20 +544,20 @@ void __media_entity_pipeline_stop(struct media_entity *entity)
}
if (!--pipe->streaming_count)
- media_entity_graph_walk_cleanup(graph);
+ media_graph_walk_cleanup(graph);
}
-EXPORT_SYMBOL_GPL(__media_entity_pipeline_stop);
+EXPORT_SYMBOL_GPL(__media_pipeline_stop);
-void media_entity_pipeline_stop(struct media_entity *entity)
+void media_pipeline_stop(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
- __media_entity_pipeline_stop(entity);
+ __media_pipeline_stop(entity);
mutex_unlock(&mdev->graph_mutex);
}
-EXPORT_SYMBOL_GPL(media_entity_pipeline_stop);
+EXPORT_SYMBOL_GPL(media_pipeline_stop);
/* -----------------------------------------------------------------------------
* Module use count
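The media_entity_graph to media_graph rename keeps the walk API's shape: allocate the entity enum with media_graph_walk_init(), seed the stack with media_graph_walk_start(), iterate with media_graph_walk_next() until it returns NULL, and release with media_graph_walk_cleanup(). A minimal caller sketch; count_reachable_entities() is a hypothetical helper, and taking mdev->graph_mutex around the walk is an assumption modelled on the pipeline code above:

/*
 * Illustrative walker built only on the renamed API; this helper is not
 * part of the patch.
 */
#include <media/media-device.h>
#include <media/media-entity.h>

static int count_reachable_entities(struct media_device *mdev,
				    struct media_entity *start)
{
	struct media_graph graph;
	struct media_entity *entity;
	int count = 0;
	int ret;

	ret = media_graph_walk_init(&graph, mdev);	/* allocate enum storage */
	if (ret)
		return ret;

	mutex_lock(&mdev->graph_mutex);			/* assumed locking rule */
	media_graph_walk_start(&graph, start);
	while ((entity = media_graph_walk_next(&graph)))
		count++;				/* 'start' is returned too */
	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);
	return count;
}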
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 99ce28442a75..6e60decb2198 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -157,7 +157,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
if (v.irq_20c.Data_receiver_error)
deb_chk("data receiver error\n");
if (v.irq_20c.Continuity_error_flag)
- deb_chk("Contunuity error flag is set\n");
+ deb_chk("Continuity error flag is set\n");
if (v.irq_20c.LLC_SNAP_FLAG_set)
deb_chk("LLC_SNAP_FLAG_set is set\n");
if (v.irq_20c.Transport_Error)
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 4da720e4867e..2fd07a8afcd2 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -424,7 +420,7 @@ int bttv_input_init(struct bttv *btv)
return -ENODEV;
ir = kzalloc(sizeof(*ir),GFP_KERNEL);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!ir || !rc)
goto err_out_free;
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 8681b9143a35..04d06c564602 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -475,16 +475,14 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message
static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
- int i = 0;
-
- u32 command = 0;
+ int i;
+ u32 command;
struct ca_msg *hw_buffer;
int result = 0;
- if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) {
- dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
+ hw_buffer = kmalloc(sizeof(*hw_buffer), GFP_KERNEL);
+ if (!hw_buffer)
return -ENOMEM;
- }
dprintk(verbose, DST_CA_DEBUG, 1, " ");
if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg))) {
@@ -567,7 +565,6 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
p_ca_slot_info = kmalloc(sizeof (struct ca_slot_info), GFP_KERNEL);
p_ca_caps = kmalloc(sizeof (struct ca_caps), GFP_KERNEL);
if (!p_ca_message || !p_ca_slot_info || !p_ca_caps) {
- dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
result = -ENOMEM;
goto free_mem_and_exit;
}
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 6100fa71ece8..ad617871ce9b 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -683,6 +679,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
/* DST is not a frontend, attaching the ASIC */
if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
pr_err("%s: Could not find a Twinhan DST\n", __func__);
+ kfree(state);
break;
}
/* Attach other DST peripherals if any */
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.h b/drivers/media/pci/bt8xx/dvb-bt8xx.h
index 4499ed2ac0ed..0ec538e23b4e 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.h
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef DVB_BT8XX_H
diff --git a/drivers/media/pci/cobalt/cobalt-cpld.c b/drivers/media/pci/cobalt/cobalt-cpld.c
index 23c875fc173e..bfcecef659e3 100644
--- a/drivers/media/pci/cobalt/cobalt-cpld.c
+++ b/drivers/media/pci/cobalt/cobalt-cpld.c
@@ -71,9 +71,9 @@ static void cpld_info_ver3(struct cobalt *cobalt)
cobalt_info("\t\tMAXII program revision: 0x%04x\n",
cpld_read(cobalt, 0x30));
cobalt_info("CPLD temp and voltage ADT7411 registers (read only)\n");
- cobalt_info("\t\tBoard temperature: %u Celcius\n",
+ cobalt_info("\t\tBoard temperature: %u Celsius\n",
cpld_read(cobalt, 0x34) / 4);
- cobalt_info("\t\tFPGA temperature: %u Celcius\n",
+ cobalt_info("\t\tFPGA temperature: %u Celsius\n",
cpld_read(cobalt, 0x38) / 4);
rd = cpld_read(cobalt, 0x3c);
tmp = (rd * 33 * 1000) / (483 * 10);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
{
free_irq(pci_dev->irq, (void *)cobalt);
-
- if (cobalt->msi_enabled)
- pci_disable_msi(pci_dev);
+ pci_free_irq_vectors(pci_dev);
}
static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
from being generated. */
cobalt_set_interrupt(cobalt, false);
- if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+ if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
cobalt_err("Could not enable MSI\n");
- cobalt->msi_enabled = false;
ret = -EIO;
goto err_release;
}
msi_config_show(cobalt, pci_dev);
- cobalt->msi_enabled = true;
/* Register IRQ */
if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
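The cobalt changes follow the general migration from pci_enable_msi_range()/pci_disable_msi() plus a driver-private msi_enabled flag to pci_alloc_irq_vectors()/pci_free_irq_vectors(), which let the PCI core track the allocation. A minimal sketch of the resulting request/free pattern; the demo_* names are hypothetical:

/*
 * Illustrative request/free pattern; only the PCI and IRQ calls are taken
 * from the change above.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_request_msi_irq(struct pci_dev *pci_dev, void *drvdata)
{
	int ret;

	/* One MSI vector; the PCI core tracks the allocation internally. */
	ret = pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return ret;

	ret = request_irq(pci_dev->irq, demo_irq_handler, IRQF_SHARED,
			  "demo-msi", drvdata);
	if (ret) {
		pci_free_irq_vectors(pci_dev);
		return ret;
	}
	return 0;
}

static void demo_free_msi_irq(struct pci_dev *pci_dev, void *drvdata)
{
	free_irq(pci_dev->irq, drvdata);
	pci_free_irq_vectors(pci_dev);	/* replaces the msi_enabled bookkeeping */
}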
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
u32 irq_none;
u32 irq_full_fifo;
- bool msi_enabled;
-
/* omnitek dma */
int dma_channels;
int first_fifo_channel;
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 9fb7f5978c8b..2531e4b81b60 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/media/pci/cx18/cx18-alsa-mixer.c b/drivers/media/pci/cx18/cx18-alsa-mixer.c
index 284275270f1b..06b066bc9301 100644
--- a/drivers/media/pci/cx18/cx18-alsa-mixer.c
+++ b/drivers/media/pci/cx18/cx18-alsa-mixer.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/media/pci/cx18/cx18-alsa-mixer.h b/drivers/media/pci/cx18/cx18-alsa-mixer.h
index ec9238793f6f..3aed123955dd 100644
--- a/drivers/media/pci/cx18/cx18-alsa-mixer.h
+++ b/drivers/media/pci/cx18/cx18-alsa-mixer.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int __init snd_cx18_mixer_create(struct snd_cx18_card *cxsc);
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 5344510fbea3..205a98da877c 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -16,11 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h
index e2b2c5b01215..b9e3afe14ee0 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.h
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
diff --git a/drivers/media/pci/cx18/cx18-alsa.h b/drivers/media/pci/cx18/cx18-alsa.h
index 2718be28bf5f..d88e3bd7944e 100644
--- a/drivers/media/pci/cx18/cx18-alsa.h
+++ b/drivers/media/pci/cx18/cx18-alsa.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
struct snd_card;
diff --git a/drivers/media/pci/cx18/cx18-audio.c b/drivers/media/pci/cx18/cx18-audio.c
index 35268923911c..61fc485d3d80 100644
--- a/drivers/media/pci/cx18/cx18-audio.c
+++ b/drivers/media/pci/cx18/cx18-audio.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-audio.h b/drivers/media/pci/cx18/cx18-audio.h
index 2731d29b0ab9..f65d71a04c19 100644
--- a/drivers/media/pci/cx18/cx18-audio.h
+++ b/drivers/media/pci/cx18/cx18-audio.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int cx18_audio_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-av-audio.c b/drivers/media/pci/cx18/cx18-av-audio.c
index 4a24ffb17a7d..8b95e9aae576 100644
--- a/drivers/media/pci/cx18/cx18-av-audio.c
+++ b/drivers/media/pci/cx18/cx18-av-audio.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 7f7306fd9a7f..cf8817e9c8b9 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-av-core.h b/drivers/media/pci/cx18/cx18-av-core.h
index 4c559e86e340..c976ce6e7a78 100644
--- a/drivers/media/pci/cx18/cx18-av-core.h
+++ b/drivers/media/pci/cx18/cx18-av-core.h
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX18_AV_CORE_H_
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index 160e2e53383f..543ace7a481a 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index 246982841fec..a002537a387d 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index 5e01ea441dc4..11e898e66ce9 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index f6b921f3b0ac..667e2d7b1d03 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* hardware flags */
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index 812a2507945a..f02df985def0 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index b8eedbe51c8f..206db81ef78e 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index ef308a10e870..fef3c736fcba 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef CX18_DRIVER_H
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 03d0478170a7..d130d65828b0 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx18-version.h"
diff --git a/drivers/media/pci/cx18/cx18-dvb.h b/drivers/media/pci/cx18/cx18-dvb.h
index bf8d8f6f5455..33dfc53e3b4f 100644
--- a/drivers/media/pci/cx18/cx18-dvb.h
+++ b/drivers/media/pci/cx18/cx18-dvb.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 78b399b8613e..98467b2089fa 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index b9e5110ad043..58b00b433708 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
/* Testing/Debugging */
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index c6c83445f8bf..1b34ea1c3730 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-firmware.h b/drivers/media/pci/cx18/cx18-firmware.h
index 38d4c05e8499..bdc4b11f74f7 100644
--- a/drivers/media/pci/cx18/cx18-firmware.h
+++ b/drivers/media/pci/cx18/cx18-firmware.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int cx18_firmware_init(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 38dc6b8f8254..012859e6dc7b 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 4aea2ef88e8d..0274a17a8837 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
void cx18_gpio_init(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index c9329371a3f8..eabdd4c5520a 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-i2c.h b/drivers/media/pci/cx18/cx18-i2c.h
index 1180fdc8d983..bf315ecbe5dd 100644
--- a/drivers/media/pci/cx18/cx18-i2c.h
+++ b/drivers/media/pci/cx18/cx18-i2c.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int cx18_i2c_register(struct cx18 *cx, unsigned idx);
diff --git a/drivers/media/pci/cx18/cx18-io.c b/drivers/media/pci/cx18/cx18-io.c
index 49b9dbd06248..7090fdbce28f 100644
--- a/drivers/media/pci/cx18/cx18-io.c
+++ b/drivers/media/pci/cx18/cx18-io.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h
index 18974d886cf7..a3c96fb5d28d 100644
--- a/drivers/media/pci/cx18/cx18-io.h
+++ b/drivers/media/pci/cx18/cx18-io.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef CX18_IO_H
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 0faeb979ceb9..80b902b12a78 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h
index 43433969d633..413129004a89 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.h
+++ b/drivers/media/pci/cx18/cx18-ioctl.h
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
u16 cx18_service2vbi(int type);
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index 361426485e98..ff33ffda0126 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-irq.h b/drivers/media/pci/cx18/cx18-irq.h
index 30e7eaf8cb55..64496746ea46 100644
--- a/drivers/media/pci/cx18/cx18-irq.h
+++ b/drivers/media/pci/cx18/cx18-irq.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#define HW2_I2C1_INT (1 << 22)
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index d3cf3588879f..763f960fc918 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include <stdarg.h>
diff --git a/drivers/media/pci/cx18/cx18-mailbox.h b/drivers/media/pci/cx18/cx18-mailbox.h
index b63fdfaac49e..54b11322bd23 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.h
+++ b/drivers/media/pci/cx18/cx18-mailbox.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef _CX18_MAILBOX_H_
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 13e96d6055eb..d212f79fd3aa 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-queue.h b/drivers/media/pci/cx18/cx18-queue.h
index 4201ddc16091..093b04e0189c 100644
--- a/drivers/media/pci/cx18/cx18-queue.h
+++ b/drivers/media/pci/cx18/cx18-queue.h
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#define CX18_DMA_UNMAPPED ((u32) -1)
diff --git a/drivers/media/pci/cx18/cx18-scb.c b/drivers/media/pci/cx18/cx18-scb.c
index 85cc59637e54..83a92629519d 100644
--- a/drivers/media/pci/cx18/cx18-scb.c
+++ b/drivers/media/pci/cx18/cx18-scb.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-scb.h b/drivers/media/pci/cx18/cx18-scb.h
index 08877652e321..7c3eaea3021f 100644
--- a/drivers/media/pci/cx18/cx18-scb.h
+++ b/drivers/media/pci/cx18/cx18-scb.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef CX18_SCB_H
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 7f699f0ee76c..7c9381448966 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-streams.h b/drivers/media/pci/cx18/cx18-streams.h
index 27f8af9b11cd..75c86f1b2e26 100644
--- a/drivers/media/pci/cx18/cx18-streams.h
+++ b/drivers/media/pci/cx18/cx18-streams.h
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
u32 cx18_find_handle(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index 43360cbcf24b..72c74d60c6fb 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-vbi.h b/drivers/media/pci/cx18/cx18-vbi.h
index b365cf4b4668..8c514ea2d2ba 100644
--- a/drivers/media/pci/cx18/cx18-vbi.h
+++ b/drivers/media/pci/cx18/cx18-vbi.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
diff --git a/drivers/media/pci/cx18/cx18-version.h b/drivers/media/pci/cx18/cx18-version.h
index fed48b6bb67b..50728c68b835 100644
--- a/drivers/media/pci/cx18/cx18-version.h
+++ b/drivers/media/pci/cx18/cx18-version.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef CX18_VERSION_H
diff --git a/drivers/media/pci/cx18/cx18-video.c b/drivers/media/pci/cx18/cx18-video.c
index 6dc84aac8f44..697d01168b63 100644
--- a/drivers/media/pci/cx18/cx18-video.c
+++ b/drivers/media/pci/cx18/cx18-video.c
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-video.h b/drivers/media/pci/cx18/cx18-video.h
index 529006a06e5c..f6eca36e7271 100644
--- a/drivers/media/pci/cx18/cx18-video.h
+++ b/drivers/media/pci/cx18/cx18-video.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h
index 67ffe65b56a3..901ed7fac10f 100644
--- a/drivers/media/pci/cx18/cx23418.h
+++ b/drivers/media/pci/cx18/cx23418.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
#ifndef CX23418_H
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 589a168d1df4..979b66627f60 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -920,19 +920,6 @@ static const struct m88ds3103_config dvbsky_s950c_m88ds3103_config = {
.agc = 0x99,
};
-static const struct m88ds3103_config dvbsky_s952_portc_m88ds3103_config = {
- .i2c_addr = 0x68,
- .clock = 27000000,
- .i2c_wr_max = 33,
- .clock_out = 0,
- .ts_mode = M88DS3103_TS_SERIAL,
- .ts_clk = 96000,
- .ts_clk_pol = 0,
- .lnb_en_pol = 1,
- .lnb_hv_pol = 0,
- .agc = 0x99,
-};
-
static const struct m88ds3103_config hauppauge_hvr5525_m88ds3103_config = {
.i2c_addr = 0x69,
.clock = 27000000,
@@ -1206,11 +1193,11 @@ static int dvb_register(struct cx23885_tsport *port)
struct si2165_platform_data si2165_pdata;
struct si2157_config si2157_config;
struct ts2020_config ts2020_config;
+ struct m88ds3103_platform_data m88ds3103_pdata;
struct i2c_board_info info;
struct i2c_adapter *adapter;
struct i2c_client *client_demod = NULL, *client_tuner = NULL;
struct i2c_client *client_sec = NULL;
- const struct m88ds3103_config *p_m88ds3103_config = NULL;
int (*p_set_voltage)(struct dvb_frontend *fe,
enum fe_sec_voltage voltage) = NULL;
int mfe_shared = 0; /* bus not shared by default */
@@ -2103,27 +2090,50 @@ static int dvb_register(struct cx23885_tsport *port)
port->i2c_client_tuner = client_tuner;
break;
case CX23885_BOARD_DVBSKY_S952:
+ /* attach frontend */
+ memset(&m88ds3103_pdata, 0, sizeof(m88ds3103_pdata));
+ m88ds3103_pdata.clk = 27000000;
+ m88ds3103_pdata.i2c_wr_max = 33;
+ m88ds3103_pdata.agc = 0x99;
+ m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_DISABLED;
+ m88ds3103_pdata.lnb_en_pol = 1;
+
switch (port->nr) {
/* port b */
case 1:
i2c_bus = &dev->i2c_bus[1];
- p_m88ds3103_config = &dvbsky_t9580_m88ds3103_config;
+ m88ds3103_pdata.ts_mode = M88DS3103_TS_PARALLEL;
+ m88ds3103_pdata.ts_clk = 16000;
+ m88ds3103_pdata.ts_clk_pol = 1;
p_set_voltage = dvbsky_t9580_set_voltage;
break;
/* port c */
case 2:
i2c_bus = &dev->i2c_bus[0];
- p_m88ds3103_config = &dvbsky_s952_portc_m88ds3103_config;
+ m88ds3103_pdata.ts_mode = M88DS3103_TS_SERIAL;
+ m88ds3103_pdata.ts_clk = 96000;
+ m88ds3103_pdata.ts_clk_pol = 0;
p_set_voltage = dvbsky_s952_portc_set_voltage;
break;
+ default:
+ return 0;
}
- /* attach frontend */
- fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
- p_m88ds3103_config,
- &i2c_bus->i2c_adap, &adapter);
- if (fe0->dvb.frontend == NULL)
- break;
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, "m88ds3103", I2C_NAME_SIZE);
+ info.addr = 0x68;
+ info.platform_data = &m88ds3103_pdata;
+ request_module(info.type);
+ client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ if (client_demod == NULL || client_demod->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+ adapter = m88ds3103_pdata.get_i2c_adapter(client_demod);
+ fe0->dvb.frontend = m88ds3103_pdata.get_dvb_frontend(client_demod);
/* attach tuner */
memset(&ts2020_config, 0, sizeof(ts2020_config));
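The DVBSky S952 hunk above converts the m88ds3103 demodulator from the legacy dvb_attach()/config-struct binding to an I2C client carrying platform data, with the frontend and the tuner I2C adapter handed back through callbacks in that platform data. A condensed sketch of the pattern, assuming it were wrapped in a helper (the function name and locals below are illustrative, not part of the patch; error handling is trimmed):

static struct dvb_frontend *bind_m88ds3103(struct i2c_adapter *adap,
					   struct m88ds3103_platform_data *pdata,
					   struct i2c_client **demod_out)
{
	struct i2c_board_info info = {};
	struct i2c_client *client;

	strlcpy(info.type, "m88ds3103", I2C_NAME_SIZE);
	info.addr = 0x68;		/* demod I2C address (board specific) */
	info.platform_data = pdata;

	request_module(info.type);	/* make sure the demod module is loaded */
	client = i2c_new_device(adap, &info);
	if (!client || !client->dev.driver)
		return NULL;
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return NULL;
	}

	*demod_out = client;
	/* frontend and tuner bus are obtained via the platform-data callbacks */
	return pdata->get_dvb_frontend(client);
}

The per-port ts_mode/ts_clk/ts_clk_pol values that used to live in dvbsky_s952_portc_m88ds3103_config are now filled into the platform data inside the port switch, which is why the static config table entry can be dropped.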
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 1f092febdbd1..4367cb3162b6 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -267,7 +267,6 @@ int cx23885_input_init(struct cx23885_dev *dev)
struct cx23885_kernel_ir *kernel_ir;
struct rc_dev *rc;
char *rc_map;
- enum rc_driver_type driver_type;
u64 allowed_protos;
int ret;
@@ -285,37 +284,32 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
case CX23885_BOARD_MYGICA_X8507:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
break;
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TBS_NEC;
break;
@@ -326,14 +320,12 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_DVBSKY_S952:
case CX23885_BOARD_DVBSKY_T982:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
rc_map = RC_MAP_DVBSKY;
break;
case CX23885_BOARD_TT_CT2_4500_CI:
/* Integrated CX23885 IR controller */
- driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_ALL;
+ allowed_protos = RC_BIT_ALL_IR_DECODER;
rc_map = RC_MAP_TT_1500;
break;
default:
@@ -352,7 +344,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
pci_name(dev->pci));
/* input device */
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc) {
ret = -ENOMEM;
goto err_out_free;
@@ -371,7 +363,6 @@ int cx23885_input_init(struct cx23885_dev *dev)
rc->input_id.product = dev->pci->device;
}
rc->dev.parent = &dev->pci->dev;
- rc->driver_type = driver_type;
rc->allowed_protocols = allowed_protos;
rc->priv = kernel_ir;
rc->open = cx23885_input_ir_open;
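This hunk tracks an rc-core API change: the driver type is now passed to rc_allocate_device() and is fixed for the lifetime of the device, so the per-board driver_type variable and the later rc->driver_type assignment disappear. A minimal sketch of the resulting setup, with the protocol and map values taken from the Hauppauge case above and error handling trimmed:

	struct rc_dev *rc = rc_allocate_device(RC_DRIVER_IR_RAW);

	if (!rc)
		return -ENOMEM;

	rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;	/* raw-IR decoders only */
	rc->map_name = RC_MAP_HAUPPAUGE;
	/* rc->driver_type is no longer written by the driver */
	return rc_register_device(rc);

RC_BIT_ALL is swapped for RC_BIT_ALL_IR_DECODER at the same time, since a raw-IR device can only deliver protocols the in-kernel decoders understand.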
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 4711583de8fe..519b81c0c837 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 7c8edb6181ec..b94eb1c0023d 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.h b/drivers/media/pci/cx25821/cx25821-audio-upstream.h
index af2ae7c5815a..2bc875d1ec9f 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.h
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/mutex.h>
diff --git a/drivers/media/pci/cx25821/cx25821-audio.h b/drivers/media/pci/cx25821/cx25821-audio.h
index 1fc2d24f5110..55df64091539 100644
--- a/drivers/media/pci/cx25821/cx25821-audio.h
+++ b/drivers/media/pci/cx25821/cx25821-audio.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __CX25821_AUDIO_H__
diff --git a/drivers/media/pci/cx25821/cx25821-biffuncs.h b/drivers/media/pci/cx25821/cx25821-biffuncs.h
index 937f5a70fb7a..7c0ada3e382d 100644
--- a/drivers/media/pci/cx25821/cx25821-biffuncs.h
+++ b/drivers/media/pci/cx25821/cx25821-biffuncs.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _BITFUNCS_H
diff --git a/drivers/media/pci/cx25821/cx25821-cards.c b/drivers/media/pci/cx25821/cx25821-cards.c
index f2ebc989b303..f3b4d89d90c8 100644
--- a/drivers/media/pci/cx25821/cx25821-cards.c
+++ b/drivers/media/pci/cx25821/cx25821-cards.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 9a5f912ca859..fbc0229183bd 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-gpio.c b/drivers/media/pci/cx25821/cx25821-gpio.c
index 95e8ddf62947..76b8f619e55a 100644
--- a/drivers/media/pci/cx25821/cx25821-gpio.c
+++ b/drivers/media/pci/cx25821/cx25821-gpio.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 63ba25b82692..263a1cf36ef1 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-defines.h b/drivers/media/pci/cx25821/cx25821-medusa-defines.h
index 7a9e6470ba22..36977090ec4c 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-defines.h
+++ b/drivers/media/pci/cx25821/cx25821-medusa-defines.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MEDUSA_DEF_H_
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-reg.h b/drivers/media/pci/cx25821/cx25821-medusa-reg.h
index 2e10643a86b7..6ef63b867879 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-reg.h
+++ b/drivers/media/pci/cx25821/cx25821-medusa-reg.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MEDUSA_REGISTERS__
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.c b/drivers/media/pci/cx25821/cx25821-medusa-video.c
index 43bdfa4dfba1..0a9db050b175 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-medusa-video.h b/drivers/media/pci/cx25821/cx25821-medusa-video.h
index 8bf602ff27b1..176b35333f2b 100644
--- a/drivers/media/pci/cx25821/cx25821-medusa-video.h
+++ b/drivers/media/pci/cx25821/cx25821-medusa-video.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MEDUSA_VIDEO_H
diff --git a/drivers/media/pci/cx25821/cx25821-reg.h b/drivers/media/pci/cx25821/cx25821-reg.h
index a3fc25a4dc0b..061cdeb9b45b 100644
--- a/drivers/media/pci/cx25821/cx25821-reg.h
+++ b/drivers/media/pci/cx25821/cx25821-reg.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __CX25821_REGISTERS__
diff --git a/drivers/media/pci/cx25821/cx25821-sram.h b/drivers/media/pci/cx25821/cx25821-sram.h
index 5f05d153bc4d..b94e0d4df664 100644
--- a/drivers/media/pci/cx25821/cx25821-sram.h
+++ b/drivers/media/pci/cx25821/cx25821-sram.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ATHENA_SRAM_H__
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index a664997e1958..6c838c8a7924 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.h b/drivers/media/pci/cx25821/cx25821-video-upstream.h
index 268ec8aa6a61..b6cf95f2d11b 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.h
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/mutex.h>
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 7ce352a0f2d3..dbaf42ec26cd 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -18,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/pci/cx25821/cx25821-video.h b/drivers/media/pci/cx25821/cx25821-video.h
index ab63b3858acf..246011c1ba08 100644
--- a/drivers/media/pci/cx25821/cx25821-video.h
+++ b/drivers/media/pci/cx25821/cx25821-video.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef CX25821_VIDEO_H_
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index ef61dea982e8..0f20e89b0cde 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef CX25821_H_
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index c7b3cb406499..01f2e472a2a0 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -274,7 +274,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
*/
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- dev = rc_allocate_device();
+ dev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir || !dev)
goto err_out_free;
@@ -484,7 +484,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
dev->scancode_mask = hardware_mask;
if (ir->sampling) {
- dev->driver_type = RC_DRIVER_IR_RAW;
dev->timeout = 10 * 1000 * 1000; /* 10 ms */
} else {
dev->driver_type = RC_DRIVER_SCANCODE;
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index a6c9fe235974..340cff02dee2 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index a3ccb318b500..6ae810324b4e 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index be87fbd90456..185b423818d3 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _DDBRIDGE_H_
diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig
index 173daf0c0847..14fa7e40f2a6 100644
--- a/drivers/media/pci/dm1105/Kconfig
+++ b/drivers/media/pci/dm1105/Kconfig
@@ -1,6 +1,6 @@
config DVB_DM1105
tristate "SDMC DM1105 based PCI cards"
- depends on DVB_CORE && PCI && I2C
+ depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a589aa78d1d9..a7724b78fbb4 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/i2c.h>
@@ -743,7 +739,7 @@ static int dm1105_ir_init(struct dm1105_dev *dm1105)
struct rc_dev *dev;
int err = -ENOMEM;
- dev = rc_allocate_device();
+ dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev)
return -ENOMEM;
@@ -752,7 +748,6 @@ static int dm1105_ir_init(struct dm1105_dev *dm1105)
dev->driver_name = MODULE_NAME;
dev->map_name = RC_MAP_DM1105_NEC;
- dev->driver_type = RC_DRIVER_SCANCODE;
dev->input_name = "DVB on-card IR receiver";
dev->input_phys = dm1105->ir.input_phys;
dev->input_id.bustype = BUS_PCI;
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 6e5867c57305..c72cbbd2d40c 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -28,6 +28,19 @@ config VIDEO_IVTV
To compile this driver as a module, choose M here: the
module will be called ivtv.
+config VIDEO_IVTV_DEPRECATED_IOCTLS
+ bool "enable the DVB ioctls abuse on ivtv driver"
+ depends on VIDEO_IVTV
+ default n
+ ---help---
+ Enable the usage of the DVB set of ioctls that were abused by
+ the IVTV driver for a while.
+
+ Those ioctls have not been needed for a long time, as IVTV has
+ implemented the proper V4L2 ioctls since kernel 3.3.
+
+ If unsure, say N.
+
config VIDEO_IVTV_ALSA
tristate "Conexant cx23415/cx23416 ALSA interface for PCM audio capture"
depends on VIDEO_IVTV && SND
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 374f45f81ab3..029f52733f70 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -15,38 +15,25 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-
-#include <media/v4l2-device.h>
-
-#include <sound/core.h>
-#include <sound/initval.h>
-
#include "ivtv-driver.h"
#include "ivtv-version.h"
#include "ivtv-alsa.h"
#include "ivtv-alsa-mixer.h"
#include "ivtv-alsa-pcm.h"
+#include <sound/core.h>
+#include <sound/initval.h>
+
int ivtv_alsa_debug;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-#define IVTV_DEBUG_ALSA_INFO(fmt, arg...) \
+#define IVTV_DEBUG_ALSA_INFO(__fmt, __arg...) \
do { \
if (ivtv_alsa_debug & 2) \
- pr_info("%s: " fmt, "ivtv-alsa", ## arg); \
+ printk(KERN_INFO pr_fmt("%s: alsa:" __fmt), \
+ __func__, ##__arg); \
} while (0)
module_param_named(debug, ivtv_alsa_debug, int, 0644);
@@ -235,8 +222,7 @@ static int ivtv_alsa_load(struct ivtv *itv)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
if (s->vdev.v4l2_dev == NULL) {
- IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
- __func__);
+ IVTV_DEBUG_ALSA_INFO("PCM stream for card is disabled - skipping\n");
return 0;
}
@@ -250,8 +236,7 @@ static int ivtv_alsa_load(struct ivtv *itv)
IVTV_ALSA_ERR("%s: failed to create struct snd_ivtv_card\n",
__func__);
} else {
- IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance \n",
- __func__);
+ IVTV_DEBUG_ALSA_INFO("created ivtv ALSA interface instance\n");
}
return 0;
}
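With the reworked IVTV_DEBUG_ALSA_INFO() above, the macro itself prepends pr_fmt() and __func__, so call sites stop passing the "%s: "/__func__ pair explicitly. The before/after from this hunk:

	/* before */
	IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance\n", __func__);
	/* after */
	IVTV_DEBUG_ALSA_INFO("created ivtv ALSA interface instance\n");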
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
index 79b24bde4a39..ba372a23eb5c 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
@@ -13,28 +13,18 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/videodev2.h>
+#include "ivtv-alsa.h"
+#include "ivtv-alsa-mixer.h"
+#include "ivtv-driver.h"
-#include <media/v4l2-device.h>
+#include <linux/videodev2.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
-#include "ivtv-alsa.h"
-#include "ivtv-driver.h"
-
/*
* Note the cx25840-core volume scale is funny, due to the alignment of the
* scale with another chip's range:
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-mixer.h b/drivers/media/pci/ivtv/ivtv-alsa-mixer.h
index cdde36704d53..382bc36bc529 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-mixer.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-mixer.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int __init snd_ivtv_mixer_create(struct snd_ivtv_card *itvsc);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index a26f9800eca3..807ead20d212 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -16,22 +16,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/vmalloc.h>
-
-#include <media/v4l2-device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-streams.h"
@@ -39,6 +25,12 @@
#include "ivtv-alsa.h"
#include "ivtv-alsa-pcm.h"
+#include <linux/vmalloc.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");
@@ -174,6 +166,7 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
/* See if the stream is available */
if (ivtv_claim_stream(&item, item.type)) {
/* No, it's already in use */
+ v4l2_fh_exit(&item.fh);
snd_ivtv_unlock(itvsc);
return -EBUSY;
}
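The added v4l2_fh_exit() balances the v4l2_fh_init() done earlier in the same open path, presumably so the temporary file handle set up for the claim attempt is also torn down on this early-return path. The fixed block, for reference:

	if (ivtv_claim_stream(&item, item.type)) {
		/* stream already in use: undo the fh init before returning */
		v4l2_fh_exit(&item.fh);
		snd_ivtv_unlock(itvsc);
		return -EBUSY;
	}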
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 186814e0b2d4..147586a886fc 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa.h b/drivers/media/pci/ivtv/ivtv-alsa.h
index 4a0d8f2c254d..eae646223367 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa.h
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
*/
struct snd_card;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 0a3b80a4bd69..ab2ae53618e8 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1452,7 +1452,7 @@ static void ivtv_remove(struct pci_dev *pdev)
for (i = 0; i < IVTV_VBI_FRAMES; i++)
kfree(itv->vbi.sliced_mpeg_data[i]);
- printk(KERN_INFO "ivtv: Removed %s\n", itv->card_name);
+ pr_info("Removed %s\n", itv->card_name);
v4l2_device_unregister(&itv->v4l2_dev);
kfree(itv);
@@ -1468,25 +1468,25 @@ static struct pci_driver ivtv_pci_driver = {
static int __init module_start(void)
{
- printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION);
+ pr_info("Start initialization, version %s\n", IVTV_VERSION);
/* Validate parameters */
if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) {
- printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n",
+ pr_err("Exiting, ivtv_first_minor must be between 0 and %d\n",
IVTV_MAX_CARDS - 1);
return -1;
}
if (ivtv_debug < 0 || ivtv_debug > 2047) {
ivtv_debug = 0;
- printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 2047\n");
+ pr_info("Debug value must be >= 0 and <= 2047\n");
}
if (pci_register_driver(&ivtv_pci_driver)) {
- printk(KERN_ERR "ivtv: Error detecting PCI card\n");
+ pr_err("Error detecting PCI card\n");
return -ENODEV;
}
- printk(KERN_INFO "ivtv: End initialization\n");
+ pr_info("End initialization\n");
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index 6b09a9514d64..cde452e30746 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -22,6 +22,8 @@
#ifndef IVTV_DRIVER_H
#define IVTV_DRIVER_H
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
/* Internal header for ivtv project:
* Driver for the cx23415/6 chip.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
@@ -36,38 +38,37 @@
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*/
-#include <linux/module.h>
-#include <linux/init.h>
+#include <asm/byteorder.h>
#include <linux/delay.h>
-#include <linux/sched.h>
+#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ivtv.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/list.h>
-#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/scatterlist.h>
-#include <linux/kthread.h>
-#include <linux/mutex.h>
+#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <asm/byteorder.h>
+#include <linux/unistd.h>
-#include <linux/dvb/video.h>
-#include <linux/dvb/audio.h>
+#include <media/drv-intf/cx2341x.h>
+#include <media/i2c/ir-kbd-i2c.h>
+#include <media/tuner.h>
#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
-#include <media/tuner.h>
-#include <media/drv-intf/cx2341x.h>
-#include <media/i2c/ir-kbd-i2c.h>
-
-#include <linux/ivtv.h>
+#include <media/v4l2-ioctl.h>
/* Memory layout */
#define IVTV_ENCODER_OFFSET 0x00000000
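Defining pr_fmt() at the very top of ivtv-driver.h, before any header that pulls in printk.h, is what lets the later printk()-to-pr_info()/pr_err() conversions in ivtv-driver.c and ivtvfb.c drop their hand-written "ivtv: "/"ivtvfb: " prefixes. Roughly, for a file that includes this header:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_info("Removed %s\n", itv->card_name);
	/* expands to approximately:
	 * printk(KERN_INFO "ivtv" ": " "Removed %s\n", itv->card_name);
	 */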
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 2dc4b20f3ac0..f956188f7f19 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -35,7 +35,10 @@
#include <media/i2c/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-event.h>
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
#include <linux/dvb/audio.h>
+#include <linux/dvb/video.h>
+#endif
u16 ivtv_service2vbi(int type)
{
@@ -1620,13 +1623,23 @@ static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder
return ivtv_video_command(itv, id, dec, true);
}
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
+static __inline__ void warn_deprecated_ioctl(const char *name)
+{
+ pr_warn_once("warning: the %s ioctl is deprecated. Don't use it, as it will be removed soon\n",
+ name);
+}
+#endif
+
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
- int nonblocking = filp->f_flags & O_NONBLOCK;
struct ivtv_stream *s = &itv->streams[id->type];
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
+ int nonblocking = filp->f_flags & O_NONBLOCK;
unsigned long iarg = (unsigned long)arg;
+#endif
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
@@ -1658,12 +1671,12 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
return ivtv_passthrough_mode(itv, *(int *)arg != 0);
-
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
case VIDEO_GET_PTS: {
s64 *pts = arg;
s64 frame;
- IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n");
+ warn_deprecated_ioctl("VIDEO_GET_PTS");
if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
*pts = s->dma_pts;
break;
@@ -1677,7 +1690,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
s64 *frame = arg;
s64 pts;
- IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n");
+ warn_deprecated_ioctl("VIDEO_GET_FRAME_COUNT");
if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
*frame = 0;
break;
@@ -1690,7 +1703,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
case VIDEO_PLAY: {
struct v4l2_decoder_cmd dc;
- IVTV_DEBUG_IOCTL("VIDEO_PLAY\n");
+ warn_deprecated_ioctl("VIDEO_PLAY");
memset(&dc, 0, sizeof(dc));
dc.cmd = V4L2_DEC_CMD_START;
return ivtv_video_command(itv, id, &dc, 0);
@@ -1699,7 +1712,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
case VIDEO_STOP: {
struct v4l2_decoder_cmd dc;
- IVTV_DEBUG_IOCTL("VIDEO_STOP\n");
+ warn_deprecated_ioctl("VIDEO_STOP");
memset(&dc, 0, sizeof(dc));
dc.cmd = V4L2_DEC_CMD_STOP;
dc.flags = V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY;
@@ -1709,7 +1722,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
case VIDEO_FREEZE: {
struct v4l2_decoder_cmd dc;
- IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n");
+ warn_deprecated_ioctl("VIDEO_FREEZE");
memset(&dc, 0, sizeof(dc));
dc.cmd = V4L2_DEC_CMD_PAUSE;
return ivtv_video_command(itv, id, &dc, 0);
@@ -1718,7 +1731,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
case VIDEO_CONTINUE: {
struct v4l2_decoder_cmd dc;
- IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n");
+ warn_deprecated_ioctl("VIDEO_CONTINUE");
memset(&dc, 0, sizeof(dc));
dc.cmd = V4L2_DEC_CMD_RESUME;
return ivtv_video_command(itv, id, &dc, 0);
@@ -1732,9 +1745,9 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
int try = (cmd == VIDEO_TRY_COMMAND);
if (try)
- IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND %d\n", dc->cmd);
+ warn_deprecated_ioctl("VIDEO_TRY_COMMAND");
else
- IVTV_DEBUG_IOCTL("VIDEO_COMMAND %d\n", dc->cmd);
+ warn_deprecated_ioctl("VIDEO_COMMAND");
return ivtv_video_command(itv, id, dc, try);
}
@@ -1742,7 +1755,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
struct video_event *ev = arg;
DEFINE_WAIT(wait);
- IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n");
+ warn_deprecated_ioctl("VIDEO_GET_EVENT");
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
memset(ev, 0, sizeof(*ev));
@@ -1785,28 +1798,28 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
}
case VIDEO_SELECT_SOURCE:
- IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
+ warn_deprecated_ioctl("VIDEO_SELECT_SOURCE");
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX);
case AUDIO_SET_MUTE:
- IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
+ warn_deprecated_ioctl("AUDIO_SET_MUTE");
itv->speed_mute_audio = iarg;
return 0;
case AUDIO_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
+ warn_deprecated_ioctl("AUDIO_CHANNEL_SELECT");
if (iarg > AUDIO_STEREO_SWAPPED)
return -EINVAL;
return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1);
case AUDIO_BILINGUAL_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
+ warn_deprecated_ioctl("AUDIO_BILINGUAL_CHANNEL_SELECT");
if (iarg > AUDIO_STEREO_SWAPPED)
return -EINVAL;
return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1);
-
+#endif
default:
return -EINVAL;
}
@@ -1821,6 +1834,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
if (!valid_prio) {
switch (cmd) {
case IVTV_IOC_PASSTHROUGH_MODE:
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
case VIDEO_PLAY:
case VIDEO_STOP:
case VIDEO_FREEZE:
@@ -1830,6 +1844,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
case AUDIO_SET_MUTE:
case AUDIO_CHANNEL_SELECT:
case AUDIO_BILINGUAL_CHANNEL_SELECT:
+#endif
return -EBUSY;
}
}
@@ -1847,6 +1862,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
case IVTV_IOC_DMA_FRAME:
case IVTV_IOC_PASSTHROUGH_MODE:
+#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
case VIDEO_GET_PTS:
case VIDEO_GET_FRAME_COUNT:
case VIDEO_GET_EVENT:
@@ -1860,6 +1876,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
case AUDIO_SET_MUTE:
case AUDIO_CHANNEL_SELECT:
case AUDIO_BILINGUAL_CHANNEL_SELECT:
+#endif
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
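Everything between the new #ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS guards compiles away unless the Kconfig option added earlier in this series is enabled; when it is, each legacy DVB ioctl keeps its existing translation onto the V4L2 decoder commands but now warns once that it is deprecated. Condensed from the hunk above, one case shown (the rest follow the same shape):

	#ifdef CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS
	case VIDEO_PLAY: {
		struct v4l2_decoder_cmd dc;

		warn_deprecated_ioctl("VIDEO_PLAY");
		memset(&dc, 0, sizeof(dc));
		dc.cmd = V4L2_DEC_CMD_START;
		return ivtv_video_command(itv, id, &dc, 0);
	}
	#endif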
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.c b/drivers/media/pci/ivtv/ivtv-mailbox.c
index e3ce96763785..9a2506a5edbe 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.c
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.c
@@ -19,11 +19,11 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <stdarg.h>
-
#include "ivtv-driver.h"
#include "ivtv-mailbox.h"
+#include <stdarg.h>
+
/* Firmware mailbox flags*/
#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE 0x00000002
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 612a8402cf4d..621b2f613d81 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,25 +38,20 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "ivtv-driver.h"
+#include "ivtv-cards.h"
+#include "ivtv-i2c.h"
+#include "ivtv-udma.h"
+#include "ivtv-mailbox.h"
+#include "ivtv-firmware.h"
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/ivtvfb.h>
-#include <linux/slab.h>
#ifdef CONFIG_X86_64
#include <asm/pat.h>
#endif
-#include "ivtv-driver.h"
-#include "ivtv-cards.h"
-#include "ivtv-i2c.h"
-#include "ivtv-udma.h"
-#include "ivtv-mailbox.h"
-#include "ivtv-firmware.h"
-
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
@@ -1275,7 +1270,7 @@ static int __init ivtvfb_init(void)
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
- printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
+ pr_err("ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
IVTV_MAX_CARDS - 1);
return -EINVAL;
}
@@ -1284,7 +1279,7 @@ static int __init ivtvfb_init(void)
err = driver_for_each_device(drv, NULL, &registered, ivtvfb_callback_init);
(void)err; /* suppress compiler warning */
if (!registered) {
- printk(KERN_ERR "ivtvfb: no cards found\n");
+ pr_err("no cards found\n");
return -ENODEV;
}
return 0;
@@ -1295,7 +1290,7 @@ static void ivtvfb_cleanup(void)
struct device_driver *drv;
int err;
- printk(KERN_INFO "ivtvfb: Unloading framebuffer module\n");
+ pr_info("Unloading framebuffer module\n");
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
diff --git a/drivers/media/pci/mantis/mantis_dvb.c b/drivers/media/pci/mantis/mantis_dvb.c
index 5a71e1791cf5..0db4de3a2285 100644
--- a/drivers/media/pci/mantis/mantis_dvb.c
+++ b/drivers/media/pci/mantis/mantis_dvb.c
@@ -226,11 +226,12 @@ int mantis_dvb_init(struct mantis_pci *mantis)
goto err5;
} else {
if (mantis->fe == NULL) {
+ result = -ENOMEM;
dprintk(MANTIS_ERROR, 1, "FE <NULL>");
goto err5;
}
-
- if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
+ result = dvb_register_frontend(&mantis->dvb_adapter, mantis->fe);
+ if (result) {
dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");
if (mantis->fe->ops.release)
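The mantis_dvb_init() change above makes the function report why registration failed instead of jumping to err5 with a stale (possibly zero) result when the frontend is missing or fails to register. Condensed sketch of the new flow, cleanup at the labels omitted:

	if (mantis->fe == NULL) {
		result = -ENOMEM;
		goto err5;
	}

	result = dvb_register_frontend(&mantis->dvb_adapter, mantis->fe);
	if (result)
		goto err5;	/* propagate the real error code */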
diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c
index 7f7f1d4d7bb1..50d10cb7d49d 100644
--- a/drivers/media/pci/mantis/mantis_input.c
+++ b/drivers/media/pci/mantis/mantis_input.c
@@ -39,7 +39,7 @@ int mantis_input_init(struct mantis_pci *mantis)
struct rc_dev *dev;
int err;
- dev = rc_allocate_device();
+ dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev) {
dprintk(MANTIS_ERROR, 1, "Remote device allocation failed");
err = -ENOMEM;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 24fba633c217..9c4a024745de 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
@@ -1663,6 +1659,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto outenabledev;
}
+ ret = -EIO;
mchip_adr = pci_resource_start(meye.mchip_dev,0);
if (!mchip_adr) {
v4l2_err(v4l2_dev, "meye: mchip has no device base address\n");
diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h
index 751be5e533c7..c4a8a5fe040c 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/media/pci/meye/meye.h
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MEYE_PRIV_H_
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 423e8c889310..bb49620540c5 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -19,12 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
@@ -781,12 +777,6 @@ static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
-static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": link reset\n");
- return 0;
-}
-
static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
{
printk(KERN_INFO DEVICE_NAME ": slot reset\n");
@@ -800,7 +790,6 @@ static void ngene_resume(struct pci_dev *dev)
static const struct pci_error_handlers ngene_errors = {
.error_detected = ngene_error_detected,
- .link_reset = ngene_link_reset,
.slot_reset = ngene_slot_reset,
.resume = ngene_resume,
};
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 4e924e2d1638..ce69e648b663 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -19,12 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ngene/ngene-dvb.c b/drivers/media/pci/ngene/ngene-dvb.c
index 59bb2858c8d0..03fc218a45e9 100644
--- a/drivers/media/pci/ngene/ngene-dvb.c
+++ b/drivers/media/pci/ngene/ngene-dvb.c
@@ -19,12 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c
index d28554f8ce99..cf39fcf54adf 100644
--- a/drivers/media/pci/ngene/ngene-i2c.c
+++ b/drivers/media/pci/ngene/ngene-i2c.c
@@ -19,12 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
/* FIXME - some of these can probably be removed */
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index fa30930d7047..10d8f74c4f0a 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -13,12 +13,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef _NGENE_H_
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 65afb71ff79f..74838109afe5 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/i2c.h>
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index d5ee82aee9e8..da1eebd2016f 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index 249273b2e0f2..f75f69556be7 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.h b/drivers/media/pci/pt1/va1j5jf8007s.h
index b7d6f05a0e02..efbe6ccae8b4 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.h
+++ b/drivers/media/pci/pt1/va1j5jf8007s.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef VA1J5JF8007S_H
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index e0766e69a370..63fda79a75c0 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.h b/drivers/media/pci/pt1/va1j5jf8007t.h
index 2903be519ef5..6fb119c6e73a 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.h
+++ b/drivers/media/pci/pt1/va1j5jf8007t.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef VA1J5JF8007T_H
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 8a35ecfb75e3..bf358ec7aca5 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 2b60af493de4..321253827997 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 7d6bb5c9343f..7976c5a12ca8 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index 598b8bbfe726..efdece5ab11c 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index f0fe2524259f..b1d3648dcba1 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index dca0592c5f47..9d0e69eae036 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 823b75ed47e1..78849c19f68a 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "saa7134.h"
@@ -846,7 +842,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!ir || !rc) {
err = -ENOMEM;
goto err_out_free;
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 7eaf36a41db9..578e03f8c041 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 38f94b742e28..68d400e1e240 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index cf9a31e0a390..46193370e41a 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index cbb173d99085..4b1c4327f112 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7134.h"
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 3849083526a7..816b5282d671 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define SAA7134_VERSION "0, 2, 17"
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index e7e586c1ba53..e318ccf81277 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/wait.h>
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 62c34504199d..a0d2129c6ca9 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index e305c02f9dc9..b2ff82fa7116 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7164.h"
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index 15a98c638c55..0e1cd7e153ca 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 45951b3cc251..f55c177fd1e4 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/wait.h>
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 03a1511a92be..75eed4cc4823 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index cd3eeda5250b..e76d3bafe2ce 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7164.h"
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 68124ce7ebc3..f21c245a54f7 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7164.h"
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 8568adfd7ece..4ba5eade7ce2 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/firmware.h>
@@ -309,7 +305,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
break;
}
if (err_flags & SAA_DEVICE_NO_IMAGE) {
- printk(KERN_ERR "%s() no first image\n",
+ printk(KERN_ERR "%s() no second image\n",
__func__);
break;
}
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 024f4e29e840..430f6789f222 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/saa7164/saa7164-reg.h b/drivers/media/pci/saa7164/saa7164-reg.h
index 37521a2ee504..5cf842112e43 100644
--- a/drivers/media/pci/saa7164/saa7164-reg.h
+++ b/drivers/media/pci/saa7164/saa7164-reg.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* TODO: Retest the driver with errors expressed as negatives */
diff --git a/drivers/media/pci/saa7164/saa7164-types.h b/drivers/media/pci/saa7164/saa7164-types.h
index 1efba6c64ebf..ae241103b261 100644
--- a/drivers/media/pci/saa7164/saa7164-types.h
+++ b/drivers/media/pci/saa7164/saa7164-types.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* TODO: Cleanup and shorten the namespace */
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index e5dcb81029d3..9255d7d23947 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "saa7164.h"
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 97411b0384c1..81b3f0e19993 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index 826c7c75e64d..d2730c3fdbae 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -183,7 +183,7 @@ int smi_ir_init(struct smi_dev *dev)
struct rc_dev *rc_dev;
struct smi_rc *ir = &dev->ir;
- rc_dev = rc_allocate_device();
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev)
return -ENOMEM;
@@ -202,7 +202,6 @@ int smi_ir_init(struct smi_dev *dev)
rc_dev->input_id.product = dev->pci_dev->subsystem_device;
rc_dev->dev.parent = &dev->pci_dev->dev;
- rc_dev->driver_type = RC_DRIVER_SCANCODE;
rc_dev->map_name = dev->info->rc_map;
ir->rc_dev = rc_dev;
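
The saa7134-input hunk earlier, the smipcie hunk just above, and the budget-ci hunk further below all reflect the same rc-core API change: rc_allocate_device() now takes the driver type as an argument, so the separate rc_dev->driver_type assignment is dropped. A minimal sketch of the updated call pattern (error handling and keymap setup trimmed; the helper name and map choice are illustrative, not taken from these drivers):

	#include <media/rc-core.h>

	static int example_ir_init(void)	/* hypothetical helper */
	{
		struct rc_dev *rc = rc_allocate_device(RC_DRIVER_SCANCODE);

		if (!rc)
			return -ENOMEM;

		/* driver_type is already set by rc_allocate_device() */
		rc->map_name = RC_MAP_EMPTY;	/* illustrative choice */

		return rc_register_device(rc);
	}
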
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 6a35107aca25..36e93540bb49 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -350,7 +350,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
int solo_g723_init(struct solo_dev *solo_dev)
{
- static struct snd_device_ops ops = { NULL };
+ static struct snd_device_ops ops = { };
struct snd_card *card;
struct snd_kcontrol_new kctl;
char name[32];
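
In the solo6x10 hunk just above, the ops table initializer changes from { NULL } to empty braces; both zero-initialize every member, the empty form simply avoids singling out the first callback. As a one-line sketch:

	static struct snd_device_ops ops = { };	/* all callbacks left NULL */
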
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index aeb2b4e2db35..6343d24eb1d5 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -377,7 +377,7 @@ static void stop_streaming(struct vb2_queue *vq)
spin_unlock(&vip->lock);
}
-static struct vb2_ops vip_video_qops = {
+static const struct vb2_ops vip_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
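
Marking the sta2x11 vb2_ops table const lets it live in read-only data, since nothing assigns to it after initialization. A sketch of the resulting definition, repeating only the first callback from the hunk above:

	static const struct vb2_ops vip_video_qops = {
		.queue_setup	= queue_setup,
		/* remaining callbacks unchanged */
	};
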
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.h b/drivers/media/pci/sta2x11/sta2x11_vip.h
index 4f81a13666eb..61e5c4822b52 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.h
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.h
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Author: Anders Wallin <anders.wallin@windriver.com>
*
*/
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 6e63949d6ad0..df9395c87178 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -19,11 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 26c5696c193b..2aa4ba675194 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -18,11 +18,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 96a130fb4595..f64723aea56b 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -18,11 +18,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 520414cbe087..b2b79bb73917 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -16,11 +16,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
* the project's page is at https://linuxtv.org
*/
@@ -56,11 +53,11 @@
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count)
+ int addr, u32 val, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
- if (count <= 0 || count > 32764) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
@@ -78,12 +75,12 @@ int av7110_debiwrite(struct av7110 *av7110, u32 config,
return 0;
}
-u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
+u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
- if (count > 32764 || count <= 0) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}
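
With count switched to unsigned int in av7110_debiwrite()/av7110_debiread(), the count <= 0 half of the old range check can no longer trigger: a negative value passed by a caller would convert to a large unsigned number and be rejected by the upper bound instead, while a zero count is simply no longer rejected here. Only the count > 32764 test remains. A sketch of the resulting bounds check, using a hypothetical helper name:

	static bool debi_count_valid(unsigned int count)	/* hypothetical */
	{
		return count <= 32764;	/* upper limit used by these helpers */
	}
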
diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/media/pci/ttpci/av7110_hw.h
index 1634aba5cb84..ccb148059406 100644
--- a/drivers/media/pci/ttpci/av7110_hw.h
+++ b/drivers/media/pci/ttpci/av7110_hw.h
@@ -377,14 +377,14 @@ extern int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
/* DEBI (saa7146 data extension bus interface) access */
extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count);
+ int addr, u32 val, unsigned int count);
extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
- int addr, int count);
+ int addr, unsigned int count);
/* DEBI during interrupt */
/* single word writes */
-static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
av7110_debiwrite(av7110, config, addr, val, count);
}
@@ -397,7 +397,7 @@ static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
av7110_debiwrite(av7110, config, addr, 0, count);
}
-static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
u32 res;
@@ -408,7 +408,7 @@ static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
}
/* DEBI outside interrupts, only for count <= 4! */
-static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
@@ -417,7 +417,7 @@ static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
spin_unlock_irqrestore(&av7110->debilock, flags);
}
-static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
u32 res;
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index 0e763a784e2b..10e28f067b45 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -13,11 +13,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*/
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c
index 479aff02db81..397fe146dedd 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/media/pci/ttpci/av7110_v4l.c
@@ -16,11 +16,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
* the project's page is at https://linuxtv.org
*/
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 896c66d4b3ae..19f07d4aba6a 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -23,11 +23,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 20ad93bf0f54..68355484ba7d 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -19,11 +19,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
@@ -177,7 +174,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
struct rc_dev *dev;
int error;
- dev = rc_allocate_device();
+ dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev) {
printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
return -ENOMEM;
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 6d42dcfd4825..97499b2af714 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -24,11 +24,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index f152eda0123a..442992372008 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -20,11 +20,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index 3091b480ce22..5f17e1c9a207 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -24,11 +24,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ * To obtain the license, point your browser to
+ * http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at https://linuxtv.org
diff --git a/drivers/media/pci/ttpci/dvb_filter.h b/drivers/media/pci/ttpci/dvb_filter.h
index 375e3be184b1..3d410d02a987 100644
--- a/drivers/media/pci/ttpci/dvb_filter.h
+++ b/drivers/media/pci/ttpci/dvb_filter.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _DVB_FILTER_H_
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 71a0453b1af1..336e2f9bc1b6 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -74,7 +74,7 @@ static const char *dma_mode_name(unsigned int mode)
static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp)
{
- return sprintf(buffer, dma_mode_name(dma_mode));
+ return sprintf(buffer, "%s", dma_mode_name(dma_mode));
}
static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp)
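
The tw686x change fixes a format-string misuse: the mode name returned by dma_mode_name() was passed as the format argument itself, so any '%' in it would be interpreted as a conversion specifier. Passing it through "%s" copies it as plain data. A minimal illustration (the local name variable is just for clarity):

	const char *name = dma_mode_name(dma_mode);

	sprintf(buffer, name);		/* unsafe: name parsed for %-conversions */
	sprintf(buffer, "%s", name);	/* safe: name copied verbatim */
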
diff --git a/drivers/media/pci/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c
index 3c3cbce0f9cc..303289a7fd3f 100644
--- a/drivers/media/pci/zoran/videocodec.c
+++ b/drivers/media/pci/zoran/videocodec.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/videocodec.h b/drivers/media/pci/zoran/videocodec.h
index def55585ad23..8ed5a0f7ac01 100644
--- a/drivers/media/pci/zoran/videocodec.h
+++ b/drivers/media/pci/zoran/videocodec.h
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 4e7db8939c2b..9bb3c21aa275 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _BUZ_H_
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 9d2697f5b455..5266755add63 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 4936fead73e8..81cba177cd90 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ZORAN_CARD_H__
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 35b552c178da..671907a6e6b6 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
diff --git a/drivers/media/pci/zoran/zoran_device.h b/drivers/media/pci/zoran/zoran_device.h
index 07f2c23ff740..a507aaad4ebb 100644
--- a/drivers/media/pci/zoran/zoran_device.h
+++ b/drivers/media/pci/zoran/zoran_device.h
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ZORAN_DEVICE_H__
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 94b9b616df98..180f3d7af3e1 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -38,10 +38,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -975,6 +971,7 @@ static int zoran_open(struct file *file)
return 0;
fail_fh:
+ v4l2_fh_exit(&fh->fh);
kfree(fh);
fail_unlock:
mutex_unlock(&zr->lock);
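
The added v4l2_fh_exit() in the zoran_open() error path presumably pairs with the v4l2_fh_init() performed earlier in that function, so the file handle is torn down before fh is freed. The general pattern, as a sketch (the surrounding zoran_open() code is assumed, not shown in the hunk):

	v4l2_fh_init(&fh->fh, video_devdata(file));
	/* ... on a later failure ... */
	v4l2_fh_exit(&fh->fh);	/* undo v4l2_fh_init() before kfree(fh) */
	kfree(fh);
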
diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c
index 437652761093..78ac8f853748 100644
--- a/drivers/media/pci/zoran/zoran_procfs.c
+++ b/drivers/media/pci/zoran/zoran_procfs.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
diff --git a/drivers/media/pci/zoran/zoran_procfs.h b/drivers/media/pci/zoran/zoran_procfs.h
index f2d5b1ba448f..0ac7cb0011f2 100644
--- a/drivers/media/pci/zoran/zoran_procfs.h
+++ b/drivers/media/pci/zoran/zoran_procfs.h
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ZORAN_PROCFS_H__
diff --git a/drivers/media/pci/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c
index c12ca9f96bac..8736b9d8d97e 100644
--- a/drivers/media/pci/zoran/zr36016.c
+++ b/drivers/media/pci/zoran/zr36016.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zr36016.h b/drivers/media/pci/zoran/zr36016.h
index 8c79229f69d1..784bcf5727b8 100644
--- a/drivers/media/pci/zoran/zr36016.h
+++ b/drivers/media/pci/zoran/zr36016.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zr36050.c b/drivers/media/pci/zoran/zr36050.c
index e1985609af4b..5ebfc16672f3 100644
--- a/drivers/media/pci/zoran/zr36050.c
+++ b/drivers/media/pci/zoran/zr36050.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zr36050.h b/drivers/media/pci/zoran/zr36050.h
index ea083adda045..9236486d3c2b 100644
--- a/drivers/media/pci/zoran/zr36050.h
+++ b/drivers/media/pci/zoran/zr36050.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zr36057.h b/drivers/media/pci/zoran/zr36057.h
index 54c9362aa980..c9ffef15532d 100644
--- a/drivers/media/pci/zoran/zr36057.h
+++ b/drivers/media/pci/zoran/zr36057.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZR36057_H_
diff --git a/drivers/media/pci/zoran/zr36060.c b/drivers/media/pci/zoran/zr36060.c
index f08546fe2234..2c2e8130fc96 100644
--- a/drivers/media/pci/zoran/zr36060.c
+++ b/drivers/media/pci/zoran/zr36060.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/pci/zoran/zr36060.h b/drivers/media/pci/zoran/zr36060.h
index 914ffa4ad8d3..82911757ba78 100644
--- a/drivers/media/pci/zoran/zr36060.h
+++ b/drivers/media/pci/zoran/zr36060.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* ------------------------------------------------------------------------
*/
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index d944421e392d..c9106e105bab 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -162,6 +162,9 @@ config VIDEO_CODA
Coda is a range of video codec IPs that supports
H.264, MPEG-4, and other video formats.
+config VIDEO_IMX_VDOA
+ def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
+
config VIDEO_MEDIATEK_VPU
tristate "Mediatek Video Processor Unit"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
@@ -298,6 +301,56 @@ config VIDEO_STI_HVA
To compile this driver as a module, choose M here:
the module will be called st-hva.
+config VIDEO_STI_HVA_DEBUGFS
+ bool "Export STMicroelectronics HVA internals in debugfs"
+ depends on VIDEO_STI_HVA
+ depends on DEBUG_FS
+ help
+ Select this to see information about the internal state and the last
+ operation of the STMicroelectronics HVA multi-format video encoder in
+ debugfs.
+
+ Choose N unless you know you need this.
+
+config VIDEO_STI_DELTA
+ tristate "STMicroelectronics DELTA multi-format video decoder V4L2 driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STI || COMPILE_TEST
+ depends on HAS_DMA
+ help
+ This V4L2 driver enables the DELTA multi-format video decoder
+ of the STMicroelectronics STiH4xx SoC series, allowing hardware
+ decoding of various compressed video bitstream formats into
+ raw uncompressed frames.
+
+ Use this option to see the decoders available for such
+ hardware.
+
+ Note that the driver will only be built if
+ at least one of the DELTA decoders below is selected.
+
+if VIDEO_STI_DELTA
+
+config VIDEO_STI_DELTA_MJPEG
+ bool "STMicroelectronics DELTA MJPEG support"
+ default y
+ help
+ Enables DELTA MJPEG hardware support.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-delta.
+
+config VIDEO_STI_DELTA_DRIVER
+ tristate
+ depends on VIDEO_STI_DELTA
+ depends on VIDEO_STI_DELTA_MJPEG
+ default VIDEO_STI_DELTA_MJPEG
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select RPMSG
+
+endif # VIDEO_STI_DELTA
+
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 5b3cb271d2b8..349ddf6a69da 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -39,6 +39,8 @@ obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
+obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
+
obj-$(CONFIG_BLACKFIN) += blackfin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index b33b9e35e60e..05489a401c5c 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1576,7 +1576,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
- ret = vpfe_try_fmt(file, priv, &format);
+ ret = __vpfe_get_format(vpfe, &format, &bpp);
if (ret)
return ret;
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 2e6edc09b58f..1c5166df46f5 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/completion.h>
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index b8f3d9fa66e9..37169054b828 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 9342ac57b230..858284328af9 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -3,3 +3,4 @@ ccflags-y += -I$(src)
coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b6625047250d..466a44e4549e 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -30,6 +30,7 @@
#include <media/videobuf2-vmalloc.h>
#include "coda.h"
+#include "imx-vdoa.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -758,7 +759,7 @@ static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
}
coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
- if (fourcc == V4L2_PIX_FMT_NV12) {
+ if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_YUYV) {
cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
@@ -1517,6 +1518,10 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
u32 val;
int ret;
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "Video Data Order Adapter: %s\n",
+ ctx->use_vdoa ? "Enabled" : "Disabled");
+
/* Start decoding */
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -1532,10 +1537,11 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
CODA9_FRAME_TILED2LINEAR);
- if (dst_fourcc == V4L2_PIX_FMT_NV12)
+ if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
- ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
+ ctx->frame_mem_ctrl |= (0x3 << 9) |
+ ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
ctx->display_idx = -1;
@@ -1618,6 +1624,15 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
__func__, ctx->idx, width, height);
ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
+ /*
+ * If the VDOA is used, the decoder needs one additional frame,
+ * because the frames are freed when the next frame is decoded.
+ * Otherwise there are visible errors in the decoded frames (green
+ * regions in displayed frames) and a broken order of frames (earlier
+ * frames are sporadically displayed after later frames).
+ */
+ if (ctx->use_vdoa)
+ ctx->num_internal_frames += 1;
if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
v4l2_err(&dev->v4l2_dev,
"not enough framebuffers to decode (%d < %d)\n",
@@ -1724,6 +1739,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct coda_q_data *q_data_dst;
struct coda_buffer_meta *meta;
unsigned long flags;
+ u32 rot_mode = 0;
u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1759,27 +1775,40 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
if (dev->devtype->product == CODA_960)
coda_set_gdi_regs(ctx);
- if (dev->devtype->product == CODA_960) {
- /*
- * The CODA960 seems to have an internal list of buffers with
- * 64 entries that includes the registered frame buffers as
- * well as the rotator buffer output.
- * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
- */
- coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
- CODA9_CMD_DEC_PIC_ROT_INDEX);
-
- reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
- reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
+ if (ctx->use_vdoa &&
+ ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ vdoa_device_run(ctx->vdoa,
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0),
+ ctx->internal_frames[ctx->display_idx].paddr);
} else {
- reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
- reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
+ if (dev->devtype->product == CODA_960) {
+ /*
+ * The CODA960 seems to have an internal list of
+ * buffers with 64 entries that includes the
+ * registered frame buffers as well as the rotator
+ * buffer output.
+ *
+ * ROT_INDEX needs to be < 0x40, but >
+ * ctx->num_internal_frames.
+ */
+ coda_write(dev,
+ CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
+ CODA9_CMD_DEC_PIC_ROT_INDEX);
+
+ reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
+ } else {
+ reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
+ }
+ coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
+ coda_write(dev, q_data_dst->bytesperline, reg_stride);
+
+ rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
}
- coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
- coda_write(dev, q_data_dst->bytesperline, reg_stride);
- coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
- CODA_CMD_DEC_PIC_ROT_MODE);
+ coda_write(dev, rot_mode, CODA_CMD_DEC_PIC_ROT_MODE);
switch (dev->devtype->product) {
case CODA_DX6:
@@ -1851,6 +1880,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
u32 src_fourcc;
int success;
u32 err_mb;
+ int err_vdoa = 0;
u32 val;
/* Update kfifo out pointer from coda bitstream read pointer */
@@ -1934,13 +1964,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
}
}
+ /* Wait until the VDOA finished writing the previous display frame */
+ if (ctx->use_vdoa &&
+ ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ err_vdoa = vdoa_wait_for_completion(ctx->vdoa);
+ }
+
ctx->frm_dis_flg = coda_read(dev,
CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
- /*
- * The previous display frame was copied out by the rotator,
- * now it can be overwritten again
- */
+ /* The previous display frame was copied out and can be overwritten */
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
@@ -2045,6 +2079,9 @@ static void coda_finish_decode(struct coda_ctx *ctx)
trace_coda_dec_rot_done(ctx, dst_buf, meta);
switch (q_data_dst->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ payload = width * height * 2;
+ break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_NV12:
@@ -2057,8 +2094,10 @@ static void coda_finish_decode(struct coda_ctx *ctx)
}
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
- coda_m2m_buf_done(ctx, dst_buf, ctx->frame_errors[display_idx] ?
- VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ if (ctx->frame_errors[ctx->display_idx] || err_vdoa)
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
+ else
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"job finished: decoding frame (%d) (%s)\n",
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 9e6bdafa16f5..eb6548f46cba 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -41,6 +41,7 @@
#include <media/videobuf2-vmalloc.h>
#include "coda.h"
+#include "imx-vdoa.h"
#define CODA_NAME "coda"
@@ -66,6 +67,10 @@ static int disable_tiling;
module_param(disable_tiling, int, 0644);
MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers");
+static int disable_vdoa;
+module_param(disable_vdoa, int, 0644);
+MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster-scan conversion");
+
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
@@ -90,6 +95,8 @@ void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
u32 base_cb, base_cr;
switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ /* Fallthrough: IN -H264-> CODA -NV12 MB-> VDOA -YUYV-> OUT */
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
default:
@@ -196,6 +203,11 @@ static const struct coda_video_device coda_bit_decoder = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
+ /*
+ * If V4L2_PIX_FMT_YUYV should be default,
+ * set_default_params() must be adjusted.
+ */
+ V4L2_PIX_FMT_YUYV,
},
};
@@ -241,6 +253,7 @@ static u32 coda_format_normalize_yuv(u32 fourcc)
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
return V4L2_PIX_FMT_YUV420;
default:
return fourcc;
@@ -325,6 +338,31 @@ const char *coda_product_name(int product)
}
}
+static struct vdoa_data *coda_get_vdoa_data(void)
+{
+ struct device_node *vdoa_node;
+ struct platform_device *vdoa_pdev;
+ struct vdoa_data *vdoa_data = NULL;
+
+ vdoa_node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-vdoa");
+ if (!vdoa_node)
+ return NULL;
+
+ vdoa_pdev = of_find_device_by_node(vdoa_node);
+ if (!vdoa_pdev)
+ goto out;
+
+ vdoa_data = platform_get_drvdata(vdoa_pdev);
+ if (!vdoa_data)
+ vdoa_data = ERR_PTR(-EPROBE_DEFER);
+
+out:
+ if (vdoa_node)
+ of_node_put(vdoa_node);
+
+ return vdoa_data;
+}
+
/*
* V4L2 ioctl() operations.
*/
@@ -404,6 +442,11 @@ static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
return -EINVAL;
for (i = 0; i < CODA_MAX_FORMATS; i++) {
+ /* Skip YUYV if the vdoa is not available */
+ if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[i] == V4L2_PIX_FMT_YUYV)
+ continue;
+
if (formats[i] == f->fmt.pix.pixelformat) {
f->fmt.pix.pixelformat = formats[i];
return 0;
@@ -417,6 +460,33 @@ static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
return 0;
}
+static int coda_try_fmt_vdoa(struct coda_ctx *ctx, struct v4l2_format *f,
+ bool *use_vdoa)
+{
+ int err;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (!use_vdoa)
+ return -EINVAL;
+
+ if (!ctx->vdoa) {
+ *use_vdoa = false;
+ return 0;
+ }
+
+ err = vdoa_context_configure(NULL, f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ if (err) {
+ *use_vdoa = false;
+ return 0;
+ }
+
+ *use_vdoa = true;
+ return 0;
+}
+
static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage,
u32 width, u32 height)
{
@@ -463,6 +533,11 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
break;
+ case V4L2_PIX_FMT_YUYV:
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ break;
case V4L2_PIX_FMT_YUV422P:
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
@@ -495,6 +570,7 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
const struct coda_codec *codec;
struct vb2_queue *src_vq;
int ret;
+ bool use_vdoa;
ret = coda_try_pixelformat(ctx, f);
if (ret < 0)
@@ -531,6 +607,19 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
+
+ ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa);
+ if (ret < 0)
+ return ret;
+
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ if (!use_vdoa)
+ return -EINVAL;
+
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ }
}
return 0;
@@ -566,7 +655,8 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
return coda_try_fmt(ctx, codec, f);
}
-static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
+ struct v4l2_rect *r)
{
struct coda_q_data *q_data;
struct vb2_queue *vq;
@@ -589,18 +679,23 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
q_data->height = f->fmt.pix.height;
q_data->bytesperline = f->fmt.pix.bytesperline;
q_data->sizeimage = f->fmt.pix.sizeimage;
- q_data->rect.left = 0;
- q_data->rect.top = 0;
- q_data->rect.width = f->fmt.pix.width;
- q_data->rect.height = f->fmt.pix.height;
+ if (r) {
+ q_data->rect = *r;
+ } else {
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = f->fmt.pix.width;
+ q_data->rect.height = f->fmt.pix.height;
+ }
switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+ break;
case V4L2_PIX_FMT_NV12:
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
- if (!disable_tiling)
- break;
- }
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+ if (!disable_tiling)
+ break;
/* else fall through */
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -610,9 +705,20 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
break;
}
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP &&
+ !coda_try_fmt_vdoa(ctx, f, &ctx->use_vdoa) &&
+ ctx->use_vdoa)
+ vdoa_context_configure(ctx->vdoa, f->fmt.pix.width,
+ f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ else
+ ctx->use_vdoa = false;
+
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
- f->type, q_data->width, q_data->height, q_data->fourcc);
+ "Setting format for type %d, wxh: %dx%d, fmt: %4.4s %c\n",
+ f->type, q_data->width, q_data->height,
+ (char *)&q_data->fourcc,
+ (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T');
return 0;
}
@@ -621,27 +727,37 @@ static int coda_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_q_data *q_data_src;
+ struct v4l2_rect r;
int ret;
ret = coda_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- return coda_s_fmt(ctx, f);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r.left = 0;
+ r.top = 0;
+ r.width = q_data_src->width;
+ r.height = q_data_src->height;
+
+ return coda_s_fmt(ctx, f, &r);
}
static int coda_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_q_data *q_data_src;
struct v4l2_format f_cap;
+ struct v4l2_rect r;
int ret;
ret = coda_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- ret = coda_s_fmt(ctx, f);
+ ret = coda_s_fmt(ctx, f, NULL);
if (ret)
return ret;
@@ -657,7 +773,13 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- return coda_s_fmt(ctx, &f_cap);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r.left = 0;
+ r.top = 0;
+ r.width = q_data_src->width;
+ r.height = q_data_src->height;
+
+ return coda_s_fmt(ctx, &f_cap, &r);
}
static int coda_reqbufs(struct file *file, void *priv,
@@ -1018,6 +1140,16 @@ static int coda_job_ready(void *m2m_priv)
bool stream_end = ctx->bit_stream_param &
CODA_BIT_STREAM_END_FLAG;
int num_metas = ctx->num_metas;
+ unsigned int count;
+
+ count = hweight32(ctx->frm_dis_flg);
+ if (ctx->use_vdoa && count >= (ctx->num_internal_frames - 1)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: all internal buffers in use: %d/%d (0x%x)",
+ ctx->idx, count, ctx->num_internal_frames,
+ ctx->frm_dis_flg);
+ return 0;
+ }
if (ctx->hold && !src_bufs) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
@@ -1708,6 +1840,13 @@ static int coda_open(struct file *file)
default:
ctx->reg_idx = idx;
}
+ if (ctx->dev->vdoa && !disable_vdoa) {
+ ctx->vdoa = vdoa_context_create(dev->vdoa);
+ if (!ctx->vdoa)
+ v4l2_warn(&dev->v4l2_dev,
+ "Failed to create vdoa context: not using vdoa");
+ }
+ ctx->use_vdoa = false;
/* Power up and upload firmware if necessary */
ret = pm_runtime_get_sync(&dev->plat_dev->dev);
@@ -1789,6 +1928,9 @@ static int coda_release(struct file *file)
/* If this instance is running, call .job_abort and wait for it to end */
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ if (ctx->vdoa)
+ vdoa_context_destroy(ctx->vdoa);
+
/* In case the instance was not running, we still need to call SEQ_END */
if (ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
@@ -2079,6 +2221,7 @@ static const struct coda_devtype coda_devdata[] = {
[CODA_IMX27] = {
.firmware = {
"vpu_fw_imx27_TO2.bin",
+ "vpu/vpu_fw_imx27_TO2.bin",
"v4l-codadx6-imx27.bin"
},
.product = CODA_DX6,
@@ -2092,6 +2235,7 @@ static const struct coda_devtype coda_devdata[] = {
[CODA_IMX53] = {
.firmware = {
"vpu_fw_imx53.bin",
+ "vpu/vpu_fw_imx53.bin",
"v4l-coda7541-imx53.bin"
},
.product = CODA_7541,
@@ -2106,6 +2250,7 @@ static const struct coda_devtype coda_devdata[] = {
[CODA_IMX6Q] = {
.firmware = {
"vpu_fw_imx6q.bin",
+ "vpu/vpu_fw_imx6q.bin",
"v4l-coda960-imx6q.bin"
},
.product = CODA_960,
@@ -2120,6 +2265,7 @@ static const struct coda_devtype coda_devdata[] = {
[CODA_IMX6DL] = {
.firmware = {
"vpu_fw_imx6d.bin",
+ "vpu/vpu_fw_imx6d.bin",
"v4l-coda960-imx6dl.bin"
},
.product = CODA_960,
@@ -2235,6 +2381,11 @@ static int coda_probe(struct platform_device *pdev)
}
dev->iram_pool = pool;
+ /* Get vdoa_data if supported by the platform */
+ dev->vdoa = coda_get_vdoa_data();
+ if (PTR_ERR(dev->vdoa) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
return ret;
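As a rough sketch of the lookup pattern used by coda_get_vdoa_data() above (the compatible string is taken from the patch; the helper name and error handling are illustrative only, not part of the patch):

	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/of_platform.h>
	#include <linux/platform_device.h>

	/* Illustrative helper, not driver code. */
	static struct vdoa_data *example_get_companion(void)
	{
		struct device_node *np;
		struct platform_device *pdev;
		struct vdoa_data *data = NULL;

		np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-vdoa");
		if (!np)
			return NULL;		/* no VDOA node: run without it */

		pdev = of_find_device_by_node(np);
		if (pdev) {
			data = platform_get_drvdata(pdev);
			if (!data)		/* device found, driver not bound yet */
				data = ERR_PTR(-EPROBE_DEFER);
		}

		of_node_put(np);
		return data;
	}

coda_probe() then treats ERR_PTR(-EPROBE_DEFER) as a request to retry probing later, and a plain NULL as "no VDOA on this platform".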
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 53f96661683c..4b831c91ae4a 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -50,7 +50,7 @@ enum coda_product {
struct coda_video_device;
struct coda_devtype {
- char *firmware[2];
+ char *firmware[3];
enum coda_product product;
const struct coda_codec *codecs;
unsigned int num_codecs;
@@ -75,6 +75,7 @@ struct coda_dev {
struct platform_device *plat_dev;
const struct coda_devtype *devtype;
int firmware;
+ struct vdoa_data *vdoa;
void __iomem *regs_base;
struct clk *clk_per;
@@ -236,6 +237,8 @@ struct coda_ctx {
int display_idx;
struct dentry *debugfs_entry;
bool use_bit;
+ bool use_vdoa;
+ struct vdoa_ctx *vdoa;
};
extern int coda_debug;
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
new file mode 100644
index 000000000000..67fd8ffa60a4
--- /dev/null
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -0,0 +1,338 @@
+/*
+ * i.MX6 Video Data Order Adapter (VDOA)
+ *
+ * Copyright (C) 2014 Philipp Zabel
+ * Copyright (C) 2016 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include "imx-vdoa.h"
+
+#define VDOA_NAME "imx-vdoa"
+
+#define VDOAC 0x00
+#define VDOASRR 0x04
+#define VDOAIE 0x08
+#define VDOAIST 0x0c
+#define VDOAFP 0x10
+#define VDOAIEBA00 0x14
+#define VDOAIEBA01 0x18
+#define VDOAIEBA02 0x1c
+#define VDOAIEBA10 0x20
+#define VDOAIEBA11 0x24
+#define VDOAIEBA12 0x28
+#define VDOASL 0x2c
+#define VDOAIUBO 0x30
+#define VDOAVEBA0 0x34
+#define VDOAVEBA1 0x38
+#define VDOAVEBA2 0x3c
+#define VDOAVUBO 0x40
+#define VDOASR 0x44
+
+#define VDOAC_ISEL BIT(6)
+#define VDOAC_PFS BIT(5)
+#define VDOAC_SO BIT(4)
+#define VDOAC_SYNC BIT(3)
+#define VDOAC_NF BIT(2)
+#define VDOAC_BNDM_MASK 0x3
+#define VDOAC_BAND_HEIGHT_8 0x0
+#define VDOAC_BAND_HEIGHT_16 0x1
+#define VDOAC_BAND_HEIGHT_32 0x2
+
+#define VDOASRR_START BIT(1)
+#define VDOASRR_SWRST BIT(0)
+
+#define VDOAIE_EITERR BIT(1)
+#define VDOAIE_EIEOT BIT(0)
+
+#define VDOAIST_TERR BIT(1)
+#define VDOAIST_EOT BIT(0)
+
+#define VDOAFP_FH_MASK (0x1fff << 16)
+#define VDOAFP_FW_MASK (0x3fff)
+
+#define VDOASL_VSLY_MASK (0x3fff << 16)
+#define VDOASL_ISLY_MASK (0x7fff)
+
+#define VDOASR_ERRW BIT(4)
+#define VDOASR_EOB BIT(3)
+#define VDOASR_CURRENT_FRAME (0x3 << 1)
+#define VDOASR_CURRENT_BUFFER BIT(1)
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct vdoa_data {
+ struct vdoa_ctx *curr_ctx;
+ struct device *dev;
+ struct clk *vdoa_clk;
+ void __iomem *regs;
+ int irq;
+};
+
+struct vdoa_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+ u32 pixelformat;
+};
+
+struct vdoa_ctx {
+ struct vdoa_data *vdoa;
+ struct completion completion;
+ struct vdoa_q_data q_data[2];
+};
+
+static irqreturn_t vdoa_irq_handler(int irq, void *data)
+{
+ struct vdoa_data *vdoa = data;
+ struct vdoa_ctx *curr_ctx;
+ u32 val;
+
+ /* Disable interrupts */
+ writel(0, vdoa->regs + VDOAIE);
+
+ curr_ctx = vdoa->curr_ctx;
+ if (!curr_ctx) {
+ dev_dbg(vdoa->dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ val = readl(vdoa->regs + VDOAIST);
+ writel(val, vdoa->regs + VDOAIST);
+ if (val & VDOAIST_TERR) {
+ val = readl(vdoa->regs + VDOASR) & VDOASR_ERRW;
+ dev_err(vdoa->dev, "AXI %s error\n", val ? "write" : "read");
+ } else if (!(val & VDOAIST_EOT)) {
+ dev_warn(vdoa->dev, "Spurious interrupt\n");
+ }
+ complete(&curr_ctx->completion);
+
+ return IRQ_HANDLED;
+}
+
+void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
+{
+ struct vdoa_q_data *src_q_data, *dst_q_data;
+ struct vdoa_data *vdoa = ctx->vdoa;
+ u32 val;
+
+ vdoa->curr_ctx = ctx;
+
+ src_q_data = &ctx->q_data[V4L2_M2M_SRC];
+ dst_q_data = &ctx->q_data[V4L2_M2M_DST];
+
+ /* Progressive, no sync, 1 frame per run */
+ if (dst_q_data->pixelformat == V4L2_PIX_FMT_YUYV)
+ val = VDOAC_PFS;
+ else
+ val = 0;
+ writel(val, vdoa->regs + VDOAC);
+
+ writel(dst_q_data->height << 16 | dst_q_data->width,
+ vdoa->regs + VDOAFP);
+
+ val = dst;
+ writel(val, vdoa->regs + VDOAIEBA00);
+
+ writel(src_q_data->bytesperline << 16 | dst_q_data->bytesperline,
+ vdoa->regs + VDOASL);
+
+ if (dst_q_data->pixelformat == V4L2_PIX_FMT_NV12 ||
+ dst_q_data->pixelformat == V4L2_PIX_FMT_NV21)
+ val = dst_q_data->bytesperline * dst_q_data->height;
+ else
+ val = 0;
+ writel(val, vdoa->regs + VDOAIUBO);
+
+ val = src;
+ writel(val, vdoa->regs + VDOAVEBA0);
+ val = round_up(src_q_data->bytesperline * src_q_data->height, 4096);
+ writel(val, vdoa->regs + VDOAVUBO);
+
+ /* Enable interrupts and start transfer */
+ writel(VDOAIE_EITERR | VDOAIE_EIEOT, vdoa->regs + VDOAIE);
+ writel(VDOASRR_START, vdoa->regs + VDOASRR);
+}
+EXPORT_SYMBOL(vdoa_device_run);
+
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+ struct vdoa_data *vdoa = ctx->vdoa;
+
+ if (!wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(300))) {
+ dev_err(vdoa->dev,
+ "Timeout waiting for transfer result\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vdoa_wait_for_completion);
+
+struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
+{
+ struct vdoa_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ err = clk_prepare_enable(vdoa->vdoa_clk);
+ if (err) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ init_completion(&ctx->completion);
+ ctx->vdoa = vdoa;
+
+ return ctx;
+}
+EXPORT_SYMBOL(vdoa_context_create);
+
+void vdoa_context_destroy(struct vdoa_ctx *ctx)
+{
+ struct vdoa_data *vdoa = ctx->vdoa;
+
+ clk_disable_unprepare(vdoa->vdoa_clk);
+ kfree(ctx);
+}
+EXPORT_SYMBOL(vdoa_context_destroy);
+
+int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width, unsigned int height,
+ u32 pixelformat)
+{
+ struct vdoa_q_data *src_q_data;
+ struct vdoa_q_data *dst_q_data;
+
+ if (width < 16 || width > 8192 || width % 16 != 0 ||
+ height < 16 || height > 4096 || height % 16 != 0)
+ return -EINVAL;
+
+ if (pixelformat != V4L2_PIX_FMT_YUYV &&
+ pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ /* If no context is passed, only check if the format is valid */
+ if (!ctx)
+ return 0;
+
+ src_q_data = &ctx->q_data[V4L2_M2M_SRC];
+ dst_q_data = &ctx->q_data[V4L2_M2M_DST];
+
+ src_q_data->width = width;
+ src_q_data->height = height;
+ src_q_data->bytesperline = width;
+ src_q_data->sizeimage =
+ round_up(src_q_data->bytesperline * height, 4096) +
+ src_q_data->bytesperline * height / 2;
+
+ dst_q_data->width = width;
+ dst_q_data->height = height;
+ dst_q_data->pixelformat = pixelformat;
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ dst_q_data->bytesperline = width * 2;
+ dst_q_data->sizeimage = dst_q_data->bytesperline * height;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ default:
+ dst_q_data->bytesperline = width;
+ dst_q_data->sizeimage =
+ dst_q_data->bytesperline * height * 3 / 2;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vdoa_context_configure);
+
+static int vdoa_probe(struct platform_device *pdev)
+{
+ struct vdoa_data *vdoa;
+ struct resource *res;
+
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL);
+ if (!vdoa)
+ return -ENOMEM;
+
+ vdoa->dev = &pdev->dev;
+
+ vdoa->vdoa_clk = devm_clk_get(vdoa->dev, NULL);
+ if (IS_ERR(vdoa->vdoa_clk)) {
+ dev_err(vdoa->dev, "Failed to get clock\n");
+ return PTR_ERR(vdoa->vdoa_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vdoa->regs = devm_ioremap_resource(vdoa->dev, res);
+ if (IS_ERR(vdoa->regs))
+ return PTR_ERR(vdoa->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ vdoa->irq = devm_request_threaded_irq(&pdev->dev, res->start, NULL,
+ vdoa_irq_handler, IRQF_ONESHOT,
+ "vdoa", vdoa);
+ if (vdoa->irq < 0) {
+ dev_err(vdoa->dev, "Failed to get irq\n");
+ return vdoa->irq;
+ }
+
+ platform_set_drvdata(pdev, vdoa);
+
+ return 0;
+}
+
+static int vdoa_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id vdoa_dt_ids[] = {
+ { .compatible = "fsl,imx6q-vdoa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
+
+static struct platform_driver vdoa_driver = {
+ .probe = vdoa_probe,
+ .remove = vdoa_remove,
+ .driver = {
+ .name = VDOA_NAME,
+ .of_match_table = vdoa_dt_ids,
+ },
+};
+
+module_platform_driver(vdoa_driver);
+
+MODULE_DESCRIPTION("Video Data Order Adapter");
+MODULE_AUTHOR("Philipp Zabel <philipp.zabel@gmail.com>");
+MODULE_ALIAS("platform:imx-vdoa");
+MODULE_LICENSE("GPL");
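A minimal sketch of how a client driver such as coda is expected to drive the API exported above; the helper name, geometry and DMA addresses are placeholders and error handling is abbreviated:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/videodev2.h>
	#include "imx-vdoa.h"

	/* Hypothetical example; dst/src are DMA addresses of the raster and tiled buffers. */
	static int example_untile_frame(struct vdoa_data *vdoa,
					dma_addr_t dst, dma_addr_t src)
	{
		struct vdoa_ctx *vctx;
		int ret;

		vctx = vdoa_context_create(vdoa);	/* typically once per decoder instance */
		if (!vctx)
			return -ENOMEM;

		/* validate and commit the geometry before the first transfer */
		ret = vdoa_context_configure(vctx, 1920, 1088, V4L2_PIX_FMT_YUYV);
		if (ret)
			goto out;

		/* start the tiled-to-raster copy, then wait for the end-of-transfer irq */
		vdoa_device_run(vctx, dst, src);
		ret = vdoa_wait_for_completion(vctx);
	out:
		vdoa_context_destroy(vctx);
		return ret;
	}

In the coda driver the create/destroy calls live in coda_open()/coda_release(), the configuration happens in coda_s_fmt(), and run/wait are interleaved with decoding so that waiting for the previously displayed frame overlaps with decoding the next one.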
diff --git a/drivers/media/platform/coda/imx-vdoa.h b/drivers/media/platform/coda/imx-vdoa.h
new file mode 100644
index 000000000000..967576b2a06a
--- /dev/null
+++ b/drivers/media/platform/coda/imx-vdoa.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IMX_VDOA_H
+#define IMX_VDOA_H
+
+struct vdoa_data;
+struct vdoa_ctx;
+
+#if (defined CONFIG_VIDEO_IMX_VDOA || defined CONFIG_VIDEO_IMX_VDOA_MODULE)
+
+struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa);
+int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width, unsigned int height,
+ u32 pixelformat);
+void vdoa_context_destroy(struct vdoa_ctx *ctx);
+
+void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src);
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx);
+
+#else
+
+static inline struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
+{
+ return NULL;
+}
+
+static inline int vdoa_context_configure(struct vdoa_ctx *ctx,
+ unsigned int width,
+ unsigned int height,
+ u32 pixelformat)
+{
+ return 0;
+}
+
+static inline void vdoa_context_destroy(struct vdoa_ctx *ctx) { }
+
+static inline void vdoa_device_run(struct vdoa_ctx *ctx,
+				   dma_addr_t dst, dma_addr_t src) { }
+
+static inline int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+	return 0;
+}
+
+#endif
+
+#endif /* IMX_VDOA_H */
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index ae5605de7679..8f6688a7a111 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ccdc device API
*/
#ifndef _CCDC_HW_DEVICE_H
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 65c2973167c6..73db166dc338 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* CCDC hardware module for DM355
* ------------------------------
*
diff --git a/drivers/media/platform/davinci/dm355_ccdc_regs.h b/drivers/media/platform/davinci/dm355_ccdc_regs.h
index 2e1946e0b99f..a753ce262583 100644
--- a/drivers/media/platform/davinci/dm355_ccdc_regs.h
+++ b/drivers/media/platform/davinci/dm355_ccdc_regs.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _DM355_CCDC_REGS_H
#define _DM355_CCDC_REGS_H
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index c7523a7e0594..740fbc7a8c14 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* CCDC hardware module for DM6446
* ------------------------------
*
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
index 2b0aca5383f0..bece0bd9c9de 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _DM644X_CCDC_REGS_H
#define _DM644X_CCDC_REGS_H
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 99faea2e84c6..5813b49391ed 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Image Sensor Interface (ISIF) driver
*
* This driver is for configuring the ISIF IP available on DM365 or any other
diff --git a/drivers/media/platform/davinci/isif_regs.h b/drivers/media/platform/davinci/isif_regs.h
index 3993aece821b..a3564abe08ae 100644
--- a/drivers/media/platform/davinci/isif_regs.h
+++ b/drivers/media/platform/davinci/isif_regs.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ISIF_REGS_H
#define _ISIF_REGS_H
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 8c8cbeb7d90f..3679b1e7b39e 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 7d96a4b13b32..df042e84a678 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/media/platform/davinci/vpbe_osd_regs.h b/drivers/media/platform/davinci/vpbe_osd_regs.h
index 584520f3af60..3db265f87c65 100644
--- a/drivers/media/platform/davinci/vpbe_osd_regs.h
+++ b/drivers/media/platform/davinci/vpbe_osd_regs.h
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _VPBE_OSD_REGS_H
#define _VPBE_OSD_REGS_H
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 36ed1466b290..8bfe90a24681 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/media/platform/davinci/vpbe_venc_regs.h b/drivers/media/platform/davinci/vpbe_venc_regs.h
index 947cb1510776..6ad38f7ab0fe 100644
--- a/drivers/media/platform/davinci/vpbe_venc_regs.h
+++ b/drivers/media/platform/davinci/vpbe_venc_regs.h
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _VPBE_VENC_REGS_H
#define _VPBE_VENC_REGS_H
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index ee1cd79739c8..e3fe3e0635aa 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Driver name : VPFE Capture driver
* VPFE Capture driver allows applications to capture and stream video
* frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
@@ -523,6 +519,8 @@ static int vpfe_open(struct file *file)
if (!vpfe_dev->initialized) {
if (vpfe_initialize_device(vpfe_dev)) {
mutex_unlock(&vpfe_dev->lock);
+ v4l2_fh_exit(&fh->fh);
+ kfree(fh);
return -ENODEV;
}
}
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 0380cf2e5775..1b02a6363f77 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -32,6 +32,9 @@
MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver");
MODULE_LICENSE("GPL");
+#define VPIF_DRIVER_NAME "vpif"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
+
#define VPIF_CH0_MAX_MODES 22
#define VPIF_CH1_MAX_MODES 2
#define VPIF_CH2_MAX_MODES 15
@@ -464,9 +467,18 @@ static const struct dev_pm_ops vpif_pm = {
#define vpif_pm_ops NULL
#endif
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id vpif_of_match[] = {
+ { .compatible = "ti,da850-vpif", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vpif_of_match);
+#endif
+
static struct platform_driver vpif_driver = {
.driver = {
- .name = "vpif",
+ .of_match_table = of_match_ptr(vpif_of_match),
+ .name = VPIF_DRIVER_NAME,
.pm = vpif_pm_ops,
},
.remove = vpif_remove,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index f791f5c402bf..44f702752d3a 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* TODO : add support for VBI & HBI data service
* add static buffer allocation
*/
@@ -45,6 +41,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-1");
#define VPIF_DRIVER_NAME "vpif_capture"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
/* global variables */
static struct vpif_device vpif_obj = { {NULL} };
@@ -178,8 +175,6 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long addr, flags;
int ret;
- spin_lock_irqsave(&common->irqlock, flags);
-
/* Initialize field_id */
ch->field_id = 0;
@@ -210,6 +205,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
vpif_config_addr(ch, ret);
/* Get the next frame from the buffer queue */
+ spin_lock_irqsave(&common->irqlock, flags);
common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
/* Remove buffer from the buffer queue */
@@ -243,6 +239,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err:
+ spin_lock_irqsave(&common->irqlock, flags);
list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
@@ -286,7 +283,6 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
vpif_dbg(1, debug, "stream off failed in subdev\n");
/* release all active buffers */
- spin_lock_irqsave(&common->irqlock, flags);
if (common->cur_frm == common->next_frm) {
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
@@ -299,6 +295,7 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
VB2_BUF_STATE_ERROR);
}
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
@@ -647,6 +644,10 @@ static int vpif_input_to_subdev(
vpif_dbg(2, debug, "vpif_input_to_subdev\n");
+ if (!chan_cfg)
+ return -1;
+ if (input_index >= chan_cfg->input_count)
+ return -1;
subdev_name = chan_cfg->inputs[input_index].subdev_name;
if (!subdev_name)
return -1;
@@ -685,6 +686,9 @@ static int vpif_set_input(
if (sd_index >= 0) {
sd = vpif_obj.sd[sd_index];
subdev_info = &vpif_cfg->subdev_info[sd_index];
+ } else {
+ /* no subdevice, no input to setup */
+ return 0;
}
/* first setup input path from sub device to vpif */
@@ -1430,6 +1434,11 @@ static __init int vpif_probe(struct platform_device *pdev)
int res_idx = 0;
int i, err;
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
vpif_dev = &pdev->dev;
err = initialize_vpif();
@@ -1466,7 +1475,10 @@ static __init int vpif_probe(struct platform_device *pdev)
}
if (!vpif_obj.config->asd_sizes) {
- i2c_adap = i2c_get_adapter(1);
+ int i2c_id = vpif_obj.config->i2c_adapter_id;
+
+ i2c_adap = i2c_get_adapter(i2c_id);
+ WARN_ON(!i2c_adap);
for (i = 0; i < subdev_count; i++) {
subdevdata = &vpif_obj.config->subdev_info[i];
vpif_obj.sd[i] =
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index 9e35b6771d22..cf494a596a44 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef VPIF_CAPTURE_H
@@ -67,7 +63,7 @@ struct common_obj {
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
- /* Used in video-buf */
+ /* Protects the dma_queue field */
spinlock_t irqlock;
/* lock used to access this structure */
struct mutex lock;
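The vpif_capture.c hunks above narrow irqlock to the dma_queue accesses it actually protects; previously it was taken at the top of vpif_start_streaming(), well before the queue is touched. A minimal sketch of the intended scope, with illustrative helper and assuming the caller has already checked that the queue is not empty:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include "vpif_capture.h"

	/* Illustrative only: the lock guards just the buffer list manipulation. */
	static void example_take_next_buffer(struct common_obj *common)
	{
		unsigned long flags;

		spin_lock_irqsave(&common->irqlock, flags);
		common->cur_frm = list_first_entry(&common->dma_queue,
						   struct vpif_cap_buffer, list);
		list_del(&common->cur_frm->list);
		spin_unlock_irqrestore(&common->irqlock, flags);
	}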
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index e5f18448dbf7..50c30731bb78 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -42,6 +42,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-1");
#define VPIF_DRIVER_NAME "vpif_display"
+MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
static int ycmux_mode;
@@ -1244,6 +1245,11 @@ static __init int vpif_probe(struct platform_device *pdev)
int res_idx = 0;
int i, err;
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
vpif_dev = &pdev->dev;
err = initialize_vpif();
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 373b796132f2..f2d27b932999 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* common vpss system module platform driver for all video drivers.
*/
#include <linux/module.h>
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index cbf75b6194b4..cbb03768f5d7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -408,7 +408,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
if (pix_mp->field == V4L2_FIELD_ANY)
pix_mp->field = V4L2_FIELD_NONE;
else if (pix_mp->field != V4L2_FIELD_NONE) {
- pr_err("Not supported field order(%d)\n", pix_mp->field);
+ pr_debug("Not supported field order(%d)\n", pix_mp->field);
return -EINVAL;
}
@@ -1118,6 +1118,7 @@ static int gsc_remove(struct platform_device *pdev)
clk_disable_unprepare(gsc->clock[i]);
pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
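The gsc-core.c change pairs the runtime-PM enable done at probe time with a disable on the removal path. A minimal sketch of that pairing (illustrative driver, not the gsc code):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);		/* allow runtime suspend/resume */
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		pm_runtime_disable(&pdev->dev);		/* undo the enable from probe */
		return 0;
	}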
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index f49f24b4462a..82505025d96c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -675,8 +675,8 @@ static int gsc_m2m_open(struct file *file)
error_ctrls:
gsc_ctrls_delete(ctx);
-error_fh:
v4l2_fh_del(&ctx->fh);
+error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
unlock:
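Both the gsc-m2m and fimc-m2m open() fixes move v4l2_fh_del() above the error_fh label, so each label undoes only what had actually been set up before the failing step. The general unwind shape, with hypothetical stub helpers for illustration:

	/* Hypothetical helpers standing in for v4l2_fh_init/add and control setup. */
	static int init_handle(void) { return 0; }
	static int register_handle(void) { return 0; }
	static int create_controls(void) { return 0; }
	static void unregister_handle(void) { }
	static void exit_handle(void) { }

	static int example_open(void)
	{
		int ret;

		ret = init_handle();		/* cheap init, e.g. v4l2_fh_init() */
		if (ret)
			return ret;

		ret = register_handle();	/* e.g. v4l2_fh_add() */
		if (ret)
			goto err_exit;

		ret = create_controls();	/* e.g. the control handler */
		if (ret)
			goto err_unregister;

		return 0;

	err_unregister:
		unregister_handle();		/* undo register_handle() only here */
	err_exit:
		exit_handle();			/* undo init_handle() */
		return ret;
	}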
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 964f4a681934..8a7cd07dbe28 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -536,7 +536,7 @@ static int fimc_capture_release(struct file *file)
mutex_lock(&fimc->lock);
if (close && vc->streaming) {
- media_entity_pipeline_stop(&vc->ve.vdev.entity);
+ media_pipeline_stop(&vc->ve.vdev.entity);
vc->streaming = false;
}
@@ -1195,7 +1195,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
if (fimc_capture_active(fimc))
return -EBUSY;
- ret = media_entity_pipeline_start(entity, &vc->ve.pipe->mp);
+ ret = media_pipeline_start(entity, &vc->ve.pipe->mp);
if (ret < 0)
return ret;
@@ -1229,7 +1229,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
}
err_p_stop:
- media_entity_pipeline_stop(entity);
+ media_pipeline_stop(entity);
return ret;
}
@@ -1244,7 +1244,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_entity_pipeline_stop(&vc->ve.vdev.entity);
+ media_pipeline_stop(&vc->ve.vdev.entity);
vc->streaming = false;
return 0;
}
@@ -1695,7 +1695,7 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
+static const struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
.enum_mbus_code = fimc_subdev_enum_mbus_code,
.get_selection = fimc_subdev_get_selection,
.set_selection = fimc_subdev_set_selection,
@@ -1703,7 +1703,7 @@ static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
.set_fmt = fimc_subdev_set_fmt,
};
-static struct v4l2_subdev_ops fimc_subdev_ops = {
+static const struct v4l2_subdev_ops fimc_subdev_ops = {
.pad = &fimc_subdev_pad_ops,
};
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 6bba4ca022be..2f559663e51e 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -28,7 +28,14 @@ struct fimc_is_i2c {
* is implemented in the FIMC-IS subsystem firmware and the host CPU
* doesn't access the I2C bus controller.
*/
-static const struct i2c_algorithm fimc_is_i2c_algorithm;
+static u32 is_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm fimc_is_i2c_algorithm = {
+ .functionality = is_i2c_func,
+};
static int fimc_is_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 518ad34f80d7..7f92144a1de3 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -825,12 +825,13 @@ static int fimc_is_probe(struct platform_device *pdev)
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (!is->irq) {
dev_err(dev, "no irq found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = fimc_is_get_clocks(is);
if (ret < 0)
- return ret;
+ goto err_iounmap;
platform_set_drvdata(pdev, is);
@@ -891,6 +892,8 @@ err_irq:
free_irq(is->irq, is);
err_clk:
fimc_is_put_clocks(is);
+err_iounmap:
+ iounmap(is->pmu_regs);
return ret;
}
@@ -947,6 +950,7 @@ static int fimc_is_remove(struct platform_device *pdev)
fimc_is_unregister_subdevs(is);
vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
+ iounmap(is->pmu_regs);
fimc_is_debugfs_remove(is);
release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 400ce0cb0c0d..55ba696b8cf4 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -312,7 +312,7 @@ static int isp_video_release(struct file *file)
mutex_lock(&isp->video_lock);
if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
- media_entity_pipeline_stop(entity);
+ media_pipeline_stop(entity);
ivc->streaming = 0;
}
@@ -489,7 +489,7 @@ static int isp_video_streamon(struct file *file, void *priv,
struct media_entity *me = &ve->vdev.entity;
int ret;
- ret = media_entity_pipeline_start(me, &ve->pipe->mp);
+ ret = media_pipeline_start(me, &ve->pipe->mp);
if (ret < 0)
return ret;
@@ -504,7 +504,7 @@ static int isp_video_streamon(struct file *file, void *priv,
isp->video_capture.streaming = 1;
return 0;
p_stop:
- media_entity_pipeline_stop(me);
+ media_pipeline_stop(me);
return ret;
}
@@ -519,7 +519,7 @@ static int isp_video_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_entity_pipeline_stop(&video->ve.vdev.entity);
+ media_pipeline_stop(&video->ve.vdev.entity);
video->streaming = 0;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index b91abf1c4d43..b4c4a33784c4 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -524,7 +524,7 @@ static int fimc_lite_release(struct file *file)
if (v4l2_fh_is_singular_file(file) &&
atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
if (fimc->streaming) {
- media_entity_pipeline_stop(entity);
+ media_pipeline_stop(entity);
fimc->streaming = false;
}
fimc_lite_stop_capture(fimc, false);
@@ -832,7 +832,7 @@ static int fimc_lite_streamon(struct file *file, void *priv,
if (fimc_lite_active(fimc))
return -EBUSY;
- ret = media_entity_pipeline_start(entity, &fimc->ve.pipe->mp);
+ ret = media_pipeline_start(entity, &fimc->ve.pipe->mp);
if (ret < 0)
return ret;
@@ -849,7 +849,7 @@ static int fimc_lite_streamon(struct file *file, void *priv,
}
err_p_stop:
- media_entity_pipeline_stop(entity);
+ media_pipeline_stop(entity);
return 0;
}
@@ -863,7 +863,7 @@ static int fimc_lite_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_entity_pipeline_stop(&fimc->ve.vdev.entity);
+ media_pipeline_stop(&fimc->ve.vdev.entity);
fimc->streaming = false;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 6028e4fbaed3..d8724fe9e9da 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -663,8 +663,8 @@ error_m2m_ctx:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
-error_fh:
v4l2_fh_del(&ctx->fh);
+error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
unlock:
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index e3a8709138fa..e82450e90a67 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -402,8 +402,10 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return ret;
}
- if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
+ if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS) {
+ of_node_put(ep);
return -EINVAL;
+ }
pd->mux_id = (endpoint.base.port - 1) & 0x1;
@@ -1117,7 +1119,7 @@ static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable)
/* Locking: called with entity->graph_obj.mdev->graph_mutex mutex held. */
static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
- struct media_entity_graph *graph)
+ struct media_graph *graph)
{
struct media_entity *entity_err = entity;
int ret;
@@ -1128,9 +1130,9 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
* through active links. This is needed as we cannot power on/off the
* subdevs in random order.
*/
- media_entity_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, entity);
- while ((entity = media_entity_graph_walk_next(graph))) {
+ while ((entity = media_graph_walk_next(graph))) {
if (!is_media_entity_v4l2_video_device(entity))
continue;
@@ -1143,9 +1145,9 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
return 0;
err:
- media_entity_graph_walk_start(graph, entity_err);
+ media_graph_walk_start(graph, entity_err);
- while ((entity_err = media_entity_graph_walk_next(graph))) {
+ while ((entity_err = media_graph_walk_next(graph))) {
if (!is_media_entity_v4l2_video_device(entity_err))
continue;
@@ -1161,7 +1163,7 @@ err:
static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
unsigned int notification)
{
- struct media_entity_graph *graph =
+ struct media_graph *graph =
&container_of(link->graph_obj.mdev, struct fimc_md,
media_dev)->link_setup_graph;
struct media_entity *sink = link->sink->entity;
@@ -1169,7 +1171,7 @@ static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
/* Before link disconnection */
if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
- ret = media_entity_graph_walk_init(graph,
+ ret = media_graph_walk_init(graph,
link->graph_obj.mdev);
if (ret)
return ret;
@@ -1183,7 +1185,7 @@ static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
} else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH) {
if (link->flags & MEDIA_LNK_FL_ENABLED)
ret = __fimc_md_modify_pipelines(sink, true, graph);
- media_entity_graph_walk_cleanup(graph);
+ media_graph_walk_cleanup(graph);
}
return ret ? -EPIPE : 0;
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index ed122cb2dd74..957787a2f480 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -154,7 +154,7 @@ struct fimc_md {
bool user_subdev_api;
spinlock_t slock;
struct list_head pipelines;
- struct media_entity_graph link_setup_graph;
+ struct media_graph link_setup_graph;
};
static inline
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index befd9fc0adc4..f819b29efc38 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -649,23 +649,23 @@ static int s5pcsis_log_status(struct v4l2_subdev *sd)
return 0;
}
-static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
+static const struct v4l2_subdev_core_ops s5pcsis_core_ops = {
.s_power = s5pcsis_s_power,
.log_status = s5pcsis_log_status,
};
-static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
+static const struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
.enum_mbus_code = s5pcsis_enum_mbus_code,
.get_fmt = s5pcsis_get_fmt,
.set_fmt = s5pcsis_set_fmt,
};
-static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
+static const struct v4l2_subdev_video_ops s5pcsis_video_ops = {
.s_rx_buffer = s5pcsis_s_rx_buffer,
.s_stream = s5pcsis_s_stream,
};
-static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
+static const struct v4l2_subdev_ops s5pcsis_subdev_ops = {
.core = &s5pcsis_core_ops,
.pad = &s5pcsis_pad_ops,
.video = &s5pcsis_video_ops,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 074659227864..502877a4b1df 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -351,16 +351,6 @@ static void mtk_vdec_worker(struct work_struct *work)
dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
- buf.va = vb2_plane_vaddr(src_buf, 0);
- buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- buf.size = (size_t)src_buf->planes[0].bytesused;
- if (!buf.va) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
- ctx->id, src_buf->index);
- return;
- }
-
pfb = &dst_buf_info->frame_buffer;
pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
@@ -371,8 +361,6 @@ static void mtk_vdec_worker(struct work_struct *work)
pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
pfb->status = 0;
mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
mtk_v4l2_debug(3,
"id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
@@ -381,24 +369,36 @@ static void mtk_vdec_worker(struct work_struct *work)
&pfb->base_c.dma_addr, pfb->base_y.size);
if (src_buf_info->lastframe) {
- /* update src buf status */
+ mtk_v4l2_debug(1, "Got empty flush input buffer.");
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- src_buf_info->lastframe = false;
- v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
/* update dst buf status */
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ mutex_lock(&ctx->lock);
dst_buf_info->used = false;
+ mutex_unlock(&ctx->lock);
vdec_if_decode(ctx, NULL, NULL, &res_chg);
clean_display_buffer(ctx);
vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
clean_free_buffer(ctx);
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
return;
}
+ buf.va = vb2_plane_vaddr(src_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ buf.size = (size_t)src_buf->planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+ ctx->id, src_buf->index);
+ return;
+ }
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
dst_buf_info->vb.vb2_buf.timestamp
= src_buf_info->vb.vb2_buf.timestamp;
dst_buf_info->vb.timecode
@@ -412,10 +412,9 @@ static void mtk_vdec_worker(struct work_struct *work)
if (ret) {
mtk_v4l2_err(
- " <===[%d], src_buf[%d]%d sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
ctx->id,
src_buf->index,
- src_buf_info->lastframe,
buf.size,
src_buf_info->vb.vb2_buf.timestamp,
dst_buf->index,
@@ -456,6 +455,65 @@ static void mtk_vdec_worker(struct work_struct *work)
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}
+static int vidioc_try_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
+{
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ case V4L2_DEC_CMD_START:
+ if (cmd->flags != 0) {
+ mtk_v4l2_err("cmd->flags=%u", cmd->flags);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static int vidioc_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq, *dst_vq;
+ int ret;
+
+ ret = vidioc_try_decoder_cmd(file, priv, cmd);
+ if (ret)
+ return ret;
+
+ mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
+ dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!vb2_is_streaming(src_vq)) {
+ mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ return 0;
+ }
+ if (!vb2_is_streaming(dst_vq)) {
+ mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ return 0;
+ }
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ break;
+
+ case V4L2_DEC_CMD_START:
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
{
mutex_unlock(&ctx->dev->dec_mutex);
@@ -521,10 +579,6 @@ static int vidioc_vdec_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
- struct vb2_queue *vq;
- struct vb2_buffer *vb;
- struct mtk_video_dec_buf *mtkbuf;
- struct vb2_v4l2_buffer *vb2_v4l2;
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
@@ -532,25 +586,6 @@ static int vidioc_vdec_qbuf(struct file *file, void *priv,
return -EIO;
}
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type);
- if (buf->index >= vq->num_buffers) {
- mtk_v4l2_debug(1, "buffer index %d out of range", buf->index);
- return -EINVAL;
- }
- vb = vq->bufs[buf->index];
- vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
- mtkbuf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
-
- if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
- (buf->m.planes[0].bytesused == 0)) {
- mtkbuf->lastframe = true;
- mtk_v4l2_debug(1, "[%d] (%d) id=%d lastframe=%d (%d,%d, %d) vb=%p",
- ctx->id, buf->type, buf->index,
- mtkbuf->lastframe, buf->bytesused,
- buf->m.planes[0].bytesused, buf->length,
- vb);
- }
-
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
@@ -1067,10 +1102,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
int ret = 0;
unsigned int dpbsize = 1;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
- struct vb2_v4l2_buffer, vb2_buf);
- struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
- struct mtk_video_dec_buf, vb);
+ struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+ struct mtk_video_dec_buf *buf = NULL;
mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
ctx->id, vb->vb2_queue->type,
@@ -1079,10 +1112,11 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
* check if this buffer is ready to be used after decode
*/
if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
mutex_lock(&ctx->lock);
if (buf->used == false) {
- v4l2_m2m_buf_queue(ctx->m2m_ctx,
- to_vb2_v4l2_buffer(vb));
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
buf->queued_in_vb2 = true;
buf->queued_in_v4l2 = true;
buf->ready_to_display = false;
@@ -1095,7 +1129,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
return;
}
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
if (ctx->state != MTK_STATE_INIT) {
mtk_v4l2_debug(3, "[%d] already init driver %d",
@@ -1108,6 +1142,14 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
mtk_v4l2_err("No src buffer");
return;
}
+ vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ if (buf->lastframe) {
+ /* This shouldn't happen. Just in case. */
+ mtk_v4l2_err("Invalid flush buffer.");
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ return;
+ }
src_mem.va = vb2_plane_vaddr(src_buf, 0);
src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
@@ -1126,15 +1168,14 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
* if there is no SPS header or picture info
* in bs
*/
- int log_level = ret ? 0 : 1;
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
VB2_BUF_STATE_DONE);
- mtk_v4l2_debug(log_level,
- "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->index,
- src_mem.size, ret, res_chg);
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->index,
+ src_mem.size, ret, res_chg);
return;
}
@@ -1224,9 +1265,15 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_ERROR);
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ to_vb2_v4l2_buffer(src_buf);
+ struct mtk_video_dec_buf *buf_info = container_of(
+ vb2_v4l2, struct mtk_video_dec_buf, vb);
+ if (!buf_info->lastframe)
+ v4l2_m2m_buf_done(vb2_v4l2,
+ VB2_BUF_STATE_ERROR);
+ }
return;
}
@@ -1406,6 +1453,9 @@ const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_g_selection = vidioc_vdec_g_selection,
.vidioc_s_selection = vidioc_vdec_s_selection,
+
+ .vidioc_decoder_cmd = vidioc_decoder_cmd,
+ .vidioc_try_decoder_cmd = vidioc_try_decoder_cmd,
};
int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index d48287c727f4..4334b7394861 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -105,13 +105,21 @@ static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_video_dec_buf *mtk_buf = NULL;
int ret = 0;
+ struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ mtk_buf = kzalloc(sizeof(*mtk_buf), GFP_KERNEL);
+ if (!mtk_buf) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
mutex_lock(&dev->dev_mutex);
+ ctx->empty_flush_buf = mtk_buf;
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
@@ -135,6 +143,10 @@ static int fops_vcodec_open(struct file *file)
ret);
goto err_m2m_ctx_init;
}
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
+ ctx->empty_flush_buf->lastframe = true;
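The two lines above pre-arm a per-context, zero-sized flush buffer on the OUTPUT (bitstream) queue at open time. As a rough illustration of how it is meant to be consumed, the sketch below shows a hypothetical V4L2_DEC_CMD_STOP path pushing that buffer so the decode worker sees lastframe and drains; the real handler added by this patch (vidioc_decoder_cmd) lives outside this hunk and may differ in detail.

/* Hypothetical sketch only, not the driver's actual handler. */
static void sketch_queue_flush_buf(struct mtk_vcodec_ctx *ctx)
{
	struct vb2_v4l2_buffer *flush_vb = &ctx->empty_flush_buf->vb;

	/* bytesused stays 0; lastframe was set in fops_vcodec_open() */
	v4l2_m2m_buf_queue(ctx->m2m_ctx, flush_vb);
	v4l2_m2m_try_schedule(ctx->m2m_ctx);
}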
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -173,6 +185,7 @@ err_m2m_ctx_init:
err_ctrls_setup:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+ kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -203,6 +216,7 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
list_del_init(&ctx->list);
+ kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index d7eb8ef855d2..3cffb381ac8e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -254,6 +254,7 @@ struct vdec_pic_info {
* @decode_work: worker for the decoding
* @encode_work: worker for the encoding
* @last_decoded_picinfo: pic information get from latest decode
+ * @empty_flush_buf: a fake size-0 output (bitstream) buffer that indicates flush
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
@@ -291,6 +292,7 @@ struct mtk_vcodec_ctx {
struct work_struct decode_work;
struct work_struct encode_work;
struct vdec_pic_info last_decoded_picinfo;
+ struct mtk_video_dec_buf *empty_flush_buf;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 5a24c51aebb7..1abd14e79565 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -70,9 +70,8 @@ void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
{
int err;
- uint32_t msg_id = *(uint32_t *)msg;
- mtk_vcodec_debug(vpu, "id=%X", msg_id);
+ mtk_vcodec_debug(vpu, "id=%X", *(uint32_t *)msg);
vpu->failure = 0;
vpu->signaled = 0;
@@ -80,7 +79,7 @@ static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
if (err) {
mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
- vpu->id, msg_id, err);
+ vpu->id, *(uint32_t *)msg, err);
return err;
}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index b76c80bdf30b..4eb3be37ba14 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -665,10 +665,10 @@ static int h264_enc_deinit(unsigned long handle)
}
static const struct venc_common_if venc_h264_if = {
- h264_enc_init,
- h264_enc_encode,
- h264_enc_set_param,
- h264_enc_deinit,
+ .init = h264_enc_init,
+ .encode = h264_enc_encode,
+ .set_param = h264_enc_set_param,
+ .deinit = h264_enc_deinit,
};
const struct venc_common_if *get_h264_enc_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 544f57186243..a6fa145f2c54 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -470,10 +470,10 @@ static int vp8_enc_deinit(unsigned long handle)
}
static const struct venc_common_if venc_vp8_if = {
- vp8_enc_init,
- vp8_enc_encode,
- vp8_enc_set_param,
- vp8_enc_deinit,
+ .init = vp8_enc_init,
+ .encode = vp8_enc_encode,
+ .set_param = vp8_enc_set_param,
+ .deinit = vp8_enc_deinit,
};
const struct venc_common_if *get_vp8_enc_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
index a01c7599b510..0d882acf8830 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -79,10 +79,8 @@ static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
status = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
if (status) {
- uint32_t msg_id = *(uint32_t *)msg;
-
mtk_vcodec_err(vpu, "vpu_ipi_send msg_id %x len %d fail %d",
- msg_id, len, status);
+ *(uint32_t *)msg, len, status);
return -EINVAL;
}
if (vpu->failure)
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 7354469670b7..218e6d7ae93a 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -225,22 +225,22 @@ isp_video_remote_subdev(struct isp_video *video, u32 *pad)
static int isp_video_get_graph_data(struct isp_video *video,
struct isp_pipeline *pipe)
{
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &video->video.entity;
struct media_device *mdev = entity->graph_obj.mdev;
struct isp_video *far_end = NULL;
int ret;
mutex_lock(&mdev->graph_mutex);
- ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&graph, mdev);
if (ret) {
mutex_unlock(&mdev->graph_mutex);
return ret;
}
- media_entity_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
+ while ((entity = media_graph_walk_next(&graph))) {
struct isp_video *__video;
media_entity_enum_set(&pipe->ent_enum, entity);
@@ -261,7 +261,7 @@ static int isp_video_get_graph_data(struct isp_video *video,
mutex_unlock(&mdev->graph_mutex);
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
pipe->input = far_end;
@@ -1112,7 +1112,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
pipe->max_rate = pipe->l3_ick;
- ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
if (ret < 0)
goto err_pipeline_start;
@@ -1169,7 +1169,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return 0;
err_check_format:
- media_entity_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(&video->video.entity);
err_pipeline_start:
/* TODO: Implement PM QoS */
/* The DMA queue must be emptied here, otherwise CCDC interrupts that
@@ -1236,7 +1236,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
video->error = false;
/* TODO: Implement PM QoS */
- media_entity_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(&video->video.entity);
media_entity_enum_cleanup(&pipe->ent_enum);
@@ -1350,6 +1350,7 @@ static int isp_video_open(struct file *file)
done:
if (ret < 0) {
v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
kfree(handle);
}
@@ -1373,6 +1374,7 @@ static int isp_video_release(struct file *file)
/* Release the file handle. */
v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
kfree(handle);
file->private_data = NULL;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 674cc1309b43..42f25d241edd 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1596,7 +1596,7 @@ static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
else
fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
- dprintk(ctx->fdp1, "Try %s format: %4s (0x%08x) %ux%u field %u\n",
+ dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
@@ -1671,7 +1671,7 @@ static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
- dprintk(ctx->fdp1, "Set %s format: %4s (0x%08x) %ux%u field %u\n",
+ dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 0413a861a59a..1b30be72f4f9 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -856,13 +856,13 @@ static int s3c_camif_streamon(struct file *file, void *priv,
if (s3c_vp_active(vp))
return 0;
- ret = media_entity_pipeline_start(sensor, camif->m_pipeline);
+ ret = media_pipeline_start(sensor, camif->m_pipeline);
if (ret < 0)
return ret;
ret = camif_pipeline_validate(camif);
if (ret < 0) {
- media_entity_pipeline_stop(sensor);
+ media_pipeline_stop(sensor);
return ret;
}
@@ -886,7 +886,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv,
ret = vb2_streamoff(&vp->vb_queue, type);
if (ret == 0)
- media_entity_pipeline_stop(&camif->sensor.sd->entity);
+ media_pipeline_stop(&camif->sensor.sd->entity);
return ret;
}
@@ -1488,7 +1488,7 @@ static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
.set_fmt = s3c_camif_subdev_set_fmt,
};
-static struct v4l2_subdev_ops s3c_camif_subdev_ops = {
+static const struct v4l2_subdev_ops s3c_camif_subdev_ops = {
.pad = &s3c_camif_subdev_pad_ops,
};
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index 534d6c3c6d60..cb4986b8f798 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -59,7 +59,7 @@ static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on);
}
-static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops platform_subdev_core_ops = {
.s_power = soc_camera_platform_s_power,
};
@@ -110,7 +110,7 @@ static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops platform_subdev_video_ops = {
.s_stream = soc_camera_platform_s_stream,
.g_mbus_config = soc_camera_platform_g_mbus_config,
};
@@ -122,7 +122,7 @@ static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
.set_fmt = soc_camera_platform_fill_fmt,
};
-static struct v4l2_subdev_ops platform_subdev_ops = {
+static const struct v4l2_subdev_ops platform_subdev_ops = {
.core = &platform_subdev_core_ops,
.video = &platform_subdev_video_ops,
.pad = &platform_subdev_pad_ops,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 79c56356a7c7..7af66860d624 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -677,7 +677,7 @@ int bdisp_debugfs_create(struct bdisp_dev *bdisp)
err:
bdisp_debugfs_remove(bdisp);
- return 0;
+ return -ENOMEM;
}
void bdisp_debugfs_remove(struct bdisp_dev *bdisp)
diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
new file mode 100644
index 000000000000..8d032508a933
--- /dev/null
+++ b/drivers/media/platform/sti/delta/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
+st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
+
+# MJPEG support
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-hdr.o
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-dec.o
diff --git a/drivers/media/platform/sti/delta/delta-cfg.h b/drivers/media/platform/sti/delta/delta-cfg.h
new file mode 100644
index 000000000000..c6388f575800
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-cfg.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_CFG_H
+#define DELTA_CFG_H
+
+#define DELTA_FW_VERSION "21.1-3"
+
+#define DELTA_MIN_WIDTH 32
+#define DELTA_MAX_WIDTH 4096
+#define DELTA_MIN_HEIGHT 32
+#define DELTA_MAX_HEIGHT 2400
+
+/* DELTA requires a 32x32 pixels alignment for frames */
+#define DELTA_WIDTH_ALIGNMENT 32
+#define DELTA_HEIGHT_ALIGNMENT 32
+
+#define DELTA_DEFAULT_WIDTH DELTA_MIN_WIDTH
+#define DELTA_DEFAULT_HEIGHT DELTA_MIN_HEIGHT
+#define DELTA_DEFAULT_FRAMEFORMAT V4L2_PIX_FMT_NV12
+#define DELTA_DEFAULT_STREAMFORMAT V4L2_PIX_FMT_MJPEG
+
+#define DELTA_MAX_RESO (DELTA_MAX_WIDTH * DELTA_MAX_HEIGHT)
+
+/* guard value for number of access units */
+#define DELTA_MAX_AUS 10
+
+/* IP perf dependent, can be tuned */
+#define DELTA_PEAK_FRAME_SMOOTHING 2
+
+/*
+ * guard output frame count:
+ * - at least 1 frame needed for display
+ * - at worst 21
+ * ( max h264 dpb (16) +
+ * decoding peak smoothing (2) +
+ * user display pipeline (3) )
+ */
+#define DELTA_MIN_FRAME_USER 1
+#define DELTA_MAX_DPB 16
+#define DELTA_MAX_FRAME_USER 3 /* platform/use-case dependent */
+#define DELTA_MAX_FRAMES (DELTA_MAX_DPB + DELTA_PEAK_FRAME_SMOOTHING +\
+ DELTA_MAX_FRAME_USER)
+
+#if DELTA_MAX_FRAMES > VIDEO_MAX_FRAME
+#undef DELTA_MAX_FRAMES
+#define DELTA_MAX_FRAMES (VIDEO_MAX_FRAME)
+#endif
+
+/* extra space to be allocated to store codec specific data per frame */
+#define DELTA_MAX_FRAME_PRIV_SIZE 100
+
+/* PM runtime auto power-off after 5ms of inactivity */
+#define DELTA_HW_AUTOSUSPEND_DELAY_MS 5
+
+#define DELTA_MAX_DECODERS 10
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+extern const struct delta_dec mjpegdec;
+#endif
+
+#endif /* DELTA_CFG_H */
diff --git a/drivers/media/platform/sti/delta/delta-debug.c b/drivers/media/platform/sti/delta/delta-debug.c
new file mode 100644
index 000000000000..a7ebf2cc7783
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-debug.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "delta.h"
+#include "delta-debug.h"
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len)
+{
+ if (!s)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d %s %s dpb=%d %s %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&s->streamformat, s->width, s->height,
+ s->profile, s->level, s->dpb,
+ (s->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ s->other,
+ s->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ s->crop.width, s->crop.height,
+ s->crop.left, s->crop.top,
+ s->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ s->pixelaspect.numerator,
+ s->pixelaspect.denominator);
+
+ return str;
+}
+
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len)
+{
+ if (!f)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d aligned %dx%d %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&f->pixelformat, f->width, f->height,
+ f->aligned_width, f->aligned_height,
+ (f->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ f->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ f->crop.width, f->crop.height,
+ f->crop.left, f->crop.top,
+ f->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ f->pixelaspect.numerator,
+ f->pixelaspect.denominator);
+
+ return str;
+}
+
+void delta_trace_summary(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *s = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ return;
+
+ dev_dbg(delta->dev, "%s %s, %d frames decoded, %d frames output, %d frames dropped, %d stream errors, %d decode errors",
+ ctx->name,
+ delta_streaminfo_str(s, str, sizeof(str)),
+ ctx->decoded_frames,
+ ctx->output_frames,
+ ctx->dropped_frames,
+ ctx->stream_errors,
+ ctx->decode_errors);
+}
diff --git a/drivers/media/platform/sti/delta/delta-debug.h b/drivers/media/platform/sti/delta/delta-debug.h
new file mode 100644
index 000000000000..955c1587ac2d
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-debug.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_DEBUG_H
+#define DELTA_DEBUG_H
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len);
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len);
+void delta_trace_summary(struct delta_ctx *ctx);
+
+#endif /* DELTA_DEBUG_H */
diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c
new file mode 100644
index 000000000000..41e4a4c259b3
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-ipc.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/rpmsg.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mem.h"
+
+#define IPC_TIMEOUT 100
+#define IPC_SANITY_TAG 0xDEADBEEF
+
+enum delta_ipc_fw_command {
+ DELTA_IPC_OPEN,
+ DELTA_IPC_SET_STREAM,
+ DELTA_IPC_DECODE,
+ DELTA_IPC_CLOSE
+};
+
+#define to_rpmsg_driver(__drv) container_of(__drv, struct rpmsg_driver, drv)
+#define to_delta(__d) container_of(__d, struct delta_dev, rpmsg_driver)
+
+#define to_ctx(hdl) ((struct delta_ipc_ctx *)hdl)
+#define to_pctx(ctx) container_of(ctx, struct delta_ctx, ipc_ctx)
+
+struct delta_ipc_header_msg {
+ u32 tag;
+ void *host_hdl;
+ u32 copro_hdl;
+ u32 command;
+};
+
+#define to_host_hdl(ctx) ((void *)ctx)
+
+#define msg_to_ctx(msg) ((struct delta_ipc_ctx *)(msg)->header.host_hdl)
+#define msg_to_copro_hdl(msg) ((msg)->header.copro_hdl)
+
+static inline dma_addr_t to_paddr(struct delta_ipc_ctx *ctx, void *vaddr)
+{
+ return (ctx->ipc_buf->paddr + (vaddr - ctx->ipc_buf->vaddr));
+}
+
+static inline bool is_valid_data(struct delta_ipc_ctx *ctx,
+ void *data, u32 size)
+{
+ return ((data >= ctx->ipc_buf->vaddr) &&
+ ((data + size) <= (ctx->ipc_buf->vaddr + ctx->ipc_buf->size)));
+}
+
+/*
+ * IPC shared memory (@ipc_buf_size, @ipc_buf_paddr) is sent to copro
+ * at each instance opening. This memory is allocated on behalf of the
+ * IPC client by delta_ipc_open(). All message parameters
+ * (open, set_stream, decode) have their physical address within
+ * this IPC shared memory, which avoids extra copies inside delta-ipc.
+ * All the message structures below are used on both the host and the
+ * firmware side and are packed (only 32-bit fields are used in the
+ * message structures to ensure packing):
+ * - struct delta_ipc_open_msg
+ * - struct delta_ipc_set_stream_msg
+ * - struct delta_ipc_decode_msg
+ * - struct delta_ipc_close_msg
+ * - struct delta_ipc_cb_msg
+ */
+struct delta_ipc_open_msg {
+ struct delta_ipc_header_msg header;
+ u32 ipc_buf_size;
+ dma_addr_t ipc_buf_paddr;
+ char name[32];
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_set_stream_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_decode_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+ u32 status_size;
+ dma_addr_t status_paddr;
+};
+
+struct delta_ipc_close_msg {
+ struct delta_ipc_header_msg header;
+};
+
+struct delta_ipc_cb_msg {
+ struct delta_ipc_header_msg header;
+ int err;
+};
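To make the convention described above concrete, here is a minimal sketch of the client side: the command payload is copied into the shared buffer obtained from delta_ipc_open(), so that delta-ipc can translate it to a physical address with to_paddr() instead of copying it again. The helper name and its arguments are illustrative, not part of this patch.

/* Sketch, assuming the caller already holds hdl and ipc_buf from a
 * successful delta_ipc_open().
 */
static int sketch_send_stream_header(void *hdl, struct delta_buf *ipc_buf,
				     const void *hdr, u32 hdr_size)
{
	struct delta_ipc_param param;

	if (hdr_size > ipc_buf->size)
		return -EINVAL;

	/* payload must live inside the shared IPC buffer */
	memcpy(ipc_buf->vaddr, hdr, hdr_size);
	param.data = ipc_buf->vaddr;
	param.size = hdr_size;

	return delta_ipc_set_stream(hdl, &param);
}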
+
+static void build_msg_header(struct delta_ipc_ctx *ctx,
+ enum delta_ipc_fw_command command,
+ struct delta_ipc_header_msg *header)
+{
+ header->tag = IPC_SANITY_TAG;
+ header->host_hdl = to_host_hdl(ctx);
+ header->copro_hdl = ctx->copro_hdl;
+ header->command = command;
+}
+
+int delta_ipc_open(struct delta_ctx *pctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_ctx *ctx = &pctx->ipc_ctx;
+ struct delta_ipc_open_msg msg;
+ struct delta_buf *buf = &ctx->ipc_buf_struct;
+ int ret;
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg is not initialized\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return -EINVAL;
+ }
+
+ if (!name) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no name given\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no size given for ipc buffer\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, too large ipc parameter (%d bytes while max %d expected)\n",
+ pctx->name,
+ param->size, ipc_buf_size);
+ return -EINVAL;
+ }
+
+ /* init */
+ init_completion(&ctx->done);
+
+ /*
+ * allocation of contiguous buffer for
+ * data of commands exchanged between
+ * host and firmware coprocessor
+ */
+ ret = hw_alloc(pctx, ipc_buf_size,
+ "ipc data buffer", buf);
+ if (ret)
+ return ret;
+ ctx->ipc_buf = buf;
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_OPEN, &msg.header);
+
+ msg.ipc_buf_size = ipc_buf_size;
+ msg.ipc_buf_paddr = ctx->ipc_buf->paddr;
+
+ memcpy(msg.name, name, sizeof(msg.name));
+ msg.name[sizeof(msg.name) - 1] = 0;
+
+ msg.param_size = param->size;
+ memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size);
+ msg.param_paddr = ctx->ipc_buf->paddr;
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg_send failed (%d) for DELTA_IPC_OPEN (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ret, name, param->size, param->data);
+ goto err;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, timeout waiting for DELTA_IPC_OPEN callback (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ name, param->size, param->data);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* command completed, check error */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, DELTA_IPC_OPEN completed but with error (%d) (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, name, param->size, param->data);
+ ret = -EIO;
+ goto err;
+ }
+
+ *ipc_buf = ctx->ipc_buf;
+ *hdl = (void *)ctx;
+
+ return 0;
+
+err:
+ pctx->sys_errors++;
+ if (ctx->ipc_buf) {
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+ }
+
+ return ret;
+};
+
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_set_stream_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, too large ipc parameter(%d bytes while max %d expected)\n",
+ pctx->name,
+ param->size, ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_SET_STREAM, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg_send failed (%d) for DELTA_IPC_SET_STREAM (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, timeout waiting for DELTA_IPC_SET_STREAM callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, DELTA_IPC_SET_STREAM completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_decode_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!status || !status->data || !status->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty status\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size + status->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, too large ipc parameter (%d bytes (param) + %d bytes (status) while max %d expected)\n",
+ pctx->name,
+ param->size,
+ status->size,
+ ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, status->data, status->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, status is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ status->size,
+ status->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_DECODE, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ msg.status_size = status->size;
+ msg.status_paddr = to_paddr(ctx, status->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg_send failed (%d) for DELTA_IPC_DECODE (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, timeout waiting for DELTA_IPC_DECODE callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, DELTA_IPC_DECODE completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+};
+
+void delta_ipc_close(void *hdl)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_close_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, invalid ipc handle\n",
+ pctx->name);
+ return;
+ }
+
+ if (ctx->ipc_buf) {
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg is not initialized\n",
+ pctx->name);
+ return;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_CLOSE, &msg.header);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg_send failed (%d) for DELTA_IPC_CLOSE\n",
+ pctx->name, ret);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, timeout waiting for DELTA_IPC_CLOSE callback\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, DELTA_IPC_CLOSE completed but with error (%d)\n",
+ pctx->name, ctx->cb_err);
+ pctx->sys_errors++;
+ }
+};
+
+static int delta_ipc_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct delta_ipc_ctx *ctx;
+ struct delta_ipc_cb_msg *msg;
+
+ /* sanity check */
+ if (!rpdev) {
+ dev_err(NULL, "rpdev is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!data || !len) {
+ dev_err(&rpdev->dev,
+ "unexpected empty message received from src=%d\n", src);
+ return -EINVAL;
+ }
+
+ if (len != sizeof(*msg)) {
+ dev_err(&rpdev->dev,
+ "unexpected message length received from src=%d (received %d bytes while %zu bytes expected)\n",
+ src, len, sizeof(*msg));
+ return -EINVAL;
+ }
+
+ msg = (struct delta_ipc_cb_msg *)data;
+ if (msg->header.tag != IPC_SANITY_TAG) {
+ dev_err(&rpdev->dev,
+ "unexpected message tag received from src=%d (received %x tag while %x expected)\n",
+ src, msg->header.tag, IPC_SANITY_TAG);
+ return -EINVAL;
+ }
+
+ ctx = msg_to_ctx(msg);
+ if (!ctx) {
+ dev_err(&rpdev->dev,
+ "unexpected message with NULL host_hdl received from src=%d\n",
+ src);
+ return -EINVAL;
+ }
+
+ /*
+ * if not already known, save copro instance context
+ * to ensure re-entrance on copro side
+ */
+ if (!ctx->copro_hdl)
+ ctx->copro_hdl = msg_to_copro_hdl(msg);
+
+ /*
+ * all is fine,
+ * update status & complete command
+ */
+ ctx->cb_err = msg->err;
+ complete(&ctx->done);
+
+ return 0;
+}
+
+static int delta_ipc_probe(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = rpmsg_device;
+
+ return 0;
+}
+
+static void delta_ipc_remove(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = NULL;
+}
+
+static struct rpmsg_device_id delta_ipc_device_id_table[] = {
+ {.name = "rpmsg-delta"},
+ {},
+};
+
+static struct rpmsg_driver delta_rpmsg_driver = {
+ .drv = {.name = KBUILD_MODNAME},
+ .id_table = delta_ipc_device_id_table,
+ .probe = delta_ipc_probe,
+ .callback = delta_ipc_cb,
+ .remove = delta_ipc_remove,
+};
+
+int delta_ipc_init(struct delta_dev *delta)
+{
+ delta->rpmsg_driver = delta_rpmsg_driver;
+
+ return register_rpmsg_driver(&delta->rpmsg_driver);
+}
+
+void delta_ipc_exit(struct delta_dev *delta)
+{
+ unregister_rpmsg_driver(&delta->rpmsg_driver);
+}
diff --git a/drivers/media/platform/sti/delta/delta-ipc.h b/drivers/media/platform/sti/delta/delta-ipc.h
new file mode 100644
index 000000000000..cef2019c72d4
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-ipc.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_IPC_H
+#define DELTA_IPC_H
+
+int delta_ipc_init(struct delta_dev *delta);
+void delta_ipc_exit(struct delta_dev *delta);
+
+/*
+ * delta_ipc_open - open a decoding instance on firmware side
+ * @ctx: (in) delta context
+ * @name: (in) name of decoder to be used
+ * @param: (in) open command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter
+ * @ipc_buf_size: (in) size of IPC shared buffer between host
+ * and copro used to share command data.
+ * The client has to set here the size of the biggest
+ * command parameters (+ status if any).
+ * Allocation is done in this function, which
+ * gives back to the client in @ipc_buf the virtual
+ * & physical addresses & size of the shared IPC buffer.
+ * All further command data (parameters + status)
+ * has to be written into this shared IPC buffer
+ * virtual memory. This is done to avoid
+ * unnecessary copies of command data.
+ * @ipc_buf: (out) allocated IPC shared buffer
+ * @ipc_buf.size: (out) allocated size
+ * @ipc_buf.vaddr: (out) virtual address where to copy
+ * further command data
+ * @hdl: (out) handle of decoding instance.
+ */
+
+int delta_ipc_open(struct delta_ctx *ctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl);
+
+/*
+ * delta_ipc_set_stream - set information about stream to decoder
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) set stream command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ */
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param);
+
+/*
+ * delta_ipc_decode - frame decoding synchronous request, returns only
+ * after decoding completion on firmware side.
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) decode command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ * @status: (in/out) decode command status specific to decoder
+ * @status.size: (in) size of status
+ * @status.data: (in/out) virtual address of status. Must be
+ * within IPC shared buffer range.
+ * Status is filled by decoding instance
+ * after decoding completion.
+ */
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status);
+
+/*
+ * delta_ipc_close - close decoding instance
+ * @hdl: (in) handle of decoding instance to close.
+ */
+void delta_ipc_close(void *hdl);
+
+#endif /* DELTA_IPC_H */
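A condensed usage sketch of the API documented above; the MJPEG backend added later in this patch is the real reference user. The foo_* structs and the "FOO_DECODER_HW0" firmware name are made up for illustration.

/* Hypothetical decoder backend types, for illustration only. */
struct foo_open_params { u32 mode; };
struct foo_cmd { u32 addr; };
struct foo_status { u32 error; };

static int sketch_open_instance(struct delta_ctx *pctx, void **hdl,
				struct delta_buf **ipc_buf)
{
	struct foo_open_params open_params = { .mode = 0 };
	struct delta_ipc_param param = {
		.size = sizeof(open_params),
		.data = &open_params,
	};

	/* size the shared buffer for the biggest command plus its status */
	return delta_ipc_open(pctx, "FOO_DECODER_HW0", &param,
			      sizeof(struct foo_cmd) + sizeof(struct foo_status),
			      ipc_buf, hdl);
}

Subsequent delta_ipc_set_stream()/delta_ipc_decode() calls must place their data inside (*ipc_buf)->vaddr, and delta_ipc_close(*hdl) tears the instance down.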
diff --git a/drivers/media/platform/sti/delta/delta-mem.c b/drivers/media/platform/sti/delta/delta-mem.c
new file mode 100644
index 000000000000..d7b53d31caa6
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mem.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "delta.h"
+#include "delta-mem.h"
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+ dma_addr_t dma_addr;
+ void *addr;
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ addr = dma_alloc_attrs(delta->dev, size, &dma_addr,
+ GFP_KERNEL | __GFP_NOWARN, attrs);
+ if (!addr) {
+ dev_err(delta->dev,
+ "%s hw_alloc:dma_alloc_coherent failed for %s (size=%d)\n",
+ ctx->name, name, size);
+ ctx->sys_errors++;
+ return -ENOMEM;
+ }
+
+ buf->size = size;
+ buf->paddr = dma_addr;
+ buf->vaddr = addr;
+ buf->name = name;
+ buf->attrs = attrs;
+
+ dev_dbg(delta->dev,
+ "%s allocate %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, size, buf->vaddr, &buf->paddr, buf->name);
+
+ return 0;
+}
+
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev,
+ "%s free %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(delta->dev, buf->size,
+ buf->vaddr, buf->paddr, buf->attrs);
+}
diff --git a/drivers/media/platform/sti/delta/delta-mem.h b/drivers/media/platform/sti/delta/delta-mem.h
new file mode 100644
index 000000000000..f8ca109e1241
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mem.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_MEM_H
+#define DELTA_MEM_H
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf);
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf);
+
+#endif /* DELTA_MEM_H */
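A short usage sketch of the allocator pair above, mirroring the way delta_ipc_open() uses it in this patch; the SZ_4K size and the helper name are arbitrary.

/* Sketch: allocate a DMA-able scratch buffer for the firmware, then free it. */
static int sketch_use_scratch(struct delta_ctx *ctx)
{
	struct delta_buf buf;
	int ret;

	ret = hw_alloc(ctx, SZ_4K, "scratch buffer", &buf);
	if (ret)
		return ret; /* hw_alloc already logged and counted the error */

	/* hand buf.paddr to the firmware; CPU accesses go through buf.vaddr */

	hw_free(ctx, &buf);
	return 0;
}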
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
new file mode 100644
index 000000000000..e79bdc611432
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/slab.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mjpeg.h"
+#include "delta-mjpeg-fw.h"
+
+#define DELTA_MJPEG_MAX_RESO DELTA_MAX_RESO
+
+struct delta_mjpeg_ctx {
+ /* jpeg header */
+ struct mjpeg_header header_struct;
+ struct mjpeg_header *header;
+
+ /* ipc */
+ void *ipc_hdl;
+ struct delta_buf *ipc_buf;
+
+ /* decoded output frame */
+ struct delta_frame *out_frame;
+
+ unsigned char str[3000];
+};
+
+#define to_ctx(ctx) ((struct delta_mjpeg_ctx *)(ctx)->priv)
+
+static char *ipc_open_param_str(struct jpeg_video_decode_init_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_video_decode_init_params_t\n"
+ "circular_buffer_begin_addr_p 0x%x\n"
+ "circular_buffer_end_addr_p 0x%x\n",
+ p->circular_buffer_begin_addr_p,
+ p->circular_buffer_end_addr_p);
+
+ return str;
+}
+
+static char *ipc_decode_param_str(struct jpeg_decode_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_decode_params_t\n"
+ "picture_start_addr_p 0x%x\n"
+ "picture_end_addr_p 0x%x\n"
+ "decoding_mode %d\n"
+ "display_buffer_addr.display_decimated_luma_p 0x%x\n"
+ "display_buffer_addr.display_decimated_chroma_p 0x%x\n"
+ "main_aux_enable %d\n"
+ "additional_flags 0x%x\n"
+ "field_flag %x\n"
+ "is_jpeg_image %x\n",
+ p->picture_start_addr_p,
+ p->picture_end_addr_p,
+ p->decoding_mode,
+ p->display_buffer_addr.display_decimated_luma_p,
+ p->display_buffer_addr.display_decimated_chroma_p,
+ p->main_aux_enable, p->additional_flags,
+ p->field_flag,
+ p->is_jpeg_image);
+
+ return str;
+}
+
+static inline bool is_stream_error(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline const char *err_str(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_NO_ERROR:
+ return "JPEG_DECODER_NO_ERROR";
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ return "JPEG_DECODER_UNDEFINED_HUFF_TABLE";
+ case JPEG_DECODER_UNSUPPORTED_MARKER:
+ return "JPEG_DECODER_UNSUPPORTED_MARKER";
+ case JPEG_DECODER_UNABLE_ALLOCATE_MEMORY:
+ return "JPEG_DECODER_UNABLE_ALLOCATE_MEMORY";
+ case JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS:
+ return "JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS";
+ case JPEG_DECODER_BAD_PARAMETER:
+ return "JPEG_DECODER_BAD_PARAMETER";
+ case JPEG_DECODER_DECODE_ERROR:
+ return "JPEG_DECODER_DECODE_ERROR";
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ return "JPEG_DECODER_BAD_RESTART_MARKER";
+ case JPEG_DECODER_UNSUPPORTED_COLORSPACE:
+ return "JPEG_DECODER_UNSUPPORTED_COLORSPACE";
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ return "JPEG_DECODER_BAD_SOS_SPECTRAL";
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ return "JPEG_DECODER_BAD_SOS_SUCCESSIVE";
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ return "JPEG_DECODER_BAD_HEADER_LENGTH";
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ return "JPEG_DECODER_BAD_COUNT_VALUE";
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ return "JPEG_DECODER_BAD_DHT_MARKER";
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ return "JPEG_DECODER_BAD_INDEX_VALUE";
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES";
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ return "JPEG_DECODER_BAD_QUANT_TABLE_LENGTH";
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_QUANT_TABLES";
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return "JPEG_DECODER_BAD_COMPONENT_COUNT";
+ case JPEG_DECODER_DIVIDE_BY_ZERO_ERROR:
+ return "JPEG_DECODER_DIVIDE_BY_ZERO_ERROR";
+ case JPEG_DECODER_NOT_JPG_IMAGE:
+ return "JPEG_DECODER_NOT_JPG_IMAGE";
+ case JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE:
+ return "JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE";
+ case JPEG_DECODER_UNSUPPORTED_SCALING:
+ return "JPEG_DECODER_UNSUPPORTED_SCALING";
+ case JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE:
+ return "JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE";
+ case JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE:
+ return "JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE";
+ case JPEG_DECODER_BAD_VALUE_FROM_RED:
+ return "JPEG_DECODER_BAD_VALUE_FROM_RED";
+ case JPEG_DECODER_BAD_SUBREGION_PARAMETERS:
+ return "JPEG_DECODER_BAD_SUBREGION_PARAMETERS";
+ case JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED:
+ return "JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED";
+ case JPEG_DECODER_ERROR_TASK_TIMEOUT:
+ return "JPEG_DECODER_ERROR_TASK_TIMEOUT";
+ case JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED:
+ return "JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED";
+ default:
+ return "!unknown MJPEG error!";
+ }
+}
+
+static bool delta_mjpeg_check_status(struct delta_ctx *pctx,
+ struct jpeg_decode_return_params_t *status)
+{
+ struct delta_dev *delta = pctx->dev;
+ bool dump = false;
+
+ if (status->error_code == JPEG_DECODER_NO_ERROR)
+ goto out;
+
+ if (is_stream_error(status->error_code)) {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: stream error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->stream_errors++;
+ } else {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: decode error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->decode_errors++;
+ dump = true;
+ }
+
+out:
+ dev_dbg(delta->dev,
+ "%s firmware: decoding time(us)=%d\n", pctx->name,
+ status->decode_time_in_us);
+
+ return dump;
+}
+
+static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_video_decode_init_params_t params_struct;
+ struct jpeg_video_decode_init_params_t *params = &params_struct;
+ struct delta_buf *ipc_buf;
+ u32 ipc_buf_size;
+ struct delta_ipc_param ipc_param;
+ void *hdl;
+
+ memset(params, 0, sizeof(*params));
+ params->circular_buffer_begin_addr_p = 0x00000000;
+ params->circular_buffer_end_addr_p = 0xffffffff;
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_buf_size = sizeof(struct jpeg_decode_params_t) +
+ sizeof(struct jpeg_decode_return_params_t);
+ ret = delta_ipc_open(pctx, "JPEG_DECODER_HW0", &ipc_param,
+ ipc_buf_size, &ipc_buf, &hdl);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+ return ret;
+ }
+
+ ctx->ipc_buf = ipc_buf;
+ ctx->ipc_hdl = hdl;
+
+ return 0;
+}
+
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_decode_params_t *params = ctx->ipc_buf->vaddr;
+ struct jpeg_decode_return_params_t *status =
+ ctx->ipc_buf->vaddr + sizeof(*params);
+ struct delta_frame *frame;
+ struct delta_ipc_param ipc_param, ipc_status;
+
+ ret = delta_get_free_frame(pctx, &frame);
+ if (ret)
+ return ret;
+
+ memset(params, 0, sizeof(*params));
+
+ params->picture_start_addr_p = (u32)(au->paddr);
+ params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+
+ /*
+ * !WARNING!
+ * the NV12 decoded frame is only available
+ * on decimated output when enabling flag
+ * "JPEG_ADDITIONAL_FLAG_420MB"...
+ * the non decimated output gives YUV422SP
+ */
+ params->main_aux_enable = JPEG_DISP_AUX_EN;
+ params->additional_flags = JPEG_ADDITIONAL_FLAG_420MB;
+ params->horizontal_decimation_factor = JPEG_HDEC_1;
+ params->vertical_decimation_factor = JPEG_VDEC_1;
+ params->decoding_mode = JPEG_NORMAL_DECODE;
+
+ params->display_buffer_addr.struct_size =
+ sizeof(struct jpeg_display_buffer_address_t);
+ params->display_buffer_addr.display_decimated_luma_p =
+ (u32)frame->paddr;
+ params->display_buffer_addr.display_decimated_chroma_p =
+ (u32)(frame->paddr
+ + frame->info.aligned_width * frame->info.aligned_height);
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ /* status */
+ memset(status, 0, sizeof(*status));
+ status->error_code = JPEG_DECODER_NO_ERROR;
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_status.size = sizeof(*status);
+ ipc_status.data = status;
+ ret = delta_ipc_decode(ctx->ipc_hdl, &ipc_param, &ipc_status);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ return ret;
+ }
+
+ pctx->decoded_frames++;
+
+ /* check firmware decoding status */
+ if (delta_mjpeg_check_status(pctx, status)) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ }
+
+ frame->field = V4L2_FIELD_NONE;
+ frame->flags = V4L2_BUF_FLAG_KEYFRAME;
+ frame->state |= DELTA_FRAME_DEC;
+
+ ctx->out_frame = frame;
+
+ return 0;
+}
+
+static int delta_mjpeg_open(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ pctx->priv = ctx;
+
+ return 0;
+}
+
+static int delta_mjpeg_close(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (ctx->ipc_hdl) {
+ delta_ipc_close(ctx->ipc_hdl);
+ ctx->ipc_hdl = NULL;
+ }
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int delta_mjpeg_get_streaminfo(struct delta_ctx *pctx,
+ struct delta_streaminfo *streaminfo)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->header)
+ goto nodata;
+
+ streaminfo->streamformat = V4L2_PIX_FMT_MJPEG;
+ streaminfo->width = ctx->header->frame_width;
+ streaminfo->height = ctx->header->frame_height;
+
+ /* progressive stream */
+ streaminfo->field = V4L2_FIELD_NONE;
+
+ streaminfo->dpb = 1;
+
+ return 0;
+
+nodata:
+ return -ENODATA;
+}
+
+static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret;
+ struct delta_au au = *pau;
+ unsigned int data_offset;
+ struct mjpeg_header *header = &ctx->header_struct;
+
+ if (!ctx->header) {
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+ if (header->frame_width * header->frame_height >
+ DELTA_MJPEG_MAX_RESO) {
+ dev_err(delta->dev,
+ "%s stream resolution too large: %dx%d > %d pixels budget\n",
+ pctx->name,
+ header->frame_width,
+ header->frame_height, DELTA_MJPEG_MAX_RESO);
+ ret = -EINVAL;
+ goto err;
+ }
+ ctx->header = header;
+ goto out;
+ }
+
+ if (!ctx->ipc_hdl) {
+ ret = delta_mjpeg_ipc_open(pctx);
+ if (ret)
+ goto err;
+ }
+
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ctx->header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+
+ au.paddr += data_offset;
+ au.vaddr += data_offset;
+
+ ret = delta_mjpeg_ipc_decode(pctx, &au);
+ if (ret)
+ goto err;
+
+out:
+ return 0;
+
+err:
+ return ret;
+}
+
+static int delta_mjpeg_get_frame(struct delta_ctx *pctx,
+ struct delta_frame **frame)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->out_frame)
+ return -ENODATA;
+
+ *frame = ctx->out_frame;
+
+ ctx->out_frame = NULL;
+
+ return 0;
+}
+
+const struct delta_dec mjpegdec = {
+ .name = "MJPEG",
+ .streamformat = V4L2_PIX_FMT_MJPEG,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .open = delta_mjpeg_open,
+ .close = delta_mjpeg_close,
+ .get_streaminfo = delta_mjpeg_get_streaminfo,
+ .get_frameinfo = delta_get_frameinfo_default,
+ .decode = delta_mjpeg_decode,
+ .get_frame = delta_mjpeg_get_frame,
+ .recycle = delta_recycle_default,
+};
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-fw.h b/drivers/media/platform/sti/delta/delta-mjpeg-fw.h
new file mode 100644
index 000000000000..de803d0c2fe8
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-fw.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_MJPEG_FW_H
+#define DELTA_MJPEG_FW_H
+
+/*
+ * struct jpeg_decoded_buffer_address_t
+ *
+ * defines the addresses where the decoded picture/additional
+ * info related to the block structures will be stored
+ *
+ * @luma_p: address of the luma buffer
+ * @chroma_p: address of the chroma buffer
+ */
+struct jpeg_decoded_buffer_address_t {
+ u32 luma_p;
+ u32 chroma_p;
+};
+
+/*
+ * struct jpeg_display_buffer_address_t
+ *
+ * defines the addresses (used by the Display Reconstruction block)
+ * where the pictures to be displayed will be stored
+ *
+ * @struct_size: size of the structure in bytes
+ * @display_luma_p: address of the luma buffer
+ * @display_chroma_p: address of the chroma buffer
+ * @display_decimated_luma_p: address of the decimated luma buffer
+ * @display_decimated_chroma_p: address of the decimated chroma buffer
+ */
+struct jpeg_display_buffer_address_t {
+ u32 struct_size;
+ u32 display_luma_p;
+ u32 display_chroma_p;
+ u32 display_decimated_luma_p;
+ u32 display_decimated_chroma_p;
+};
+
+/*
+ * used for enabling main/aux outputs for both display &
+ * reference reconstruction blocks
+ */
+enum jpeg_rcn_ref_disp_enable_t {
+ /* enable decimated (for display) reconstruction */
+ JPEG_DISP_AUX_EN = 0x00000010,
+ /* enable main (for display) reconstruction */
+ JPEG_DISP_MAIN_EN = 0x00000020,
+ /* enable both main & decimated (for display) reconstruction */
+ JPEG_DISP_AUX_MAIN_EN = 0x00000030,
+ /* enable only reference output(ex. for trick modes) */
+ JPEG_REF_MAIN_EN = 0x00000100,
+ /*
+ * enable reference output with decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_AUX_EN = 0x00000110,
+ /*
+ * enable reference output with main
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_EN = 0x00000120,
+ /*
+ * enable reference output with main & decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_AUX_EN = 0x00000130
+};
+
+/* identifies the horizontal decimation factor */
+enum jpeg_horizontal_deci_factor_t {
+ /* no resize */
+ JPEG_HDEC_1 = 0x00000000,
+ /* Advanced H/2 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_2 = 0x00000101,
+ /* Advanced H/4 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_4 = 0x00000102
+};
+
+/* identifies the vertical decimation factor */
+enum jpeg_vertical_deci_factor_t {
+ /* no resize */
+ JPEG_VDEC_1 = 0x00000000,
+ /* V/2 , progressive resize */
+ JPEG_VDEC_ADVANCED_2_PROG = 0x00000204,
+ /* V/2 , interlaced resize */
+ JPEG_VDEC_ADVANCED_2_INT = 0x00000208
+};
+
+/* status of the decoding process */
+enum jpeg_decoding_error_t {
+ JPEG_DECODER_NO_ERROR = 0,
+ JPEG_DECODER_UNDEFINED_HUFF_TABLE = 1,
+ JPEG_DECODER_UNSUPPORTED_MARKER = 2,
+ JPEG_DECODER_UNABLE_ALLOCATE_MEMORY = 3,
+ JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS = 4,
+ JPEG_DECODER_BAD_PARAMETER = 5,
+ JPEG_DECODER_DECODE_ERROR = 6,
+ JPEG_DECODER_BAD_RESTART_MARKER = 7,
+ JPEG_DECODER_UNSUPPORTED_COLORSPACE = 8,
+ JPEG_DECODER_BAD_SOS_SPECTRAL = 9,
+ JPEG_DECODER_BAD_SOS_SUCCESSIVE = 10,
+ JPEG_DECODER_BAD_HEADER_LENGTH = 11,
+ JPEG_DECODER_BAD_COUNT_VALUE = 12,
+ JPEG_DECODER_BAD_DHT_MARKER = 13,
+ JPEG_DECODER_BAD_INDEX_VALUE = 14,
+ JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES = 15,
+ JPEG_DECODER_BAD_QUANT_TABLE_LENGTH = 16,
+ JPEG_DECODER_BAD_NUMBER_QUANT_TABLES = 17,
+ JPEG_DECODER_BAD_COMPONENT_COUNT = 18,
+ JPEG_DECODER_DIVIDE_BY_ZERO_ERROR = 19,
+ JPEG_DECODER_NOT_JPG_IMAGE = 20,
+ JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE = 21,
+ JPEG_DECODER_UNSUPPORTED_SCALING = 22,
+ JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE = 23,
+ JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE = 24,
+ JPEG_DECODER_BAD_VALUE_FROM_RED = 25,
+ JPEG_DECODER_BAD_SUBREGION_PARAMETERS = 26,
+ JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED = 27,
+ JPEG_DECODER_ERROR_TASK_TIMEOUT = 28,
+ JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED = 29
+};
+
+/* identifies the decoding mode */
+enum jpeg_decoding_mode_t {
+ JPEG_NORMAL_DECODE = 0,
+};
+
+enum jpeg_additional_flags_t {
+ JPEG_ADDITIONAL_FLAG_NONE = 0,
+ /* request firmware to return values of the CEH registers */
+ JPEG_ADDITIONAL_FLAG_CEH = 1,
+ /* output storage of auxiliary reconstruction in Raster format. */
+ JPEG_ADDITIONAL_FLAG_RASTER = 64,
+ /* output storage of auxiliary reconstruction in 420MB format. */
+ JPEG_ADDITIONAL_FLAG_420MB = 128
+};
+
+/*
+ * struct jpeg_video_decode_init_params_t - initialization command parameters
+ *
+ * @circular_buffer_begin_addr_p: start address of fw circular buffer
+ * @circular_buffer_end_addr_p: end address of fw circular buffer
+ */
+struct jpeg_video_decode_init_params_t {
+ u32 circular_buffer_begin_addr_p;
+ u32 circular_buffer_end_addr_p;
+ u32 reserved;
+};
+
+/*
+ * struct jpeg_decode_params_t - decode command parameters
+ *
+ * @picture_start_addr_p: start address of jpeg picture
+ * @picture_end_addr_p: end address of jpeg picture
+ * @decoded_buffer_addr: decoded picture buffer
+ * @display_buffer_addr: display picture buffer
+ * @main_aux_enable: enable main and/or aux outputs
+ * @horizontal_decimation_factor:horizontal decimation factor
+ * @vertical_decimation_factor: vertical decimation factor
+ * @xvalue0: the x(0) coordinate for subregion decoding
+ * @xvalue1: the x(1) coordinate for subregion decoding
+ * @yvalue0: the y(0) coordinate for subregion decoding
+ * @yvalue1: the y(1) coordinate for subregion decoding
+ * @decoding_mode: decoding mode
+ * @additional_flags: additional flags
+ * @field_flag: determines frame/field scan
+ * @is_jpeg_image: 1 = still jpeg, 0 = motion jpeg
+ */
+struct jpeg_decode_params_t {
+ u32 picture_start_addr_p;
+ u32 picture_end_addr_p;
+ struct jpeg_decoded_buffer_address_t decoded_buffer_addr;
+ struct jpeg_display_buffer_address_t display_buffer_addr;
+ enum jpeg_rcn_ref_disp_enable_t main_aux_enable;
+ enum jpeg_horizontal_deci_factor_t horizontal_decimation_factor;
+ enum jpeg_vertical_deci_factor_t vertical_decimation_factor;
+ u32 xvalue0;
+ u32 xvalue1;
+ u32 yvalue0;
+ u32 yvalue1;
+ enum jpeg_decoding_mode_t decoding_mode;
+ u32 additional_flags;
+ u32 field_flag;
+ u32 reserved;
+ u32 is_jpeg_image;
+};
+
+/*
+ * struct jpeg_decode_return_params_t
+ *
+ * status returned by firmware after decoding
+ *
+ * @decode_time_in_us: decoding time in microseconds
+ * @pm_cycles: profiling information
+ * @pm_dmiss: profiling information
+ * @pm_imiss: profiling information
+ * @pm_bundles: profiling information
+ * @pm_pft: profiling information
+ * @error_code: status of the decoding process
+ * @ceh_registers: array where values of the Contrast Enhancement
+ * Histogram (CEH) registers will be stored.
+ * ceh_registers[0] corresponds to register MBE_CEH_0_7,
+ * ceh_registers[1] corresponds to register MBE_CEH_8_15,
+ * ceh_registers[2] corresponds to register MBE_CEH_16_23.
+ * Note that elements of this array will be updated only
+ * if additional_flags has JPEG_ADDITIONAL_FLAG_CEH set.
+ */
+struct jpeg_decode_return_params_t {
+ /* profiling info */
+ u32 decode_time_in_us;
+ u32 pm_cycles;
+ u32 pm_dmiss;
+ u32 pm_imiss;
+ u32 pm_bundles;
+ u32 pm_pft;
+ enum jpeg_decoding_error_t error_code;
+ u32 ceh_registers[32];
+};
+
+#endif /* DELTA_MJPEG_FW_H */
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c b/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c
new file mode 100644
index 000000000000..a8fd8fa0ecb5
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-hdr.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "delta.h"
+#include "delta-mjpeg.h"
+
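+/*
+ * JPEG marker identifiers (second byte after a 0xff prefix):
+ * SOF0 = baseline DCT, SOF1 = extended sequential DCT,
+ * SOI = start of image
+ */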
+#define MJPEG_SOF_0 0xc0
+#define MJPEG_SOF_1 0xc1
+#define MJPEG_SOI 0xd8
+#define MJPEG_MARKER 0xff
+
+static char *header_str(struct mjpeg_header *header,
+ char *str,
+ unsigned int len)
+{
+ char *cur = str;
+ unsigned int left = len;
+
+ if (!header)
+ return "";
+
+ snprintf(cur, left, "[MJPEG header]\n"
+ "|- length = %d\n"
+ "|- precision = %d\n"
+ "|- width = %d\n"
+ "|- height = %d\n"
+ "|- components = %d\n",
+ header->length,
+ header->sample_precision,
+ header->frame_width,
+ header->frame_height,
+ header->nb_of_components);
+
+ return str;
+}
+
+static int delta_mjpeg_read_sof(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned int offset = 0;
+
+ if (size < 64)
+ goto err_no_more;
+
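+ /*
+ * SOF payload layout (after the marker): length (2 bytes),
+ * sample precision (1), frame height (2), frame width (2),
+ * number of components (1), then per-component parameters
+ */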
+ memset(header, 0, sizeof(*header));
+ header->length = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->sample_precision = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+ header->frame_height = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->frame_width = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->nb_of_components = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+
+ if (header->nb_of_components >= MJPEG_MAX_COMPONENTS) {
+ dev_err(delta->dev,
+ "%s unsupported number of components (%d > %d)\n",
+ pctx->name, header->nb_of_components,
+ MJPEG_MAX_COMPONENTS);
+ return -EINVAL;
+ }
+
+ if ((offset + header->nb_of_components *
+ sizeof(header->components[0])) > size)
+ goto err_no_more;
+
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s sof: reached end of %d size input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+}
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned char str[200];
+
+ int ret = 0;
+ unsigned int offset = 0;
+ unsigned int soi = 0;
+
+ if (size < 2)
+ goto err_no_more;
+
+ offset = 0;
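+ /*
+ * scan the stream byte per byte for 0xff marker prefixes:
+ * record the SOI position, then parse the first
+ * SOF0/SOF1 marker found
+ */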
+ while (1) {
+ if (data[offset] == MJPEG_MARKER)
+ switch (data[offset + 1]) {
+ case MJPEG_SOI:
+ soi = 1;
+ *data_offset = offset;
+ break;
+
+ case MJPEG_SOF_0:
+ case MJPEG_SOF_1:
+ if (!soi) {
+ dev_err(delta->dev,
+ "%s wrong sequence, got SOF while SOI not seen\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ ret = delta_mjpeg_read_sof(pctx,
+ &data[offset + 2],
+ size - (offset + 2),
+ header);
+ if (ret)
+ goto err;
+
+ goto done;
+
+ default:
+ break;
+ }
+
+ offset++;
+ if ((offset + 2) >= size)
+ goto err_no_more;
+ }
+
+done:
+ dev_dbg(delta->dev,
+ "%s found header @ offset %d:\n%s", pctx->name,
+ *data_offset,
+ header_str(header, str, sizeof(str)));
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s no header found within %d bytes input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+
+err:
+ return ret;
+}
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg.h b/drivers/media/platform/sti/delta/delta-mjpeg.h
new file mode 100644
index 000000000000..18e6b37217ee
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-mjpeg.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_MJPEG_H
+#define DELTA_MJPEG_H
+
+#include "delta.h"
+
+struct mjpeg_component {
+ unsigned int id; /* 1=Y, 2=Cb, 3=Cr, 4=L, 5=Q */
+ unsigned int h_sampling_factor;
+ unsigned int v_sampling_factor;
+ unsigned int quant_table_index;
+};
+
+#define MJPEG_MAX_COMPONENTS 5
+
+struct mjpeg_header {
+ unsigned int length;
+ unsigned int sample_precision;
+ unsigned int frame_width;
+ unsigned int frame_height;
+ unsigned int nb_of_components;
+ struct mjpeg_component components[MJPEG_MAX_COMPONENTS];
+};
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset);
+
+#endif /* DELTA_MJPEG_H */
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
new file mode 100644
index 000000000000..c6f2e244b7a8
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -0,0 +1,1993 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "delta.h"
+#include "delta-debug.h"
+#include "delta-ipc.h"
+
+#define DELTA_NAME "st-delta"
+
+#define DELTA_PREFIX "[---:----]"
+
+#define to_ctx(__fh) container_of(__fh, struct delta_ctx, fh)
+#define to_au(__vbuf) container_of(__vbuf, struct delta_au, vbuf)
+#define to_frame(__vbuf) container_of(__vbuf, struct delta_frame, vbuf)
+
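+/* call an optional decoder ops, defaulting to 0 (success) when not implemented */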
+#define call_dec_op(dec, op, args...)\
+ ((dec && (dec)->op) ? (dec)->op(args) : 0)
+
+/* registry of available decoders */
+static const struct delta_dec *delta_decoders[] = {
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+ &mjpegdec,
+#endif
+};
+
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
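+ /* NV12: full-resolution luma plane + half-resolution interleaved CbCr plane = 12 bits per pixel */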
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static void dump_au(struct delta_ctx *ctx, struct delta_au *au)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first & last 10 bytes */
+ u8 *data = (u8 *)(au->vaddr);
+
+ if (au->size <= (size * 2))
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ au->size, data);
+ else
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph..%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ size, data, size, data + au->size - size);
+}
+
+static void dump_frame(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first 10 bytes */
+ u8 *data = (u8 *)(frame->vaddr);
+
+ dev_dbg(delta->dev, "%s dump frame[%d] dts=%lld type=%s field=%s data=%*ph\n",
+ ctx->name, frame->index, frame->dts,
+ frame_type_str(frame->flags),
+ frame_field_str(frame->field),
+ size, data);
+}
+
+static void delta_au_done(struct delta_ctx *ctx, struct delta_au *au, int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = &au->vbuf;
+ vbuf->sequence = ctx->au_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
+
+static void delta_frame_done(struct delta_ctx *ctx, struct delta_frame *frame,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ dump_frame(ctx, frame);
+
+ /* decoded frame is now output to user */
+ frame->state |= DELTA_FRAME_OUT;
+
+ vbuf = &frame->vbuf;
+ vbuf->sequence = ctx->frame_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ if (frame->info.size) /* ignore EOS */
+ ctx->output_frames++;
+}
+
+static void requeue_free_frames(struct delta_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ unsigned int i;
+
+ /* requeue all free frames */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (frame->state == DELTA_FRAME_FREE) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+ }
+}
+
+static int delta_recycle(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ const struct delta_dec *dec = ctx->dec;
+
+ /* recycle frame on decoder side */
+ call_dec_op(dec, recycle, ctx, frame);
+
+ /* this frame is no more output */
+ frame->state &= ~DELTA_FRAME_OUT;
+
+ /* requeue free frame */
+ if (frame->state == DELTA_FRAME_FREE) {
+ struct vb2_v4l2_buffer *vbuf = &frame->vbuf;
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+
+ /* reset other frame fields */
+ frame->flags = 0;
+ frame->dts = 0;
+
+ return 0;
+}
+
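+/*
+ * Timestamps FIFO: a dts value is pushed when an access unit is sent
+ * to the decoder and popped when a decoded frame is returned, so that
+ * each output frame inherits the timestamp of the access unit it was
+ * decoded from.
+ */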
+static void delta_push_dts(struct delta_ctx *ctx, u64 val)
+{
+ struct delta_dts *dts;
+
+ dts = kzalloc(sizeof(*dts), GFP_KERNEL);
+ if (!dts)
+ return;
+
+ INIT_LIST_HEAD(&dts->list);
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ dts->val = val;
+ list_add_tail(&dts->list, &ctx->dts);
+}
+
+static void delta_pop_dts(struct delta_ctx *ctx, u64 *val)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_dts *dts;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ if (list_empty(&ctx->dts)) {
+ dev_warn(delta->dev, "%s no dts to pop ... output dts = 0\n",
+ ctx->name);
+ *val = 0;
+ return;
+ }
+
+ dts = list_first_entry(&ctx->dts, struct delta_dts, list);
+ list_del(&dts->list);
+
+ *val = dts->val;
+
+ kfree(dts);
+}
+
+static void delta_flush_dts(struct delta_ctx *ctx)
+{
+ struct delta_dts *dts;
+ struct delta_dts *next;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+
+ /* free all pending dts */
+ list_for_each_entry_safe(dts, next, &ctx->dts, list)
+ kfree(dts);
+
+ /* reset list */
+ INIT_LIST_HEAD(&ctx->dts);
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
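+ /* chroma is subsampled by 2 in both directions, hence the constraint below: */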
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_au_size(u32 w, u32 h)
+{
+ /*
+ * for an MJPEG stream encoded from the YUV422 pixel format,
+ * assuming a compression ratio of 2, the maximum size
+ * of an access unit is (width x height x 2) / 2,
+ * i.e. (width x height)
+ */
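+ /* e.g. a 1920x1080 stream leads to a worst-case access unit of about 2 MB */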
+ return (w * h);
+}
+
+static void set_default_params(struct delta_ctx *ctx)
+{
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = DELTA_DEFAULT_WIDTH;
+ frameinfo->height = DELTA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ frameinfo->field = V4L2_FIELD_NONE;
+ frameinfo->colorspace = V4L2_COLORSPACE_REC709;
+ frameinfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ frameinfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ frameinfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ memset(streaminfo, 0, sizeof(*streaminfo));
+ streaminfo->streamformat = DELTA_DEFAULT_STREAMFORMAT;
+ streaminfo->width = DELTA_DEFAULT_WIDTH;
+ streaminfo->height = DELTA_DEFAULT_HEIGHT;
+ streaminfo->field = V4L2_FIELD_NONE;
+ streaminfo->colorspace = V4L2_COLORSPACE_REC709;
+ streaminfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ streaminfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ streaminfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_au_size = estimated_au_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct delta_dec *delta_find_decoder(struct delta_ctx *ctx,
+ u32 streamformat,
+ u32 pixelformat)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ dec = delta->decoders[i];
+ if ((dec->pixelformat == pixelformat) &&
+ (dec->streamformat == streamformat))
+ return dec;
+ }
+
+ return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i])
+ return;
+ }
+
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ register_format(delta->decoders[i]->pixelformat,
+ delta->pixelformats,
+ &delta->nb_of_pixelformats);
+
+ register_format(delta->decoders[i]->streamformat,
+ delta->streamformats,
+ &delta->nb_of_streamformats);
+ }
+}
+
+static void register_decoders(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(delta_decoders); i++) {
+ if (delta->nb_of_decoders >= DELTA_MAX_DECODERS) {
+ dev_dbg(delta->dev,
+ "%s failed to register %s decoder (%d maximum reached)\n",
+ DELTA_PREFIX, delta_decoders[i]->name,
+ DELTA_MAX_DECODERS);
+ return;
+ }
+
+ delta->decoders[delta->nb_of_decoders++] = delta_decoders[i];
+ dev_info(delta->dev, "%s %s decoder registered\n",
+ DELTA_PREFIX, delta_decoders[i]->name);
+ }
+}
+
+static void delta_lock(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ mutex_lock(&delta->lock);
+}
+
+static void delta_unlock(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ mutex_unlock(&delta->lock);
+}
+
+static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
+ u32 pixelformat, const struct delta_dec **pdec)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ int ret;
+
+ dec = delta_find_decoder(ctx, streamformat, pixelformat);
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(delta->dev, "%s one decoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ delta->instance_id, (char *)&streamformat);
+
+ /* open decoder instance */
+ ret = call_dec_op(dec, open, ctx);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to open decoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(delta->dev, "%s %s decoder opened\n", ctx->name, dec->name);
+
+ *pdec = dec;
+
+ return ret;
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int delta_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ strlcpy(cap->driver, DELTA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, delta->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ delta->pdev->name);
+
+ return 0;
+}
+
+static int delta_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->streamformats[f->index];
+
+ return 0;
+}
+
+static int delta_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->pixelformats[f->index];
+
+ return 0;
+}
+
+static int delta_g_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (OUTPUT): no stream information available, default to %s\n",
+ ctx->name,
+ delta_streaminfo_str(streaminfo, str, sizeof(str)));
+
+ pix->pixelformat = streaminfo->streamformat;
+ pix->width = streaminfo->width;
+ pix->height = streaminfo->height;
+ pix->field = streaminfo->field;
+ pix->bytesperline = 0;
+ pix->sizeimage = ctx->max_au_size;
+ pix->colorspace = streaminfo->colorspace;
+ pix->xfer_func = streaminfo->xfer_func;
+ pix->ycbcr_enc = streaminfo->ycbcr_enc;
+ pix->quantization = streaminfo->quantization;
+
+ return 0;
+}
+
+static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_FRAMEINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (CAPTURE): no frame information available, default to %s\n",
+ ctx->name,
+ delta_frameinfo_str(frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo->pixelformat;
+ pix->width = frameinfo->aligned_width;
+ pix->height = frameinfo->aligned_height;
+ pix->field = frameinfo->field;
+ pix->bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ pix->sizeimage = frameinfo->size;
+
+ if (ctx->flags & DELTA_FLAG_STREAMINFO) {
+ /* align colorspace & friends on stream ones if any set */
+ frameinfo->colorspace = streaminfo->colorspace;
+ frameinfo->xfer_func = streaminfo->xfer_func;
+ frameinfo->ycbcr_enc = streaminfo->ycbcr_enc;
+ frameinfo->quantization = streaminfo->quantization;
+ }
+ pix->colorspace = frameinfo->colorspace;
+ pix->xfer_func = frameinfo->xfer_func;
+ pix->ycbcr_enc = frameinfo->ycbcr_enc;
+ pix->quantization = frameinfo->quantization;
+
+ return 0;
+}
+
+static int delta_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+ u32 au_size;
+
+ dec = delta_find_decoder(ctx, streamformat, ctx->frameinfo.pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image
+ (&pix->width,
+ DELTA_MIN_WIDTH,
+ dec->max_width ? dec->max_width : DELTA_MAX_WIDTH,
+ 0,
+ &pix->height,
+ DELTA_MIN_HEIGHT,
+ dec->max_height ? dec->max_height : DELTA_MAX_HEIGHT,
+ 0, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+
+ au_size = estimated_au_size(pix->width, pix->height);
+ if (pix->sizeimage < au_size) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): size updated %d -> %d to fit estimated size\n",
+ ctx->name, pix->sizeimage, au_size);
+ pix->sizeimage = au_size;
+ }
+
+ pix->bytesperline = 0;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+
+ dec = delta_find_decoder(ctx, ctx->streaminfo.streamformat,
+ pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ DELTA_MIN_WIDTH, DELTA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ DELTA_MIN_HEIGHT, DELTA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ /* default decoder alignment constraint */
+ width = ALIGN(pix->width, DELTA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, DELTA_HEIGHT_ALIGNMENT);
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit decoder alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ pix->width = width;
+ pix->height = height;
+ pix->bytesperline = frame_stride(pix->width, pixelformat);
+ pix->sizeimage = frame_size(pix->width, pix->height, pixelformat);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_s_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_queue *vq;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ ret = delta_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_au_size = pix->sizeimage;
+ ctx->streaminfo.width = pix->width;
+ ctx->streaminfo.height = pix->height;
+ ctx->streaminfo.streamformat = pix->pixelformat;
+ ctx->streaminfo.colorspace = pix->colorspace;
+ ctx->streaminfo.xfer_func = pix->xfer_func;
+ ctx->streaminfo.ycbcr_enc = pix->ycbcr_enc;
+ ctx->streaminfo.quantization = pix->quantization;
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo frameinfo;
+ unsigned char str[100] = "";
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ if (ctx->state < DELTA_STATE_READY) {
+ /*
+ * decoder not yet opened and no valid stream header found:
+ * format cannot be negotiated with the decoder, so at least
+ * check the pixel format and negotiate resolution boundaries
+ * and alignment...
+ */
+ ret = delta_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* set frame information to decoder */
+ memset(&frameinfo, 0, sizeof(frameinfo));
+ frameinfo.pixelformat = pix->pixelformat;
+ frameinfo.width = pix->width;
+ frameinfo.height = pix->height;
+ frameinfo.aligned_width = pix->width;
+ frameinfo.aligned_height = pix->height;
+ frameinfo.size = pix->sizeimage;
+ frameinfo.field = pix->field;
+ frameinfo.colorspace = pix->colorspace;
+ frameinfo.xfer_func = pix->xfer_func;
+ frameinfo.ycbcr_enc = pix->ycbcr_enc;
+ frameinfo.quantization = pix->quantization;
+ ret = call_dec_op(dec, set_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ /* then get what decoder can really do */
+ ret = call_dec_op(dec, get_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+ ctx->frameinfo = frameinfo;
+ dev_dbg(delta->dev,
+ "%s V4L2 SET_FMT (CAPTURE): frameinfo updated to %s\n",
+ ctx->name,
+ delta_frameinfo_str(&frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo.pixelformat;
+ pix->width = frameinfo.aligned_width;
+ pix->height = frameinfo.aligned_height;
+ pix->bytesperline = frame_stride(pix->width, pix->pixelformat);
+ pix->sizeimage = frameinfo.size;
+ pix->field = frameinfo.field;
+ pix->colorspace = frameinfo.colorspace;
+ pix->xfer_func = frameinfo.xfer_func;
+ pix->ycbcr_enc = frameinfo.ycbcr_enc;
+ pix->quantization = frameinfo.quantization;
+
+ return 0;
+}
+
+static int delta_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct v4l2_rect crop;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if ((ctx->flags & DELTA_FLAG_FRAMEINFO) &&
+ (frameinfo->flags & DELTA_FRAMEINFO_FLAG_CROP)) {
+ crop = frameinfo->crop;
+ } else {
+ /* default to video dimensions */
+ crop.left = 0;
+ crop.top = 0;
+ crop.width = frameinfo->width;
+ crop.height = frameinfo->height;
+ }
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ /* visible area inside video */
+ s->r = crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* up to aligned dimensions */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frameinfo->aligned_width;
+ s->r.height = frameinfo->aligned_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void delta_complete_eos(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct v4l2_event ev = {.type = V4L2_EVENT_EOS};
+
+ /*
+ * Send EOS to user:
+ * - by returning an empty frame flagged to V4L2_BUF_FLAG_LAST
+ * - and then send EOS event
+ */
+
+ /* empty frame */
+ frame->info.size = 0;
+
+ /* set the last buffer flag */
+ frame->flags |= V4L2_BUF_FLAG_LAST;
+
+ /* release frame to user */
+ delta_frame_done(ctx, frame, 0);
+
+ /* send EOS event */
+ v4l2_event_queue_fh(&ctx->fh, &ev);
+
+ dev_dbg(delta->dev, "%s EOS completed\n", ctx->name);
+}
+
+static int delta_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(cmd->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) &&
+ (cmd->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int delta_decoder_stop_cmd(struct delta_ctx *ctx, void *fh)
+{
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_dev *delta = ctx->dev;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+
+ dev_dbg(delta->dev, "%s EOS received\n", ctx->name);
+
+ if (ctx->state != DELTA_STATE_READY)
+ return 0;
+
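+ /*
+ * EOS sequence: drain the decoder, return every already-decoded
+ * frame to the user, then complete EOS with an empty frame flagged
+ * V4L2_BUF_FLAG_LAST (or delay it until a free frame is queued)
+ */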
+ /* drain the decoder */
+ call_dec_op(dec, drain, ctx);
+
+ /* release to user drained frames */
+ while (1) {
+ frame = NULL;
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ break;
+ }
+ if (frame) {
+ dev_dbg(delta->dev, "%s drain frame[%d]\n",
+ ctx->name, frame->index);
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+ }
+
+ /* try to complete EOS */
+ ret = delta_get_free_frame(ctx, &frame);
+ if (ret)
+ goto delay_eos;
+
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ return 0;
+
+delay_eos:
+ /*
+ * EOS completion from the driver is delayed because no free
+ * empty frame is currently available. Completion is postponed
+ * until the next frame_queue() call, when a free empty frame
+ * is guaranteed to be available.
+ */
+ ctx->state = DELTA_STATE_WF_EOS;
+ dev_dbg(delta->dev, "%s EOS delayed\n", ctx->name);
+
+ return 0;
+}
+
+static int delta_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ int ret = 0;
+
+ ret = delta_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ return delta_decoder_stop_cmd(ctx, fh);
+}
+
+static int delta_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* v4l2 ioctl ops */
+static const struct v4l2_ioctl_ops delta_ioctl_ops = {
+ .vidioc_querycap = delta_querycap,
+ .vidioc_enum_fmt_vid_cap = delta_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = delta_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = delta_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = delta_s_fmt_frame,
+ .vidioc_enum_fmt_vid_out = delta_enum_fmt_stream,
+ .vidioc_g_fmt_vid_out = delta_g_fmt_stream,
+ .vidioc_try_fmt_vid_out = delta_try_fmt_stream,
+ .vidioc_s_fmt_vid_out = delta_s_fmt_stream,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = delta_g_selection,
+ .vidioc_try_decoder_cmd = delta_try_decoder_cmd,
+ .vidioc_decoder_cmd = delta_decoder_cmd,
+ .vidioc_subscribe_event = delta_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * mem-to-mem operations
+ */
+
+static void delta_run_work(struct work_struct *work)
+{
+ struct delta_ctx *ctx = container_of(work, struct delta_ctx, run_work);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+ bool discard = false;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder opened yet\n", ctx->name);
+ return;
+ }
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s no buffer to decode\n", ctx->name);
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* enable the hardware */
+ if (!dec->pm) {
+ ret = delta_get_sync(ctx);
+ if (ret)
+ goto err;
+ }
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+
+ /*
+ * a (-ENODATA) return value refers to the interlaced stream case,
+ * where 2 access units are needed to get 1 frame. It does not mean
+ * that decoding failed; it indicates that the timestamp of this
+ * access unit shall not be taken into account and that the V4L2
+ * buffer associated with it shall be flagged with
+ * V4L2_BUF_FLAG_ERROR to inform the user of this situation
+ */
+ if (ret == -ENODATA) {
+ discard = true;
+ } else if (ret) {
+ dev_err(delta->dev, "%s decoding failed (%d)\n",
+ ctx->name, ret);
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ goto err;
+ }
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ /* push au timestamp in FIFO */
+ if (!discard)
+ delta_push_dts(ctx, au->dts);
+
+ /* get available decoded frames */
+ while (1) {
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ goto out;
+ }
+ if (ret) {
+ dev_err(delta->dev, "%s cannot get decoded frame (%d)\n",
+ ctx->name, ret);
+ goto out;
+ }
+ if (!frame) {
+ dev_err(delta->dev,
+ "%s NULL decoded frame\n",
+ ctx->name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+
+out:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, (discard ? -ENODATA : 0));
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+
+err:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, ret);
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void delta_device_run(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ queue_work(delta->work_queue, &ctx->run_work);
+}
+
+static void delta_job_abort(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int delta_job_ready(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ if (!src_bufs) {
+ dev_dbg(delta->dev, "%s not ready: not enough video buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(delta->dev, "%s not ready: not enough video capture buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(delta->dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ dev_dbg(delta->dev, "%s job ready\n", ctx->name);
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static struct v4l2_m2m_ops delta_m2m_ops = {
+ .device_run = delta_device_run,
+ .job_ready = delta_job_ready,
+ .job_abort = delta_job_abort,
+ .lock = delta_lock,
+ .unlock = delta_unlock,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int delta_vb2_au_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned int size = ctx->max_au_size;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ if (*num_buffers < 1)
+ *num_buffers = 1;
+ if (*num_buffers > DELTA_MAX_AUS)
+ *num_buffers = DELTA_MAX_AUS;
+
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int delta_vb2_au_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_au *au = to_au(vbuf);
+
+ if (!au->prepared) {
+ /* get memory addresses */
+ au->vaddr = vb2_plane_vaddr(&au->vbuf.vb2_buf, 0);
+ au->paddr = vb2_dma_contig_plane_dma_addr
+ (&au->vbuf.vb2_buf, 0);
+ au->prepared = true;
+ dev_dbg(delta->dev, "%s au[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, au->vaddr, &au->paddr);
+ }
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_setup_frame(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ if (frame->index >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s frame index=%d exceeds output frame count (%d)\n",
+ ctx->name, frame->index, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (ctx->nb_of_frames >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s number of frames exceeds output frame count (%d > %d)\n",
+ ctx->name, ctx->nb_of_frames, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (frame->index != ctx->nb_of_frames) {
+ dev_warn(delta->dev,
+ "%s frame index discontinuity detected, expected %d, got %d\n",
+ ctx->name, ctx->nb_of_frames, frame->index);
+ }
+
+ frame->state = DELTA_FRAME_FREE;
+ ctx->frames[ctx->nb_of_frames] = frame;
+ ctx->nb_of_frames++;
+
+ /* setup frame on decoder side */
+ return call_dec_op(dec, setup_frame, ctx, frame);
+}
+
+/*
+ * default implementation of the get_frameinfo decoder ops,
+ * deriving frame information from stream information, with
+ * the default pixel format and default alignment.
+ */
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo)
+{
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = streaminfo->width;
+ frameinfo->height = streaminfo->height;
+ frameinfo->aligned_width = ALIGN(streaminfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(streaminfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_CROP) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_CROP;
+ frameinfo->crop = streaminfo->crop;
+ }
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_PIXELASPECT;
+ frameinfo->pixelaspect = streaminfo->pixelaspect;
+ }
+ frameinfo->field = streaminfo->field;
+
+ return 0;
+}
+
+/*
+ * default implementation of the recycle decoder ops,
+ * which simply clears the "decoded" frame state
+ */
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame)
+{
+ frame->state &= ~DELTA_FRAME_DEC;
+
+ return 0;
+}
+
+static void dump_frames_status(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ unsigned int i;
+ struct delta_frame *frame;
+ unsigned char str[100] = "";
+
+ dev_info(delta->dev,
+ "%s dumping frames status...\n", ctx->name);
+
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ dev_info(delta->dev,
+ "%s frame[%d] %s\n",
+ ctx->name, frame->index,
+ frame_state_str(frame->state,
+ str, sizeof(str)));
+ }
+}
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+
+ *pframe = NULL;
+
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s no frame available",
+ ctx->name);
+ return -EIO;
+ }
+
+ frame = to_frame(vbuf);
+ frame->state &= ~DELTA_FRAME_M2M;
+ if (frame->state != DELTA_FRAME_FREE) {
+ dev_err(delta->dev,
+ "%s frame[%d] is not free\n",
+ ctx->name, frame->index);
+ dump_frames_status(ctx);
+ return -ENODATA;
+ }
+
+ dev_dbg(delta->dev,
+ "%s get free frame[%d]\n", ctx->name, frame->index);
+
+ *pframe = frame;
+ return 0;
+}
+
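+/*
+ * delta_get_sync()/delta_put_autosuspend() bracket hardware accesses:
+ * the IP is powered up synchronously before decoding and released
+ * with runtime PM autosuspend afterwards (see delta_run_work())
+ */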
+int delta_get_sync(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ int ret = 0;
+
+ /* enable the hardware */
+ ret = pm_runtime_get_sync(delta->dev);
+ if (ret < 0) {
+ dev_err(delta->dev, "%s pm_runtime_get_sync failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void delta_put_autosuspend(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ pm_runtime_put_autosuspend(delta->dev);
+}
+
+static void delta_vb2_au_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int delta_vb2_au_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ int ret = 0;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned char str1[100] = "";
+ unsigned char str2[100] = "";
+
+ if ((ctx->state != DELTA_STATE_WF_FORMAT) &&
+ (ctx->state != DELTA_STATE_WF_STREAMINFO))
+ return 0;
+
+ if (ctx->state == DELTA_STATE_WF_FORMAT) {
+ /* open decoder if not yet done */
+ ret = delta_open_decoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat, &dec);
+ if (ret)
+ goto err;
+ ctx->dec = dec;
+ ctx->state = DELTA_STATE_WF_STREAMINFO;
+ }
+
+ /*
+ * first buffer should contain stream header,
+ * decode it to get the infos related to stream
+ * such as width, height, dpb, ...
+ */
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s failed to start streaming, no stream header buffer enqueued\n",
+ ctx->name);
+ ret = -EINVAL;
+ goto err;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ delta_push_dts(ctx, au->dts);
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to start streaming, header decoding failed (%d)\n",
+ ctx->name, ret);
+ goto err;
+ }
+
+ ret = call_dec_op(dec, get_streaminfo, ctx, streaminfo);
+ if (ret) {
+ dev_dbg_ratelimited(delta->dev,
+ "%s failed to start streaming, valid stream header not yet decoded\n",
+ ctx->name);
+ goto err;
+ }
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ ret = call_dec_op(dec, get_frameinfo, ctx, frameinfo);
+ if (ret)
+ goto err;
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+
+ ctx->state = DELTA_STATE_READY;
+
+ dev_dbg(delta->dev, "%s %s => %s\n", ctx->name,
+ delta_streaminfo_str(streaminfo, str1, sizeof(str1)),
+ delta_frameinfo_str(frameinfo, str2, sizeof(str2)));
+
+ delta_au_done(ctx, au, ret);
+ return 0;
+
+err:
+ /*
+ * return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ delta_flush_dts(ctx);
+
+ /* return all buffers to vb2 in ERROR state */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+
+ ctx->au_num = 0;
+
+ ctx->aborting = false;
+}
+
+static int delta_vb2_frame_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned int size = frameinfo->size;
+
+ /*
+ * the number of output buffers needed for decoding =
+ * user need (*num_buffers given, usually for display pipeline) +
+ * stream need (streaminfo->dpb) +
+ * decoding peak smoothing (depends on DELTA IP perf)
+ */
+ if (*num_buffers < DELTA_MIN_FRAME_USER) {
+ dev_dbg(delta->dev,
+ "%s num_buffers too low (%d), increasing to %d\n",
+ ctx->name, *num_buffers, DELTA_MIN_FRAME_USER);
+ *num_buffers = DELTA_MIN_FRAME_USER;
+ }
+
+ *num_buffers += streaminfo->dpb + DELTA_PEAK_FRAME_SMOOTHING;
+
+ if (*num_buffers > DELTA_MAX_FRAMES) {
+ dev_dbg(delta->dev,
+ "%s output frame count too high (%d), cut to %d\n",
+ ctx->name, *num_buffers, DELTA_MAX_FRAMES);
+ *num_buffers = DELTA_MAX_FRAMES;
+ }
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* single plane for Y and CbCr */
+ *num_planes = 1;
+
+ sizes[0] = size;
+
+ ctx->nb_of_frames = 0;
+
+ return 0;
+}
+
+static int delta_vb2_frame_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+ int ret = 0;
+
+ if (!frame->prepared) {
+ frame->index = vbuf->vb2_buf.index;
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+
+ ret = delta_setup_frame(ctx, frame);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s setup_frame() failed (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+ frame->prepared = true;
+ dev_dbg(delta->dev,
+ "%s frame[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, frame->vaddr,
+ &frame->paddr);
+ }
+
+ frame->flags = vbuf->flags;
+
+ return 0;
+}
+
+static void delta_vb2_frame_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ /* update V4L2 fields for user */
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, frame->info.size);
+ vb->timestamp = frame->dts;
+ vbuf->field = frame->field;
+ vbuf->flags = frame->flags;
+}
+
+static void delta_vb2_frame_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ if (ctx->state == DELTA_STATE_WF_EOS) {
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ /* return, no need to recycle this buffer to decoder */
+ return;
+ }
+
+ /* recycle this frame */
+ delta_recycle(ctx, frame);
+}
+
+static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ const struct delta_dec *dec = ctx->dec;
+ unsigned int i;
+
+ delta_flush_dts(ctx);
+
+ call_dec_op(dec, flush, ctx);
+
+ /*
+ * return all buffers to vb2 in ERROR state
+ * & reset each frame state to OUT
+ */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (!(frame->state & DELTA_FRAME_OUT)) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+ frame->state = DELTA_FRAME_OUT;
+ }
+
+ ctx->frame_num = 0;
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static struct vb2_ops delta_vb2_au_ops = {
+ .queue_setup = delta_vb2_au_queue_setup,
+ .buf_prepare = delta_vb2_au_prepare,
+ .buf_queue = delta_vb2_au_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = delta_vb2_au_start_streaming,
+ .stop_streaming = delta_vb2_au_stop_streaming,
+};
+
+static struct vb2_ops delta_vb2_frame_ops = {
+ .queue_setup = delta_vb2_frame_queue_setup,
+ .buf_prepare = delta_vb2_frame_prepare,
+ .buf_finish = delta_vb2_frame_finish,
+ .buf_queue = delta_vb2_frame_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = delta_vb2_frame_stop_streaming,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(void *priv,
+ struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vb2_queue *q;
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int ret;
+
+ /* setup vb2 queue for stream input */
+ q = src_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private au struct */
+ q->buf_struct_size = sizeof(struct delta_au);
+ q->ops = &delta_vb2_au_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* setup vb2 queue for frame output */
+ q = dst_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private frame struct */
+ q->buf_struct_size = sizeof(struct delta_frame)
+ + DELTA_MAX_FRAME_PRIV_SIZE;
+ q->ops = &delta_vb2_frame_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ return vb2_queue_init(q);
+}
+
+static int delta_open(struct file *file)
+{
+ struct delta_dev *delta = video_drvdata(file);
+ struct delta_ctx *ctx = NULL;
+ int ret = 0;
+
+ mutex_lock(&delta->lock);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ctx->dev = delta;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ INIT_WORK(&ctx->run_work, delta_run_work);
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(delta->m2m_dev, ctx,
+ queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(delta->dev, "%s failed to initialize m2m context (%d)\n",
+ DELTA_PREFIX, ret);
+ goto err_fh_del;
+ }
+
+ /*
+ * wait for the stream format to be set in order to
+ * determine which decoder to open
+ */
+ ctx->state = DELTA_STATE_WF_FORMAT;
+
+ INIT_LIST_HEAD(&ctx->dts);
+
+ /* set the instance name */
+ delta->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ delta->instance_id);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+ /* enable ST231 clocks */
+ if (delta->clk_st231)
+ if (clk_prepare_enable(delta->clk_st231))
+ dev_warn(delta->dev, "failed to enable st231 clk\n");
+
+ /* enable FLASH_PROMIP clock */
+ if (delta->clk_flash_promip)
+ if (clk_prepare_enable(delta->clk_flash_promip))
+ dev_warn(delta->dev, "failed to enable delta promip clk\n");
+
+ mutex_unlock(&delta->lock);
+
+ dev_dbg(delta->dev, "%s decoder instance created\n", ctx->name);
+
+ return 0;
+
+err_fh_del:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err:
+ mutex_unlock(&delta->lock);
+
+ return ret;
+}
+
+static int delta_release(struct file *file)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ mutex_lock(&delta->lock);
+
+ /* close decoder */
+ call_dec_op(dec, close, ctx);
+
+ /*
+ * trace a summary of instance
+ * before closing (debug purpose)
+ */
+ delta_trace_summary(ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ /* disable ST231 clocks */
+ if (delta->clk_st231)
+ clk_disable_unprepare(delta->clk_st231);
+
+ /* disable FLASH_PROMIP clock */
+ if (delta->clk_flash_promip)
+ clk_disable_unprepare(delta->clk_flash_promip);
+
+ dev_dbg(delta->dev, "%s decoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ mutex_unlock(&delta->lock);
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations delta_fops = {
+ .owner = THIS_MODULE,
+ .open = delta_open,
+ .release = delta_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int delta_register_device(struct delta_dev *delta)
+{
+ int ret;
+ struct video_device *vdev;
+
+ if (!delta)
+ return -ENODEV;
+
+ delta->m2m_dev = v4l2_m2m_init(&delta_m2m_ops);
+ if (IS_ERR(delta->m2m_dev)) {
+ dev_err(delta->dev, "%s failed to initialize v4l2-m2m device\n",
+ DELTA_PREFIX);
+ ret = PTR_ERR(delta->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(delta->dev, "%s failed to allocate video device\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &delta_fops;
+ vdev->ioctl_ops = &delta_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &delta->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &delta->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s-%s",
+ DELTA_NAME, DELTA_FW_VERSION);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register video device\n",
+ DELTA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ delta->vdev = vdev;
+ video_set_drvdata(vdev, delta);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(delta->m2m_dev);
+err:
+ return ret;
+}
+
+static void delta_unregister_device(struct delta_dev *delta)
+{
+ if (!delta)
+ return;
+
+ if (delta->m2m_dev)
+ v4l2_m2m_release(delta->m2m_dev);
+
+ video_unregister_device(delta->vdev);
+}
+
+static int delta_probe(struct platform_device *pdev)
+{
+ struct delta_dev *delta;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ delta = devm_kzalloc(dev, sizeof(*delta), GFP_KERNEL);
+ if (!delta) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ delta->dev = dev;
+ delta->pdev = pdev;
+ platform_set_drvdata(pdev, delta);
+
+ mutex_init(&delta->lock);
+
+ /* get clock resources */
+ delta->clk_delta = devm_clk_get(dev, "delta");
+ if (IS_ERR(delta->clk_delta)) {
+ dev_dbg(dev, "%s can't get delta clock\n", DELTA_PREFIX);
+ delta->clk_delta = NULL;
+ }
+
+ delta->clk_st231 = devm_clk_get(dev, "delta-st231");
+ if (IS_ERR(delta->clk_st231)) {
+ dev_dbg(dev, "%s can't get delta-st231 clock\n", DELTA_PREFIX);
+ delta->clk_st231 = NULL;
+ }
+
+ delta->clk_flash_promip = devm_clk_get(dev, "delta-flash-promip");
+ if (IS_ERR(delta->clk_flash_promip)) {
+ dev_dbg(dev, "%s can't get delta-flash-promip clock\n",
+ DELTA_PREFIX);
+ delta->clk_flash_promip = NULL;
+ }
+
+ /* init pm_runtime used for power management */
+ pm_runtime_set_autosuspend_delay(dev, DELTA_HW_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ /* init firmware ipc channel */
+ ret = delta_ipc_init(delta);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
+ DELTA_PREFIX);
+ goto err;
+ }
+
+ /* register all available decoders */
+ register_decoders(delta);
+
+ /* register all supported formats */
+ register_formats(delta);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &delta->v4l2_dev);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register V4L2 device\n",
+ DELTA_PREFIX);
+ goto err;
+ }
+
+ delta->work_queue = create_workqueue(DELTA_NAME);
+ if (!delta->work_queue) {
+ dev_err(delta->dev, "%s failed to allocate work queue\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = delta_register_device(delta);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n",
+ DELTA_PREFIX, delta->vdev->name, delta->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(delta->work_queue);
+err_v4l2:
+ v4l2_device_unregister(&delta->v4l2_dev);
+err:
+ return ret;
+}
+
+static int delta_remove(struct platform_device *pdev)
+{
+ struct delta_dev *delta = platform_get_drvdata(pdev);
+
+ delta_ipc_exit(delta);
+
+ delta_unregister_device(delta);
+
+ destroy_workqueue(delta->work_queue);
+
+ pm_runtime_put_autosuspend(delta->dev);
+ pm_runtime_disable(delta->dev);
+
+ v4l2_device_unregister(&delta->v4l2_dev);
+
+ return 0;
+}
+
+static int delta_runtime_suspend(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ if (delta->clk_delta)
+ clk_disable_unprepare(delta->clk_delta);
+
+ return 0;
+}
+
+static int delta_runtime_resume(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ if (delta->clk_delta)
+ if (clk_prepare_enable(delta->clk_delta))
+ dev_warn(dev, "failed to prepare/enable delta clk\n");
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops delta_pm_ops = {
+ .runtime_suspend = delta_runtime_suspend,
+ .runtime_resume = delta_runtime_resume,
+};
+
+static const struct of_device_id delta_match_types[] = {
+ {
+ .compatible = "st,st-delta",
+ },
+ {
+ /* end node */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, delta_match_types);
+
+static struct platform_driver delta_driver = {
+ .probe = delta_probe,
+ .remove = delta_remove,
+ .driver = {
+ .name = DELTA_NAME,
+ .of_match_table = delta_match_types,
+ .pm = &delta_pm_ops},
+};
+
+module_platform_driver(delta_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DELTA video decoder V4L2 driver");
diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/sti/delta/delta.h
new file mode 100644
index 000000000000..60c073246a01
--- /dev/null
+++ b/drivers/media/platform/sti/delta/delta.h
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef DELTA_H
+#define DELTA_H
+
+#include <linux/rpmsg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "delta-cfg.h"
+
+/*
+ * enum delta_state - state of decoding instance
+ *
+ * @DELTA_STATE_WF_FORMAT:
+ * Wait for the compressed format to be set by the V4L2 client in
+ * order to know which decoder to open.
+ *
+ * @DELTA_STATE_WF_STREAMINFO:
+ * Wait for stream information to be available (bitstream
+ * header parsing is done).
+ *
+ * @DELTA_STATE_READY:
+ * Decoding instance is ready to decode compressed access units.
+ *
+ * @DELTA_STATE_WF_EOS:
+ * Decoding instance is waiting for EOS (End Of Stream) completion.
+ *
+ * @DELTA_STATE_EOS:
+ * EOS (End Of Stream) is completed (signaled to user). The decoding
+ * instance should then be closed.
+ */
+enum delta_state {
+ DELTA_STATE_WF_FORMAT,
+ DELTA_STATE_WF_STREAMINFO,
+ DELTA_STATE_READY,
+ DELTA_STATE_WF_EOS,
+ DELTA_STATE_EOS
+};
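For debug traces, a helper mapping these states to short strings could be written in the same spirit as the frame_type_str()/frame_field_str() helpers further down in this header; the sketch below is illustrative only and is not part of the submitted driver:

static inline const char *delta_state_str(enum delta_state state)
{
	switch (state) {
	case DELTA_STATE_WF_FORMAT:
		return "wf_format";
	case DELTA_STATE_WF_STREAMINFO:
		return "wf_streaminfo";
	case DELTA_STATE_READY:
		return "ready";
	case DELTA_STATE_WF_EOS:
		return "wf_eos";
	case DELTA_STATE_EOS:
		return "eos";
	default:
		return "?";
	}
}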
+
+/*
+ * struct delta_streaminfo - information about stream to decode
+ *
+ * @flags: validity of fields (crop, pixelaspect, other)
+ * @width: width of video stream
+ * @height: height of video stream
+ * @streamformat: fourcc compressed format of video (MJPEG, MPEG2, ...)
+ * @dpb: number of frames needed to decode a single frame
+ * (h264 dpb, up to 16)
+ * @crop: cropping window inside decoded frame (1920x1080@0,0
+ * inside a 1920x1088 frame, for example)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced or not
+ * @profile: profile string
+ * @level: level string
+ * @other: other string information from codec
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_streaminfo {
+ u32 flags;
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u32 dpb;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ u8 profile[32];
+ u8 level[32];
+ u8 other[32];
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_STREAMINFO_FLAG_CROP 0x0001
+#define DELTA_STREAMINFO_FLAG_PIXELASPECT 0x0002
+#define DELTA_STREAMINFO_FLAG_OTHER 0x0004
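Several fields of struct delta_streaminfo are only meaningful when the corresponding bit is set in @flags, so callers are expected to test the flag first. A trivial hypothetical helper for the crop case, purely for illustration:

/* Illustrative helper (not part of the driver): is the crop window valid? */
static inline bool delta_streaminfo_has_crop(const struct delta_streaminfo *s)
{
	return s->flags & DELTA_STREAMINFO_FLAG_CROP;
}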
+
+/*
+ * struct delta_au - access unit structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @prepared: if set, vaddr/paddr are resolved
+ * @size: size of the access unit in bytes
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @flags: access unit type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this access unit
+ */
+struct delta_au {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ bool prepared;
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 flags;
+ u64 dts;
+};
+
+/*
+ * struct delta_frameinfo - information about decoded frame
+ *
+ * @flags: validity of fields (crop, pixelaspect)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder or decoder alignment
+ * constraint)
+ * @aligned_height: height of frame (with encoder or decoder alignment
+ * constraint)
+ * @size: maximum size in bytes required for data
+ * @crop: cropping window inside frame (1920x1080@0,0
+ * inside a 1920x1088 frame, for example)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced mode
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_frameinfo {
+ u32 flags;
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_FRAMEINFO_FLAG_CROP 0x0001
+#define DELTA_FRAMEINFO_FLAG_PIXELASPECT 0x0002
+
+/*
+ * struct delta_frame - frame structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @prepared: if set, pix/vaddr/paddr are resolved
+ * @index: frame index (matches the V4L2 buffer index)
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @state: frame state for frame lifecycle tracking
+ * (DELTA_FRAME_FREE/DEC/OUT/REC/...)
+ * @flags: frame type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this frame
+ * @field: field order for interlaced frame
+ */
+struct delta_frame {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ struct delta_frameinfo info;
+ bool prepared;
+ u32 index;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 state;
+ u32 flags;
+ u64 dts;
+ enum v4l2_field field;
+};
+
+/* frame state for frame lifecycle tracking */
+#define DELTA_FRAME_FREE 0x00 /* is free and can be used for decoding */
+#define DELTA_FRAME_REF 0x01 /* is a reference frame */
+#define DELTA_FRAME_BSY 0x02 /* is owned by decoder and busy */
+#define DELTA_FRAME_DEC 0x04 /* contains decoded content */
+#define DELTA_FRAME_OUT 0x08 /* has been given to user */
+#define DELTA_FRAME_RDY 0x10 /* is ready but still held by decoder */
+#define DELTA_FRAME_M2M 0x20 /* is owned by mem2mem framework */
+
+/*
+ * struct delta_dts - decoding timestamp.
+ *
+ * @list: list to chain timestamps
+ * @val: timestamp in microseconds
+ */
+struct delta_dts {
+ struct list_head list;
+ u64 val;
+};
+
+struct delta_buf {
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ const char *name;
+ unsigned long attrs;
+};
+
+struct delta_ipc_ctx {
+ int cb_err;
+ u32 copro_hdl;
+ struct completion done;
+ struct delta_buf ipc_buf_struct;
+ struct delta_buf *ipc_buf;
+};
+
+struct delta_ipc_param {
+ u32 size;
+ void *data;
+};
+
+struct delta_ctx;
+
+/*
+ * struct delta_dec - decoder structure.
+ *
+ * @name: name of this decoder
+ * @streamformat: input stream format that this decoder supports
+ * @pixelformat: pixel format of decoded frame that this decoder supports
+ * @max_width: (optional) maximum width that this decoder can decode;
+ * if not set, maximum width is DELTA_MAX_WIDTH
+ * @max_height: (optional) maximum height that this decoder can decode;
+ * if not set, maximum height is DELTA_MAX_HEIGHT
+ * @pm: (optional) if set, decoder will manage power on its own
+ * @open: open this decoder
+ * @close: close this decoder
+ * @setup_frame: setup frame to be used by decoder, see below
+ * @get_streaminfo: get stream related information, see below
+ * @get_frameinfo: get decoded frame related information, see below
+ * @set_frameinfo: (optional) set decoded frame related information, see below
+ * @decode: decode a single access unit, see below
+ * @get_frame: get the next decoded frame available, see below
+ * @recycle: recycle the given frame, see below
+ * @flush: (optional) flush decoder, see below
+ * @drain: (optional) drain decoder, see below
+ */
+struct delta_dec {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ bool pm;
+
+ /*
+ * decoder ops
+ */
+ int (*open)(struct delta_ctx *ctx);
+ int (*close)(struct delta_ctx *ctx);
+
+ /*
+ * setup_frame() - setup frame to be used by decoder
+ * @ctx: (in) instance
+ * @frame: (in) frame to use
+ * @frame.index (in) identifier of frame
+ * @frame.vaddr (in) virtual address (kernel can read/write)
+ * @frame.paddr (in) physical address (for hardware)
+ *
+ * The frame is to be allocated by the caller, then given
+ * to the decoder through this call.
+ * Several frames must be given to the decoder (dpb);
+ * each frame is identified by its index.
+ */
+ int (*setup_frame)(struct delta_ctx *ctx, struct delta_frame *frame);
+
+ /*
+ * get_streaminfo() - get stream related information
+ * @ctx: (in) instance
+ * @streaminfo: (out) width, height, dpb,...
+ *
+ * Precondition: the stream header must have been successfully
+ * parsed for this call to succeed and @streaminfo to be valid.
+ * Header parsing is done through decode(), passing explicitly the
+ * header access unit or the first access unit of the bitstream.
+ * If no valid header has been found yet, get_streaminfo returns
+ * -ENODATA; in that case the next bitstream access units must be
+ * decoded until get_streaminfo succeeds (see the usage sketch after
+ * this structure).
+ */
+ int (*get_streaminfo)(struct delta_ctx *ctx,
+ struct delta_streaminfo *streaminfo);
+
+ /*
+ * get_frameinfo() - get decoded frame related information
+ * @ctx: (in) instance
+ * @frameinfo: (out) width, height, alignment, crop, ...
+ *
+ * Precondition: get_streaminfo() must be successful
+ */
+ int (*get_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * set_frameinfo() - set decoded frame related information
+ * @ctx: (in) instance
+ * @frameinfo: (in) width, height, alignment, crop, ...
+ *
+ * Optional.
+ * Typically used to negotiate the output frame format with the
+ * decoder when it can do post-processing.
+ */
+ int (*set_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * decode() - decode a single access unit
+ * @ctx: (in) instance
+ * @au: (in/out) access unit
+ * @au.size (in) size of au to decode
+ * @au.vaddr (in) virtual address (kernel can read/write)
+ * @au.paddr (in) physical address (for hardware)
+ * @au.flags (out) au type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+ * Decode the given access unit. Decoding is synchronous;
+ * the access unit memory is no longer needed after this call.
+ * After this call, zero, one or several frames may have
+ * been decoded; they can be retrieved using
+ * get_frame().
+ */
+ int (*decode)(struct delta_ctx *ctx, struct delta_au *au);
+
+ /*
+ * get_frame() - get the next decoded frame available
+ * @ctx: (in) instance
+ * @frame: (out) frame with decoded data:
+ * @frame.index (out) identifier of frame
+ * @frame.field (out) field order for interlaced frame
+ * @frame.state (out) frame state for frame lifecycle tracking
+ * @frame.flags (out) frame type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+ * Get the next available decoded frame.
+ * If no frame is available, -ENODATA is returned.
+ * If a frame is available, the frame structure is filled with
+ * the relevant data, frame.index identifying this exact frame.
+ * When this frame is no longer needed by the upper layers,
+ * recycle() must be called with this frame identifier.
+ */
+ int (*get_frame)(struct delta_ctx *ctx, struct delta_frame **frame);
+
+ /*
+ * recycle() - recycle the given frame
+ * @ctx: (in) instance
+ * @frame: (in) frame to recycle:
+ * @frame.index (in) identifier of frame
+ *
+ * recycle() is to be called by the user when the decoded frame
+ * is no longer needed (composition/display done).
+ * The frame will then be reused by the decoder to proceed
+ * with the next frame decoding.
+ * If not enough frames have been provided through setup_frame(),
+ * or if recycle() is not called fast enough, the decoder can run out
+ * of available frames and stall (starvation).
+ * This case is guarded by the wq_recycle wait queue, which ensures that
+ * the decoder is called only if at least one frame is available.
+ */
+ int (*recycle)(struct delta_ctx *ctx, struct delta_frame *frame);
+
+ /*
+ * flush() - flush decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Reset the decoder context and discard all internal buffers.
+ * This allows the implementation of seek, which introduces a
+ * discontinuity in the input bitstream that the decoder must be
+ * aware of in order to restart its internal decoding logic.
+ */
+ int (*flush)(struct delta_ctx *ctx);
+
+ /*
+ * drain() - drain decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Mark decoder pending frames (decoded but not yet output) as ready
+ * so that they can be output to client at EOS (End Of Stream).
+ * get_frame() is to be called in a loop right after drain() to
+ * get all those pending frames.
+ */
+ int (*drain)(struct delta_ctx *ctx);
+};
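The ops above describe a calling protocol rather than independent hooks. A condensed, illustrative sketch of how the core is expected to drive them is shown below; the function name and local variables are hypothetical, error handling and frame setup are trimmed, and this is not the actual delta-v4l2.c code:

/* Illustrative sketch only (hypothetical helper, simplified error handling) */
static int delta_drive_decoder(struct delta_ctx *ctx, struct delta_au *au)
{
	const struct delta_dec *dec = ctx->dec;
	struct delta_streaminfo streaminfo;
	struct delta_frame *frame;
	int ret;

	/* decode() is synchronous: the au memory can be reused right after */
	ret = dec->decode(ctx, au);
	if (ret)
		return ret;

	/* header may not be parsed yet: feed more access units until it is */
	ret = dec->get_streaminfo(ctx, &streaminfo);
	if (ret == -ENODATA)
		return 0;
	if (ret)
		return ret;

	/* zero, one or several frames may now be ready */
	while (!dec->get_frame(ctx, &frame)) {
		/* ...queue frame to userspace; once displayed, give it back... */
		dec->recycle(ctx, frame);
	}

	return 0;
}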
+
+struct delta_dev;
+
+/*
+ * struct delta_ctx - instance structure.
+ *
+ * @flags: validity of fields (streaminfo)
+ * @fh: V4L2 file handle
+ * @dev: device context
+ * @dec: selected decoder context for this instance
+ * @ipc_ctx: context of IPC communication with firmware
+ * @state: instance state
+ * @frame_num: frame number
+ * @au_num: access unit number
+ * @max_au_size: max size of an access unit
+ * @streaminfo: stream information (width, height, dpb, interlacing...)
+ * @frameinfo: frame information (width, height, format, alignment...)
+ * @nb_of_frames: number of frames available for decoding
+ * @frames: array of decoding frames to keep track of frame
+ * state and manage frame recycling
+ * @decoded_frames: nb of decoded frames since opening
+ * @output_frames: nb of output frames since opening
+ * @dropped_frames: nb of frames dropped (i.e. access unit not parsed
+ * or frame decoded but not output)
+ * @stream_errors: nb of stream errors (corrupted, not supported, ...)
+ * @decode_errors: nb of decode errors (firmware error)
+ * @sys_errors: nb of system errors (memory, ipc, ...)
+ * @dts: FIFO of decoding timestamps;
+ * output frames are timestamped with the incoming access
+ * unit timestamps using this FIFO.
+ * @name: string naming this instance (debug purpose)
+ * @run_work: decoding work
+ * @lock: lock for decoding work serialization
+ * @aborting: true if current job aborted
+ * @priv: private decoder context for this instance, allocated
+ * by the decoder at open() time.
+ */
+struct delta_ctx {
+ u32 flags;
+ struct v4l2_fh fh;
+ struct delta_dev *dev;
+ const struct delta_dec *dec;
+ struct delta_ipc_ctx ipc_ctx;
+
+ enum delta_state state;
+ u32 frame_num;
+ u32 au_num;
+ size_t max_au_size;
+ struct delta_streaminfo streaminfo;
+ struct delta_frameinfo frameinfo;
+ u32 nb_of_frames;
+ struct delta_frame *frames[DELTA_MAX_FRAMES];
+ u32 decoded_frames;
+ u32 output_frames;
+ u32 dropped_frames;
+ u32 stream_errors;
+ u32 decode_errors;
+ u32 sys_errors;
+ struct list_head dts;
+ char name[100];
+ struct work_struct run_work;
+ struct mutex lock;
+ bool aborting;
+ void *priv;
+};
+
+#define DELTA_FLAG_STREAMINFO 0x0001
+#define DELTA_FLAG_FRAMEINFO 0x0002
+
+#define DELTA_MAX_FORMATS DELTA_MAX_DECODERS
+
+/*
+ * struct delta_dev - device structure, one per probe (so a single
+ * instance for the whole platform lifetime)
+ *
+ * @v4l2_dev: v4l2 device
+ * @vdev: v4l2 video device
+ * @pdev: platform device
+ * @dev: device
+ * @m2m_dev: memory-to-memory V4L2 device
+ * @lock: device lock, for critical sections and V4L2 ops serialization.
+ * @clk_delta: delta main clock
+ * @clk_st231: st231 coprocessor main clock
+ * @clk_flash_promip: flash promip clock
+ * @decoders: list of registered decoders
+ * @nb_of_decoders: nb of registered decoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats: number of supported uncompressed video formats
+ * @streamformats: supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @work_queue: decoding job work queue
+ * @rpmsg_driver: rpmsg IPC driver
+ * @rpmsg_device: rpmsg IPC device
+ */
+struct delta_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct mutex lock;
+ struct clk *clk_delta;
+ struct clk *clk_st231;
+ struct clk *clk_flash_promip;
+ const struct delta_dec *decoders[DELTA_MAX_DECODERS];
+ u32 nb_of_decoders;
+ u32 pixelformats[DELTA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[DELTA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u8 instance_id;
+ struct workqueue_struct *work_queue;
+ struct rpmsg_driver rpmsg_driver;
+ struct rpmsg_device *rpmsg_device;
+};
+
+static inline char *frame_type_str(u32 flags)
+{
+ if (flags & V4L2_BUF_FLAG_KEYFRAME)
+ return "I";
+ if (flags & V4L2_BUF_FLAG_PFRAME)
+ return "P";
+ if (flags & V4L2_BUF_FLAG_BFRAME)
+ return "B";
+ if (flags & V4L2_BUF_FLAG_LAST)
+ return "EOS";
+ return "?";
+}
+
+static inline char *frame_field_str(enum v4l2_field field)
+{
+ if (field == V4L2_FIELD_NONE)
+ return "-";
+ if (field == V4L2_FIELD_TOP)
+ return "T";
+ if (field == V4L2_FIELD_BOTTOM)
+ return "B";
+ if (field == V4L2_FIELD_INTERLACED)
+ return "I";
+ if (field == V4L2_FIELD_INTERLACED_TB)
+ return "TB";
+ if (field == V4L2_FIELD_INTERLACED_BT)
+ return "BT";
+ return "?";
+}
+
+static inline char *frame_state_str(u32 state, char *str, unsigned int len)
+{
+ snprintf(str, len, "%s %s %s %s %s %s",
+ (state & DELTA_FRAME_REF) ? "ref" : " ",
+ (state & DELTA_FRAME_BSY) ? "bsy" : " ",
+ (state & DELTA_FRAME_DEC) ? "dec" : " ",
+ (state & DELTA_FRAME_OUT) ? "out" : " ",
+ (state & DELTA_FRAME_M2M) ? "m2m" : " ",
+ (state & DELTA_FRAME_RDY) ? "rdy" : " ");
+ return str;
+}
+
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame);
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe);
+
+int delta_get_sync(struct delta_ctx *ctx);
+void delta_put_autosuspend(struct delta_ctx *ctx);
+
+#endif /* DELTA_H */
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
index ffb69cebaef3..e3ebe968472d 100644
--- a/drivers/media/platform/sti/hva/Makefile
+++ b/drivers/media/platform/sti/hva/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
+st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c
new file mode 100644
index 000000000000..83a6258a155b
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-debugfs.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/debugfs.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+static void format_ctx(struct seq_file *s, struct hva_ctx *ctx)
+{
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+ struct hva_controls *ctrls = &ctx->ctrls;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ u32 bitrate_mode, aspect, entropy, vui_sar, sei_fp;
+
+ seq_printf(s, "|-%s\n |\n", ctx->name);
+
+ seq_printf(s, " |-[%sframe info]\n",
+ ctx->flags & HVA_FLAG_FRAMEINFO ? "" : "default ");
+ seq_printf(s, " | |- pixel format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- wxh (w/ encoder alignment constraint)=%dx%d\n"
+ " |\n",
+ (char *)&frame->pixelformat,
+ frame->width, frame->height,
+ frame->aligned_width, frame->aligned_height);
+
+ seq_printf(s, " |-[%sstream info]\n",
+ ctx->flags & HVA_FLAG_STREAMINFO ? "" : "default ");
+ seq_printf(s, " | |- stream format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- %s\n"
+ " | |- %s\n"
+ " |\n",
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level);
+
+ bitrate_mode = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+ aspect = V4L2_CID_MPEG_VIDEO_ASPECT;
+ seq_puts(s, " |-[parameters]\n");
+ seq_printf(s, " | |- %s\n"
+ " | |- bitrate=%d bps\n"
+ " | |- GOP size=%d\n"
+ " | |- video aspect=%s\n"
+ " | |- framerate=%d/%d\n",
+ v4l2_ctrl_get_menu(bitrate_mode)[ctrls->bitrate_mode],
+ ctrls->bitrate,
+ ctrls->gop_size,
+ v4l2_ctrl_get_menu(aspect)[ctrls->aspect],
+ ctrls->time_per_frame.denominator,
+ ctrls->time_per_frame.numerator);
+
+ entropy = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
+ vui_sar = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC;
+ sei_fp = V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE;
+ if (stream->streamformat == V4L2_PIX_FMT_H264) {
+ seq_printf(s, " | |- %s entropy mode\n"
+ " | |- CPB size=%d kB\n"
+ " | |- DCT8x8 enable=%s\n"
+ " | |- qpmin=%d\n"
+ " | |- qpmax=%d\n"
+ " | |- PAR enable=%s\n"
+ " | |- PAR id=%s\n"
+ " | |- SEI frame packing enable=%s\n"
+ " | |- SEI frame packing type=%s\n",
+ v4l2_ctrl_get_menu(entropy)[ctrls->entropy_mode],
+ ctrls->cpb_size,
+ ctrls->dct8x8 ? "true" : "false",
+ ctrls->qpmin,
+ ctrls->qpmax,
+ ctrls->vui_sar ? "true" : "false",
+ v4l2_ctrl_get_menu(vui_sar)[ctrls->vui_sar_idc],
+ ctrls->sei_fp ? "true" : "false",
+ v4l2_ctrl_get_menu(sei_fp)[ctrls->sei_fp_type]);
+ }
+
+ if (ctx->sys_errors || ctx->encode_errors || ctx->frame_errors) {
+ seq_puts(s, " |\n |-[errors]\n");
+ seq_printf(s, " | |- system=%d\n"
+ " | |- encoding=%d\n"
+ " | |- frame=%d\n",
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+ }
+
+ seq_puts(s, " |\n |-[performances]\n");
+ seq_printf(s, " | |- frames encoded=%d\n"
+ " | |- avg HW processing duration (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg encoding period (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg fps (0.1Hz)=%d\n"
+ " | |- max reachable fps (0.1Hz)=%d\n"
+ " | |- avg bitrate (kbps)=%d [min=%d, max=%d]\n"
+ " | |- last bitrate (kbps)=%d\n",
+ dbg->cnt_duration,
+ dbg->avg_duration,
+ dbg->min_duration,
+ dbg->max_duration,
+ dbg->avg_period,
+ dbg->min_period,
+ dbg->max_period,
+ dbg->avg_fps,
+ dbg->max_fps,
+ dbg->avg_bitrate,
+ dbg->min_bitrate,
+ dbg->max_bitrate,
+ dbg->last_bitrate);
+}
+
+/*
+ * performance debug info
+ */
+void hva_dbg_perf_begin(struct hva_ctx *ctx)
+{
+ u64 div;
+ u32 period;
+ u32 bitrate;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t prev = dbg->begin;
+
+ dbg->begin = ktime_get();
+
+ if (dbg->is_valid_period) {
+ /* encoding period */
+ div = (u64)ktime_us_delta(dbg->begin, prev);
+ do_div(div, 100);
+ period = (u32)div;
+ dbg->min_period = min(period, dbg->min_period);
+ dbg->max_period = max(period, dbg->max_period);
+ dbg->total_period += period;
+ dbg->cnt_period++;
+
+ /*
+ * minimum and maximum bitrates are based on the
+ * encoding period values over a window of 32 samples
+ */
+ dbg->window_duration += period;
+ dbg->cnt_window++;
+ if (dbg->cnt_window >= 32) {
+ /*
+ * bitrate in kbps = (size * 8 / 1000) /
+ * (duration / 10000)
+ * = size * 80 / duration
+ */
+ if (dbg->window_duration > 0) {
+ div = (u64)dbg->window_stream_size * 80;
+ do_div(div, dbg->window_duration);
+ bitrate = (u32)div;
+ dbg->last_bitrate = bitrate;
+ dbg->min_bitrate = min(bitrate,
+ dbg->min_bitrate);
+ dbg->max_bitrate = max(bitrate,
+ dbg->max_bitrate);
+ }
+ dbg->window_stream_size = 0;
+ dbg->window_duration = 0;
+ dbg->cnt_window = 0;
+ }
+ }
+
+ /*
+ * filter sequences valid for performance:
+ * - begin/begin (no stream available) is an invalid sequence
+ * - begin/end is a valid sequence
+ */
+ dbg->is_valid_period = false;
+}
+
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 div;
+ u32 duration;
+ u32 bytesused;
+ u32 timestamp;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t end = ktime_get();
+
+ /* stream bytesused and timestamp in us */
+ bytesused = vb2_get_plane_payload(&stream->vbuf.vb2_buf, 0);
+ div = stream->vbuf.vb2_buf.timestamp;
+ do_div(div, 1000);
+ timestamp = (u32)div;
+
+ /* encoding duration */
+ div = (u64)ktime_us_delta(end, dbg->begin);
+
+ dev_dbg(dev,
+ "%s perf stream[%d] dts=%d encoded using %d bytes in %d us",
+ ctx->name,
+ stream->vbuf.sequence,
+ timestamp,
+ bytesused, (u32)div);
+
+ do_div(div, 100);
+ duration = (u32)div;
+
+ dbg->min_duration = min(duration, dbg->min_duration);
+ dbg->max_duration = max(duration, dbg->max_duration);
+ dbg->total_duration += duration;
+ dbg->cnt_duration++;
+
+ /*
+ * the average bitrate is based on the total stream size
+ * and the total encoding periods
+ */
+ dbg->total_stream_size += bytesused;
+ dbg->window_stream_size += bytesused;
+
+ dbg->is_valid_period = true;
+}
+
+static void hva_dbg_perf_compute(struct hva_ctx *ctx)
+{
+ u64 div;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+
+ if (dbg->cnt_duration > 0) {
+ div = (u64)dbg->total_duration;
+ do_div(div, dbg->cnt_duration);
+ dbg->avg_duration = (u32)div;
+ } else {
+ dbg->avg_duration = 0;
+ }
+
+ if (dbg->total_duration > 0) {
+ div = (u64)dbg->cnt_duration * 100000;
+ do_div(div, dbg->total_duration);
+ dbg->max_fps = (u32)div;
+ } else {
+ dbg->max_fps = 0;
+ }
+
+ if (dbg->cnt_period > 0) {
+ div = (u64)dbg->total_period;
+ do_div(div, dbg->cnt_period);
+ dbg->avg_period = (u32)div;
+ } else {
+ dbg->avg_period = 0;
+ }
+
+ if (dbg->total_period > 0) {
+ div = (u64)dbg->cnt_period * 100000;
+ do_div(div, dbg->total_period);
+ dbg->avg_fps = (u32)div;
+ } else {
+ dbg->avg_fps = 0;
+ }
+
+ if (dbg->total_period > 0) {
+ /*
+ * bitrate in kbps = (video size * 8 / 1000) /
+ * (video duration / 10000)
+ * = video size * 80 / video duration
+ */
+ div = (u64)dbg->total_stream_size * 80;
+ do_div(div, dbg->total_period);
+ dbg->avg_bitrate = (u32)div;
+ } else {
+ dbg->avg_bitrate = 0;
+ }
+}
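The 0.1 ms / 0.1 Hz / kbps unit conventions used by these counters are easy to misread; the short worked example below (figures invented purely for illustration) checks the formulas above:

/*
 * Example with illustrative figures: 100 frames encoded, total HW
 * processing time of 20000 units of 0.1 ms (2 s), a total encoding
 * period of 20000 units as well, and 1000000 bytes of stream produced:
 *
 *   avg_duration = 20000 / 100          = 200  -> 20.0 ms per frame
 *   max_fps      = 100 * 100000 / 20000 = 500  -> 50.0 Hz
 *   avg_bitrate  = 1000000 * 80 / 20000 = 4000 -> 4000 kbps (4 Mbit/s)
 */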
+
+/*
+ * device debug info
+ */
+
+static int hva_dbg_device(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ seq_printf(s, "[%s]\n", hva->v4l2_dev.name);
+ seq_printf(s, "registered as /dev/video%d\n", hva->vdev->num);
+
+ return 0;
+}
+
+static int hva_dbg_encoders(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ unsigned int i = 0;
+
+ seq_printf(s, "[encoders]\n|- %d registered encoders:\n",
+ hva->nb_of_encoders);
+
+ while (hva->encoders[i]) {
+ seq_printf(s, "|- %s: %4.4s => %4.4s\n", hva->encoders[i]->name,
+ (char *)&hva->encoders[i]->pixelformat,
+ (char *)&hva->encoders[i]->streamformat);
+ i++;
+ }
+
+ return 0;
+}
+
+static int hva_dbg_last(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ struct hva_ctx *last_ctx = &hva->dbg.last_ctx;
+
+ if (last_ctx->flags & HVA_FLAG_STREAMINFO) {
+ seq_puts(s, "[last encoding]\n");
+
+ hva_dbg_perf_compute(last_ctx);
+ format_ctx(s, last_ctx);
+ } else {
+ seq_puts(s, "[no information recorded about last encoding]\n");
+ }
+
+ return 0;
+}
+
+static int hva_dbg_regs(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ hva_hw_dump_regs(hva, s);
+
+ return 0;
+}
+
+#define hva_dbg_declare(name) \
+ static int hva_dbg_##name##_open(struct inode *i, struct file *f) \
+ { \
+ return single_open(f, hva_dbg_##name, i->i_private); \
+ } \
+ static const struct file_operations hva_dbg_##name##_fops = { \
+ .open = hva_dbg_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define hva_dbg_create_entry(name) \
+ debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \
+ &hva_dbg_##name##_fops)
+
+hva_dbg_declare(device);
+hva_dbg_declare(encoders);
+hva_dbg_declare(last);
+hva_dbg_declare(regs);
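For reference, each hva_dbg_declare() invocation above expands to the usual single_open() boilerplate; for example, hva_dbg_declare(device) yields:

static int hva_dbg_device_open(struct inode *i, struct file *f)
{
	return single_open(f, hva_dbg_device, i->i_private);
}
static const struct file_operations hva_dbg_device_fops = {
	.open = hva_dbg_device_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};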
+
+void hva_debugfs_create(struct hva_dev *hva)
+{
+ hva->dbg.debugfs_entry = debugfs_create_dir(HVA_NAME, NULL);
+ if (!hva->dbg.debugfs_entry)
+ goto err;
+
+ if (!hva_dbg_create_entry(device))
+ goto err;
+
+ if (!hva_dbg_create_entry(encoders))
+ goto err;
+
+ if (!hva_dbg_create_entry(last))
+ goto err;
+
+ if (!hva_dbg_create_entry(regs))
+ goto err;
+
+ return;
+
+err:
+ hva_debugfs_remove(hva);
+}
+
+void hva_debugfs_remove(struct hva_dev *hva)
+{
+ debugfs_remove_recursive(hva->dbg.debugfs_entry);
+ hva->dbg.debugfs_entry = NULL;
+}
+
+/*
+ * context (instance) debug info
+ */
+
+static int hva_dbg_ctx(struct seq_file *s, void *data)
+{
+ struct hva_ctx *ctx = s->private;
+
+ seq_printf(s, "[running encoding %d]\n", ctx->id);
+
+ hva_dbg_perf_compute(ctx);
+ format_ctx(s, ctx);
+
+ return 0;
+}
+
+hva_dbg_declare(ctx);
+
+void hva_dbg_ctx_create(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+ char name[4] = "";
+
+ ctx->dbg.min_duration = UINT_MAX;
+ ctx->dbg.min_period = UINT_MAX;
+ ctx->dbg.min_bitrate = UINT_MAX;
+
+ snprintf(name, sizeof(name), "%d", hva->instance_id);
+
+ ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444,
+ hva->dbg.debugfs_entry,
+ ctx, &hva_dbg_ctx_fops);
+}
+
+void hva_dbg_ctx_remove(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+
+ if (ctx->flags & HVA_FLAG_STREAMINFO)
+ /* save context before removing */
+ memcpy(&hva->dbg.last_ctx, ctx, sizeof(*ctx));
+
+ debugfs_remove(ctx->dbg.debugfs_entry);
+}
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index 8cc8467c0cd3..e6f247a983c7 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -607,6 +607,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
"%s width(%d) or height(%d) exceeds limits (%dx%d)\n",
pctx->name, frame_width, frame_height,
H264_MAX_SIZE_W, H264_MAX_SIZE_H);
+ pctx->frame_errors++;
return -EINVAL;
}
@@ -717,6 +718,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
default:
dev_err(dev, "%s invalid source pixel format\n",
pctx->name);
+ pctx->frame_errors++;
return -EINVAL;
}
@@ -741,6 +743,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
if (td->framerate_den == 0) {
dev_err(dev, "%s invalid framerate\n", pctx->name);
+ pctx->frame_errors++;
return -EINVAL;
}
@@ -831,6 +834,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
(payload > MAX_SPS_PPS_SIZE)) {
dev_err(dev, "%s invalid sps/pps size %d\n", pctx->name,
payload);
+ pctx->frame_errors++;
return -EINVAL;
}
@@ -842,6 +846,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
(u8 *)stream->vaddr,
&payload)) {
dev_err(dev, "%s fail to get SEI nal\n", pctx->name);
+ pctx->frame_errors++;
return -EINVAL;
}
@@ -963,6 +968,7 @@ err_seq_info:
err_ctx:
devm_kfree(dev, ctx);
err:
+ pctx->sys_errors++;
return ret;
}
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 68d625b412b6..ec25bdcfa3d1 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -9,6 +9,9 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#include <linux/seq_file.h>
+#endif
#include "hva.h"
#include "hva-hw.h"
@@ -470,6 +473,7 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
if (pm_runtime_get_sync(dev) < 0) {
dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
+ ctx->sys_errors++;
ret = -EFAULT;
goto out;
}
@@ -481,6 +485,7 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
break;
default:
dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ ctx->encode_errors++;
ret = -EFAULT;
goto out;
}
@@ -511,6 +516,7 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
msecs_to_jiffies(2000))) {
dev_err(dev, "%s %s: time out on completion\n", ctx->name,
__func__);
+ ctx->encode_errors++;
ret = -EFAULT;
goto out;
}
@@ -518,6 +524,8 @@ int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
/* get encoding status */
ret = ctx->hw_err ? -EFAULT : 0;
+ ctx->encode_errors += ctx->hw_err ? 1 : 0;
+
out:
disable_irq(hva->irq_its);
disable_irq(hva->irq_err);
@@ -536,3 +544,43 @@ out:
return ret;
}
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
+ #reg, readl_relaxed(hva->regs + reg))
+
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ mutex_lock(&hva->protect_mutex);
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
+ mutex_unlock(&hva->protect_mutex);
+ return;
+ }
+
+ seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);
+
+ DUMP(HVA_HIF_REG_RST);
+ DUMP(HVA_HIF_REG_RST_ACK);
+ DUMP(HVA_HIF_REG_MIF_CFG);
+ DUMP(HVA_HIF_REG_HEC_MIF_CFG);
+ DUMP(HVA_HIF_REG_CFL);
+ DUMP(HVA_HIF_REG_SFL);
+ DUMP(HVA_HIF_REG_LMI_ERR);
+ DUMP(HVA_HIF_REG_EMI_ERR);
+ DUMP(HVA_HIF_REG_HEC_MIF_ERR);
+ DUMP(HVA_HIF_REG_HEC_STS);
+ DUMP(HVA_HIF_REG_HVC_STS);
+ DUMP(HVA_HIF_REG_HJE_STS);
+ DUMP(HVA_HIF_REG_CNT);
+ DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
+ DUMP(HVA_HIF_REG_CLK_GATING);
+ DUMP(HVA_HIF_REG_VERSION);
+
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+}
+#endif
diff --git a/drivers/media/platform/sti/hva/hva-hw.h b/drivers/media/platform/sti/hva/hva-hw.h
index efb45b927524..b46017dcfae9 100644
--- a/drivers/media/platform/sti/hva/hva-hw.h
+++ b/drivers/media/platform/sti/hva/hva-hw.h
@@ -38,5 +38,8 @@ int hva_hw_runtime_suspend(struct device *dev);
int hva_hw_runtime_resume(struct device *dev);
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
struct hva_buffer *task);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s);
+#endif
#endif /* HVA_HW_H */
diff --git a/drivers/media/platform/sti/hva/hva-mem.c b/drivers/media/platform/sti/hva/hva-mem.c
index 649f66007ad6..821c78ed208c 100644
--- a/drivers/media/platform/sti/hva/hva-mem.c
+++ b/drivers/media/platform/sti/hva/hva-mem.c
@@ -17,14 +17,17 @@ int hva_mem_alloc(struct hva_ctx *ctx, u32 size, const char *name,
void *base;
b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
- if (!b)
+ if (!b) {
+ ctx->sys_errors++;
return -ENOMEM;
+ }
base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
DMA_ATTR_WRITE_COMBINE);
if (!base) {
dev_err(dev, "%s %s : dma_alloc_attrs failed for %s (size=%d)\n",
ctx->name, __func__, name, size);
+ ctx->sys_errors++;
devm_kfree(dev, b);
return -ENOMEM;
}
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
index 6bf3c8588230..1c4fc33cbcb5 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -15,8 +15,6 @@
#include "hva.h"
#include "hva-hw.h"
-#define HVA_NAME "st-hva"
-
#define MIN_FRAMES 1
#define MIN_STREAMS 1
@@ -226,6 +224,28 @@ static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
return ret;
}
+static void hva_dbg_summary(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+
+ if (!(ctx->flags & HVA_FLAG_STREAMINFO))
+ return;
+
+ dev_dbg(dev, "%s %4.4s %dx%d > %4.4s %dx%d %s %s: %d frames encoded, %d system errors, %d encoding errors, %d frame errors\n",
+ ctx->name,
+ (char *)&frame->pixelformat,
+ frame->aligned_width, frame->aligned_height,
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level,
+ ctx->encoded_frames,
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+}
+
/*
* V4L2 ioctl operations
*/
@@ -614,19 +634,17 @@ static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
ctx->ctrls.profile = ctrl->val;
- if (ctx->flags & HVA_FLAG_STREAMINFO)
- snprintf(ctx->streaminfo.profile,
- sizeof(ctx->streaminfo.profile),
- "%s profile",
- v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ snprintf(ctx->streaminfo.profile,
+ sizeof(ctx->streaminfo.profile),
+ "%s profile",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
ctx->ctrls.level = ctrl->val;
- if (ctx->flags & HVA_FLAG_STREAMINFO)
- snprintf(ctx->streaminfo.level,
- sizeof(ctx->streaminfo.level),
- "level %s",
- v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ snprintf(ctx->streaminfo.level,
+ sizeof(ctx->streaminfo.level),
+ "level %s",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
break;
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
ctx->ctrls.entropy_mode = ctrl->val;
@@ -793,6 +811,10 @@ static void hva_run_work(struct work_struct *work)
/* protect instance against reentrancy */
mutex_lock(&ctx->lock);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_begin(ctx);
+#endif
+
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
@@ -812,6 +834,12 @@ static void hva_run_work(struct work_struct *work)
dst_buf->field = V4L2_FIELD_NONE;
dst_buf->sequence = ctx->stream_num - 1;
+ ctx->encoded_frames++;
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_end(ctx, stream);
+#endif
+
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
}
@@ -1026,6 +1054,8 @@ err:
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
}
+ ctx->sys_errors++;
+
return ret;
}
@@ -1150,6 +1180,7 @@ static int hva_open(struct file *file)
if (ret) {
dev_err(dev, "%s [x:x] failed to setup controls\n",
HVA_PREFIX);
+ ctx->sys_errors++;
goto err_fh;
}
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
@@ -1162,6 +1193,7 @@ static int hva_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
dev_err(dev, "%s failed to initialize m2m context (%d)\n",
HVA_PREFIX, ret);
+ ctx->sys_errors++;
goto err_ctrls;
}
@@ -1175,6 +1207,10 @@ static int hva_open(struct file *file)
/* default parameters for frame and stream */
set_default_params(ctx);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_create(ctx);
+#endif
+
dev_info(dev, "%s encoder instance created\n", ctx->name);
return 0;
@@ -1206,6 +1242,9 @@ static int hva_release(struct file *file)
hva->nb_of_instances--;
}
+ /* trace a summary of instance before closing (debug purpose) */
+ hva_dbg_summary(ctx);
+
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
@@ -1213,6 +1252,10 @@ static int hva_release(struct file *file)
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_remove(ctx);
+#endif
+
dev_info(dev, "%s encoder instance released\n", ctx->name);
kfree(ctx);
@@ -1337,6 +1380,10 @@ static int hva_probe(struct platform_device *pdev)
goto err_hw;
}
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_create(hva);
+#endif
+
hva->work_queue = create_workqueue(HVA_NAME);
if (!hva->work_queue) {
dev_err(dev, "%s %s failed to allocate work queue\n",
@@ -1358,6 +1405,9 @@ static int hva_probe(struct platform_device *pdev)
err_work_queue:
destroy_workqueue(hva->work_queue);
err_v4l2:
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
v4l2_device_unregister(&hva->v4l2_dev);
err_hw:
hva_hw_remove(hva);
@@ -1376,6 +1426,10 @@ static int hva_remove(struct platform_device *pdev)
hva_hw_remove(hva);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
+
v4l2_device_unregister(&hva->v4l2_dev);
dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);
diff --git a/drivers/media/platform/sti/hva/hva.h b/drivers/media/platform/sti/hva/hva.h
index caa580825541..0d749b257a21 100644
--- a/drivers/media/platform/sti/hva/hva.h
+++ b/drivers/media/platform/sti/hva/hva.h
@@ -21,7 +21,8 @@
#define ctx_to_hdev(c) (c->hva_dev)
-#define HVA_PREFIX "[---:----]"
+#define HVA_NAME "st-hva"
+#define HVA_PREFIX "[---:----]"
extern const struct hva_enc nv12h264enc;
extern const struct hva_enc nv21h264enc;
@@ -153,6 +154,61 @@ struct hva_stream {
#define to_hva_stream(vb) \
container_of(vb, struct hva_stream, vbuf)
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_ctx_dbg - instance context debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @is_valid_period: true if the sequence is valid for performance
+ * @begin: start time of last HW task
+ * @total_duration: total HW processing duration in 0.1ms
+ * @cnt_duration: number of HW processing runs
+ * @min_duration: minimum HW processing duration in 0.1ms
+ * @max_duration: maximum HW processing duration in 0.1ms
+ * @avg_duration: average HW processing duration in 0.1ms
+ * @max_fps: maximum frames encoded per second (in 0.1Hz)
+ * @total_period: total encoding periods in 0.1ms
+ * @cnt_period: number of periods
+ * @min_period: minimum encoding period in 0.1ms
+ * @max_period: maximum encoding period in 0.1ms
+ * @avg_period: average encoding period in 0.1ms
+ * @total_stream_size: total number of encoded bytes
+ * @avg_fps: average frames encoded per second (in 0.1Hz)
+ * @window_duration: duration of the sampling window in 0.1ms
+ * @cnt_window: number of samples in the window
+ * @window_stream_size: number of encoded bytes over the sampling window
+ * @last_bitrate: bitrate over the last sampling window
+ * @min_bitrate: minimum bitrate in kbps
+ * @max_bitrate: maximum bitrate in kbps
+ * @avg_bitrate: average bitrate in kbps
+ */
+struct hva_ctx_dbg {
+ struct dentry *debugfs_entry;
+ bool is_valid_period;
+ ktime_t begin;
+ u32 total_duration;
+ u32 cnt_duration;
+ u32 min_duration;
+ u32 max_duration;
+ u32 avg_duration;
+ u32 max_fps;
+ u32 total_period;
+ u32 cnt_period;
+ u32 min_period;
+ u32 max_period;
+ u32 avg_period;
+ u32 total_stream_size;
+ u32 avg_fps;
+ u32 window_duration;
+ u32 cnt_window;
+ u32 window_stream_size;
+ u32 last_bitrate;
+ u32 min_bitrate;
+ u32 max_bitrate;
+ u32 avg_bitrate;
+};
+#endif
+
struct hva_dev;
struct hva_enc;
@@ -182,6 +238,11 @@ struct hva_enc;
* @priv: private codec data for this instance, allocated
* by encoder @open time
* @hw_err: true if hardware error detected
+ * @encoded_frames: number of encoded frames
+ * @sys_errors: number of system errors (memory, resource, pm...)
+ * @encode_errors: number of encoding errors (hw/driver errors)
+ * @frame_errors: number of frame errors (format, size, header...)
+ * @dbg: context debug info
*/
struct hva_ctx {
struct hva_dev *hva_dev;
@@ -207,11 +268,31 @@ struct hva_ctx {
struct hva_enc *enc;
void *priv;
bool hw_err;
+ u32 encoded_frames;
+ u32 sys_errors;
+ u32 encode_errors;
+ u32 frame_errors;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_ctx_dbg dbg;
+#endif
};
#define HVA_FLAG_STREAMINFO 0x0001
#define HVA_FLAG_FRAMEINFO 0x0002
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_dev_dbg - device debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @last_ctx: debug information about last running instance context
+ */
+struct hva_dev_dbg {
+ struct dentry *debugfs_entry;
+ struct hva_ctx last_ctx;
+};
+#endif
+
#define HVA_MAX_INSTANCES 16
#define HVA_MAX_ENCODERS 10
#define HVA_MAX_FORMATS HVA_MAX_ENCODERS
@@ -250,6 +331,7 @@ struct hva_ctx {
* @lmi_err_reg: local memory interface error register value
* @emi_err_reg: external memory interface error register value
* @hec_mif_err_reg: HEC memory interface error register value
+ * @dbg: device debug info
*/
struct hva_dev {
struct v4l2_device v4l2_dev;
@@ -284,6 +366,9 @@ struct hva_dev {
u32 lmi_err_reg;
u32 emi_err_reg;
u32 hec_mif_err_reg;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_dev_dbg dbg;
+#endif
};
/**
@@ -312,4 +397,13 @@ struct hva_enc {
struct hva_stream *stream);
};
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_debugfs_create(struct hva_dev *hva);
+void hva_debugfs_remove(struct hva_dev *hva);
+void hva_dbg_ctx_create(struct hva_ctx *ctx);
+void hva_dbg_ctx_remove(struct hva_ctx *ctx);
+void hva_dbg_perf_begin(struct hva_ctx *ctx);
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream);
+#endif
+
#endif /* HVA_H */
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 13bfd7184160..23472e3784ff 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -453,7 +453,7 @@ int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
if (ret)
return ret;
- while (vpdma_list_busy(vpdma, list_num) && timeout--)
+ while (vpdma_list_busy(vpdma, list_num) && --timeout)
;
if (timeout == 0) {
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index a98f679bd88d..970b9b6dab25 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -907,6 +907,7 @@ static int vim2m_open(struct file *file)
if (hdl->error) {
rc = hdl->error;
v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
goto open_unlock;
}
ctx->fh.ctrl_handler = hdl;
@@ -928,6 +929,7 @@ static int vim2m_open(struct file *file)
rc = PTR_ERR(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
kfree(ctx);
goto open_unlock;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index c52dd8787794..a18e6fec219b 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -63,7 +63,7 @@ static const struct vivid_fmt formats_ovl[] = {
};
/* The number of discrete webcam framesizes */
-#define VIVID_WEBCAM_SIZES 4
+#define VIVID_WEBCAM_SIZES 5
/* The number of discrete webcam frameintervals */
#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
@@ -73,6 +73,7 @@ static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
{ 640, 360 },
{ 1280, 720 },
{ 1920, 1080 },
+ { 3840, 2160 },
};
/*
@@ -80,7 +81,9 @@ static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
* elements in this array as there are in webcam_sizes.
*/
static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
+ { 1, 1 },
{ 1, 2 },
+ { 1, 4 },
{ 1, 5 },
{ 1, 10 },
{ 1, 15 },
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index cd209dccff1b..b4b583f7137a 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -90,7 +90,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
if (ret == -ETIMEDOUT)
dev_err(vsp1->dev, "DRM pipeline stop timeout\n");
- media_entity_pipeline_stop(&pipe->output->entity.subdev.entity);
+ media_pipeline_stop(&pipe->output->entity.subdev.entity);
for (i = 0; i < bru->entity.source_pad; ++i) {
vsp1->drm->inputs[i].enabled = false;
@@ -196,7 +196,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
if (ret < 0)
return ret;
- ret = media_entity_pipeline_start(&pipe->output->entity.subdev.entity,
+ ret = media_pipeline_start(&pipe->output->entity.subdev.entity,
&pipe->pipe);
if (ret < 0) {
dev_dbg(vsp1->dev, "%s: pipeline start failed\n", __func__);
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 41e8b096dab8..3eaadbf7a876 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -548,20 +548,20 @@ out:
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
struct vsp1_video *video)
{
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &video->video.entity;
struct media_device *mdev = entity->graph_obj.mdev;
unsigned int i;
int ret;
/* Walk the graph to locate the entities and video nodes. */
- ret = media_entity_graph_walk_init(&graph, mdev);
+ ret = media_graph_walk_init(&graph, mdev);
if (ret)
return ret;
- media_entity_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
+ while ((entity = media_graph_walk_next(&graph))) {
struct v4l2_subdev *subdev;
struct vsp1_rwpf *rwpf;
struct vsp1_entity *e;
@@ -590,7 +590,7 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
}
}
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
/* We need one output and at least one input. */
if (pipe->num_inputs == 0 || !pipe->output)
@@ -848,7 +848,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
}
mutex_unlock(&pipe->lock);
- media_entity_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(&video->video.entity);
vsp1_video_pipeline_put(pipe);
/* Remove all buffers from the IRQ queue. */
@@ -980,7 +980,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return PTR_ERR(pipe);
}
- ret = __media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
if (ret < 0) {
mutex_unlock(&mdev->graph_mutex);
goto err_pipe;
@@ -1003,7 +1003,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return 0;
err_stop:
- media_entity_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(&video->video.entity);
err_pipe:
vsp1_video_pipeline_put(pipe);
return ret;
@@ -1021,6 +1021,7 @@ static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vsp1_video_streamon,
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 1d5836c3fb7a..522cdfdd3345 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -177,7 +177,7 @@ done:
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
struct xvip_dma *start)
{
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &start->video.entity;
struct media_device *mdev = entity->graph_obj.mdev;
unsigned int num_inputs = 0;
@@ -187,15 +187,15 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
mutex_lock(&mdev->graph_mutex);
/* Walk the graph to locate the video nodes. */
- ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&graph, mdev);
if (ret) {
mutex_unlock(&mdev->graph_mutex);
return ret;
}
- media_entity_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
+ while ((entity = media_graph_walk_next(&graph))) {
struct xvip_dma *dma;
if (entity->function != MEDIA_ENT_F_IO_V4L)
@@ -213,7 +213,7 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
mutex_unlock(&mdev->graph_mutex);
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
/* We need exactly one output and zero or one input. */
if (num_outputs != 1 || num_inputs > 1)
@@ -409,7 +409,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
pipe = dma->video.entity.pipe
? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
- ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
+ ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
if (ret < 0)
goto error;
@@ -435,7 +435,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
error_stop:
- media_entity_pipeline_stop(&dma->video.entity);
+ media_pipeline_stop(&dma->video.entity);
error:
/* Give back all queued buffers to videobuf2. */
@@ -463,7 +463,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
/* Cleanup the pipeline and mark it as being stopped. */
xvip_pipeline_cleanup(pipe);
- media_entity_pipeline_stop(&dma->video.entity);
+ media_pipeline_stop(&dma->video.entity);
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index 2ec1f6c4b274..9c49d1d10bee 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -460,21 +460,21 @@ static const struct v4l2_ctrl_ops xtpg_ctrl_ops = {
.s_ctrl = xtpg_s_ctrl,
};
-static struct v4l2_subdev_core_ops xtpg_core_ops = {
+static const struct v4l2_subdev_core_ops xtpg_core_ops = {
};
-static struct v4l2_subdev_video_ops xtpg_video_ops = {
+static const struct v4l2_subdev_video_ops xtpg_video_ops = {
.s_stream = xtpg_s_stream,
};
-static struct v4l2_subdev_pad_ops xtpg_pad_ops = {
+static const struct v4l2_subdev_pad_ops xtpg_pad_ops = {
.enum_mbus_code = xvip_enum_mbus_code,
.enum_frame_size = xtpg_enum_frame_size,
.get_fmt = xtpg_get_format,
.set_fmt = xtpg_set_format,
};
-static struct v4l2_subdev_ops xtpg_ops = {
+static const struct v4l2_subdev_ops xtpg_ops = {
.core = &xtpg_core_ops,
.video = &xtpg_video_ops,
.pad = &xtpg_pad_ops,
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 2262b8139ca1..53bc8c010035 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -28,10 +28,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 82affaedf067..cbaf850f4791 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -309,9 +309,7 @@ static void cadet_handler(unsigned long data)
/*
* Clean up and exit
*/
- init_timer(&dev->readtimer);
- dev->readtimer.function = cadet_handler;
- dev->readtimer.data = data;
+ setup_timer(&dev->readtimer, cadet_handler, data);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
@@ -320,9 +318,7 @@ static void cadet_start_rds(struct cadet *dev)
{
dev->rdsstat = 1;
outb(0x80, dev->io); /* Select RDS fifo */
- init_timer(&dev->readtimer);
- dev->readtimer.function = cadet_handler;
- dev->readtimer.data = (unsigned long)dev;
+ setup_timer(&dev->readtimer, cadet_handler, (unsigned long)dev);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index c309ee45a08e..7312e469e850 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -13,11 +13,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index ba4c01f1bd0c..bab414919cf0 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -13,11 +13,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _RADIO_ISA_H_
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 0c5d2db3b828..53a7c2e87762 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* kernel includes */
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c
index b3000ef85ee7..c2010a905a47 100644
--- a/drivers/media/radio/radio-ma901.c
+++ b/drivers/media/radio/radio-ma901.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index c2927fd12615..95c12532e87a 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 85667a95f003..23971f5502a8 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index 0e65a85d52c6..b50638ec5f09 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index a1930b300c06..9db8331a0c75 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* History:
* 2008-12-06 Fabio Belavenuto <belavenuto@gmail.com>
* initial code
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
index 83fe7ab358df..04ed1a5d1177 100644
--- a/drivers/media/radio/radio-tea5777.c
+++ b/drivers/media/radio/radio-tea5777.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/delay.h>
diff --git a/drivers/media/radio/radio-tea5777.h b/drivers/media/radio/radio-tea5777.h
index 4bd942526a1b..6b5af3c8457b 100644
--- a/drivers/media/radio/radio-tea5777.h
+++ b/drivers/media/radio/radio-tea5777.h
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/videodev2.h>
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a82eb9678d6c..fc4d9a73ab17 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/io.h>
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 9ce4b12299b4..7240223dc15a 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index ba8e357ba0a2..bf9eced906db 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 1d827adab7eb..cd76facc22f5 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 9b81969d76b5..b3034f80163f 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 1add136d37a3..571f29a34bf8 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 6c0ca900702e..7d2defd9d399 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index 6c7597383ca2..6f93ef1249a6 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index bc2a8b5442ae..60f026a58076 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/completion.h>
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 9f879f0ec0ef..ed210f4c476a 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index dd203de5de95..1ff2eec4ed52 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _FM_DRV_H
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 4be07656fbc0..74a1b3ecb30a 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -26,10 +26,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/wl128x/fmdrv_common.h b/drivers/media/radio/wl128x/fmdrv_common.h
index d9b9c6cf83b4..7f1514eb1c07 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.h
+++ b/drivers/media/radio/wl128x/fmdrv_common.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _FMDRV_COMMON_H
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index e7455f82fadc..f689adc831ce 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "fmdrv.h"
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.h b/drivers/media/radio/wl128x/fmdrv_rx.h
index 23922188882f..f647c9bc796a 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.h
+++ b/drivers/media/radio/wl128x/fmdrv_rx.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _FMDRV_RX_H
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.c b/drivers/media/radio/wl128x/fmdrv_tx.c
index 839970b0f313..47ac19466ed2 100644
--- a/drivers/media/radio/wl128x/fmdrv_tx.c
+++ b/drivers/media/radio/wl128x/fmdrv_tx.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/delay.h>
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.h b/drivers/media/radio/wl128x/fmdrv_tx.h
index 11ae2e4c2d03..95e4daf7ba43 100644
--- a/drivers/media/radio/wl128x/fmdrv_tx.h
+++ b/drivers/media/radio/wl128x/fmdrv_tx.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _FMDRV_TX_H
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fb42f0fd0c1f..71423f45c05c 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -22,10 +22,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/export.h>
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.h b/drivers/media/radio/wl128x/fmdrv_v4l2.h
index 0ba79d745e2f..9babb4ab2fad 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.h
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _FMDRV_V4L2_H
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 629e8ca15ab3..d1d3fd00ed89 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -235,6 +235,17 @@ config IR_MESON
To compile this driver as a module, choose M here: the
module will be called meson-ir.
+config IR_MTK
+ tristate "Mediatek IR remote receiver"
+ depends on RC_CORE
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ ---help---
+ Say Y if you want to use the IR remote receiver available
+ on Mediatek SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-cir.
+
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
@@ -261,6 +272,15 @@ config IR_REDRAT3
To compile this driver as a module, choose M here: the
module will be called redrat3.
+config IR_SPI
+ tristate "SPI connected IR LED"
+ depends on SPI && LIRC
+ ---help---
+ Say Y if you want to use an IR LED connected through SPI bus.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ir-spi.
+
config IR_STREAMZAP
tristate "Streamzap PC Remote IR Receiver"
depends on USB_ARCH_HAS_HCD
@@ -336,7 +356,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS && LIRC
+ depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 3a984ee301e2..679aa0af85cd 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
obj-$(CONFIG_IR_RX51) += ir-rx51.o
+obj-$(CONFIG_IR_SPI) += ir-spi.o
obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o
obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
@@ -38,3 +39,4 @@ obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
+obj-$(CONFIG_IR_MTK) += mtk-cir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 0884b7dc0e71..9cf3e69de16a 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -36,10 +36,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
* Hardware & software notes
@@ -764,7 +760,6 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote)
struct rc_dev *rdev = ati_remote->rdev;
rdev->priv = ati_remote;
- rdev->driver_type = RC_DRIVER_SCANCODE;
rdev->allowed_protocols = RC_BIT_OTHER;
rdev->driver_name = "ati_remote";
@@ -851,7 +846,7 @@ static int ati_remote_probe(struct usb_interface *interface,
}
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
- rc_dev = rc_allocate_device();
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!ati_remote || !rc_dev)
goto exit_free_dev_rdev;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index bd5512e64aea..60da963f40dc 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
* Special thanks to:
* Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore
* bringing to life support for transmission & learning mode.
@@ -1012,7 +1007,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* allocate memory */
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!dev || !rdev)
goto exit_free_dev_rdev;
@@ -1058,8 +1053,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!dev->hw_learning_and_tx_capable)
learning_mode_force = false;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index a7911e3b9bc0..494646b2a284 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -12,11 +12,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
#include <linux/spinlock.h>
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index ecab69ea3d51..0d3562712f27 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -16,11 +16,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -492,7 +487,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
return ret;
/* input device for IR remote (and tx) */
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
goto exit_free_dev_rdev;
@@ -534,8 +529,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* Set up the rc device */
rdev->priv = fintek;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->input_name = FINTEK_DESCRIPTION;
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
index b698f3d2ced9..ac34a774d018 100644
--- a/drivers/media/rc/fintek-cir.h
+++ b/drivers/media/rc/fintek-cir.h
@@ -16,11 +16,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
#include <linux/spinlock.h>
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 5b63b1f15cb1..4a4895e4d599 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -143,14 +143,13 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
if (!gpio_dev)
return -ENOMEM;
- rcdev = rc_allocate_device();
+ rcdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rcdev) {
rc = -ENOMEM;
goto err_allocate_device;
}
rcdev->priv = gpio_dev;
- rcdev->driver_type = RC_DRIVER_IR_RAW;
rcdev->input_name = GPIO_IR_DEVICE_NAME;
rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0";
rcdev->input_id.bustype = BUS_HOST;
@@ -165,7 +164,7 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
if (pdata->allowed_protos)
rcdev->allowed_protocols = pdata->allowed_protos;
else
- rcdev->allowed_protocols = RC_BIT_ALL;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index 5cf983be07a2..0f0ed4ea4d06 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -190,7 +190,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
usb_make_path(udev, ir->phys, sizeof(ir->phys));
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
goto fail;
@@ -198,13 +198,12 @@ static int igorplugusb_probe(struct usb_interface *intf,
rc->input_phys = ir->phys;
usb_to_input_id(udev, &rc->input_id);
rc->dev.parent = &intf->dev;
- rc->driver_type = RC_DRIVER_IR_RAW;
/*
* This device can only store 36 pulses + spaces, which is not enough
* for the NEC protocol and many others.
*/
- rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_NECX |
- RC_BIT_NEC32 | RC_BIT_RC6_6A_20 |
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER & ~(RC_BIT_NEC |
+ RC_BIT_NECX | RC_BIT_NEC32 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 5f634545ddd8..ccf24fd7ec1b 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/device.h>
@@ -431,7 +427,7 @@ static int iguanair_probe(struct usb_interface *intf,
struct usb_host_interface *idesc;
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir || !rc) {
ret = -ENOMEM;
goto out;
@@ -494,8 +490,7 @@ static int iguanair_probe(struct usb_interface *intf,
rc->input_phys = ir->phys;
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
- rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_ALL;
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
@@ -504,7 +499,9 @@ static int iguanair_probe(struct usb_interface *intf,
rc->tx_ir = iguanair_tx;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_RC6_MCE;
- rc->timeout = MS_TO_NS(100);
+ rc->min_timeout = 1;
+ rc->timeout = IR_DEFAULT_TIMEOUT;
+ rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rc->rx_resolution = RX_RESOLUTION;
iguanair_set_tx_carrier(rc, 38000);
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 7bb71bc9f534..431d33b36fb0 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -488,7 +488,15 @@ static int img_ir_set_filter(struct rc_dev *dev, enum rc_filter_type type,
/* convert scancode filter to raw filter */
filter.minlen = 0;
filter.maxlen = ~0;
- ret = hw->decoder->filter(sc_filter, &filter, hw->enabled_protocols);
+ if (type == RC_FILTER_NORMAL) {
+ /* guess scancode from protocol */
+ ret = hw->decoder->filter(sc_filter, &filter,
+ dev->enabled_protocols);
+ } else {
+ /* for wakeup user provided exact protocol variant */
+ ret = hw->decoder->filter(sc_filter, &filter,
+ 1ULL << dev->wakeup_protocol);
+ }
if (ret)
goto unlock;
dev_dbg(priv->dev, "IR raw %sfilter=%016llx & %016llx\n",
@@ -581,6 +589,7 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
/* clear the wakeup scancode filter */
rdev->scancode_wakeup_filter.data = 0;
rdev->scancode_wakeup_filter.mask = 0;
+ rdev->wakeup_protocol = RC_TYPE_UNKNOWN;
/* clear raw filters */
_img_ir_set_filter(priv, NULL);
@@ -685,7 +694,6 @@ success:
if (!hw->decoder || !hw->decoder->filter)
wakeup_protocols = 0;
rdev->allowed_wakeup_protocols = wakeup_protocols;
- rdev->enabled_wakeup_protocols = wakeup_protocols;
return 0;
}
@@ -701,7 +709,6 @@ static void img_ir_set_protocol(struct img_ir_priv *priv, u64 proto)
mutex_lock(&rdev->lock);
rdev->enabled_protocols = proto;
rdev->allowed_wakeup_protocols = proto;
- rdev->enabled_wakeup_protocols = proto;
mutex_unlock(&rdev->lock);
}
@@ -1071,7 +1078,7 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
}
/* Allocate hardware decoder */
- hw->rdev = rdev = rc_allocate_device();
+ hw->rdev = rdev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rdev) {
dev_err(priv->dev, "cannot allocate input device\n");
error = -ENOMEM;
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index 09314933ea08..044fd42b22a0 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -11,6 +11,7 @@
#include "img-ir-hw.h"
#include <linux/bitrev.h>
+#include <linux/log2.h>
/* Convert NEC data to a scancode */
static int img_ir_nec_scancode(int len, u64 raw, u64 enabled_protocols,
@@ -62,7 +63,23 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
data = in->data & 0xff;
data_m = in->mask & 0xff;
- if ((in->data | in->mask) & 0xff000000) {
+ protocols &= RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
+
+ /*
+ * If only one bit is set, we were requested to do an exact
+ * protocol. This should be the case for wakeup filters; for
+ * normal filters, guess the protocol from the scancode.
+ */
+ if (!is_power_of_2(protocols)) {
+ if ((in->data | in->mask) & 0xff000000)
+ protocols = RC_BIT_NEC32;
+ else if ((in->data | in->mask) & 0x00ff0000)
+ protocols = RC_BIT_NECX;
+ else
+ protocols = RC_BIT_NEC;
+ }
+
+ if (protocols == RC_BIT_NEC32) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
/* scan encoding: as transmitted, MSBit = first received bit */
addr = bitrev8(in->data >> 24);
@@ -73,7 +90,7 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
data_m = bitrev8(in->mask >> 8);
data_inv = bitrev8(in->data >> 0);
data_inv_m = bitrev8(in->mask >> 0);
- } else if ((in->data | in->mask) & 0x00ff0000) {
+ } else if (protocols == RC_BIT_NECX) {
/* Extended NEC */
/* scan encoding AAaaDD */
addr = (in->data >> 16) & 0xff;
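
The filter change above keys off is_power_of_2(): a wakeup filter arrives with exactly one protocol bit set, while a normal filter may have several NEC variants enabled, so the variant has to be guessed from which scancode bytes are in use. A minimal stand-alone sketch of that single-bit test (not part of the patch; the bit positions below are illustrative, not the real RC_BIT_* values):

/*
 * Illustrative only: distinguishes "exactly one protocol bit set"
 * (wakeup filter) from "several bits set" (normal filter, guess needed).
 */
#include <stdio.h>
#include <stdint.h>

#define BIT_NEC   (1ULL << 0)	/* illustrative positions, not RC_BIT_* */
#define BIT_NECX  (1ULL << 1)
#define BIT_NEC32 (1ULL << 2)

static int is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	uint64_t wakeup = BIT_NECX;				/* exact variant given */
	uint64_t normal = BIT_NEC | BIT_NECX | BIT_NEC32;	/* variant must be guessed */

	printf("wakeup exact: %d\n", is_power_of_2(wakeup));	/* prints 1 */
	printf("normal exact: %d\n", is_power_of_2(normal));	/* prints 0 */
	return 0;
}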
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
index 33f37ed87ad2..8d2f8e2006e7 100644
--- a/drivers/media/rc/img-ir/img-ir-raw.c
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -110,7 +110,7 @@ int img_ir_probe_raw(struct img_ir_priv *priv)
setup_timer(&raw->timer, img_ir_echo_timer, (unsigned long)priv);
/* Allocate raw decoder */
- raw->rdev = rdev = rc_allocate_device();
+ raw->rdev = rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev) {
dev_err(priv->dev, "cannot allocate raw input device\n");
return -ENOMEM;
@@ -118,7 +118,6 @@ int img_ir_probe_raw(struct img_ir_priv *priv)
rdev->priv = priv;
rdev->map_name = RC_MAP_EMPTY;
rdev->input_name = "IMG Infrared Decoder Raw";
- rdev->driver_type = RC_DRIVER_IR_RAW;
/* Register raw decoder */
error = rc_register_device(rdev);
diff --git a/drivers/media/rc/img-ir/img-ir-sony.c b/drivers/media/rc/img-ir/img-ir-sony.c
index 7f7375f82ed6..3fcba271a419 100644
--- a/drivers/media/rc/img-ir/img-ir-sony.c
+++ b/drivers/media/rc/img-ir/img-ir-sony.c
@@ -68,19 +68,29 @@ static int img_ir_sony_filter(const struct rc_scancode_filter *in,
func = (in->data >> 0) & 0x7f;
func_m = (in->mask >> 0) & 0x7f;
- if (subdev & subdev_m) {
+ protocols &= RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20;
+
+ /*
+ * If only one bit is set, we were requested to do an exact
+ * protocol. This should be the case for wakeup filters; for
+ * normal filters, guess the protocol from the scancode.
+ */
+ if (!is_power_of_2(protocols)) {
+ if (subdev & subdev_m)
+ protocols = RC_BIT_SONY20;
+ else if (dev & dev_m & 0xe0)
+ protocols = RC_BIT_SONY15;
+ else
+ protocols = RC_BIT_SONY12;
+ }
+
+ if (protocols == RC_BIT_SONY20) {
/* can't encode subdev and higher device bits */
if (dev & dev_m & 0xe0)
return -EINVAL;
- /* subdevice (extended) bits only in 20 bit encoding */
- if (!(protocols & RC_BIT_SONY20))
- return -EINVAL;
len = 20;
dev_m &= 0x1f;
- } else if (dev & dev_m & 0xe0) {
- /* upper device bits only in 15 bit encoding */
- if (!(protocols & RC_BIT_SONY15))
- return -EINVAL;
+ } else if (protocols == RC_BIT_SONY15) {
len = 15;
subdev_m = 0;
} else {
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 0785a24af8fc..89823d24a384 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -20,10 +20,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
@@ -96,6 +92,7 @@ struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
+#define IMON_IR_RAW 2
struct imon_panel_key_table key_table[];
};
@@ -126,6 +123,12 @@ struct imon_context {
unsigned char usb_tx_buf[8];
unsigned int send_packet_delay;
+ struct rx_data {
+ int count; /* length of 0 or 1 sequence */
+ int prev_bit; /* logic level of sequence */
+ int initial_space; /* initial space flag */
+ } rx;
+
struct tx_t {
unsigned char data_buf[35]; /* user data buffer */
struct completion finished; /* wait for write to finish */
@@ -328,6 +331,10 @@ static const struct imon_usb_dev_descr imon_DH102 = {
}
};
+static const struct imon_usb_dev_descr imon_ir_raw = {
+ .flags = IMON_IR_RAW,
+};
+
/*
* USB Device ID for iMON USB Control Boards
*
@@ -411,6 +418,18 @@ static struct usb_device_id imon_usb_id_table[] = {
/* device specifics unknown */
{ USB_DEVICE(0x15c2, 0x0046),
.driver_info = (unsigned long)&imon_default_table},
+ /* TriGem iMON (IR only) -- TG_iMON.inf */
+ { USB_DEVICE(0x0aa8, 0x8001),
+ .driver_info = (unsigned long)&imon_ir_raw},
+ /* SoundGraph iMON (IR only) -- sg_imon.inf */
+ { USB_DEVICE(0x04e8, 0xff30),
+ .driver_info = (unsigned long)&imon_ir_raw},
+ /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */
+ { USB_DEVICE(0x0aa8, 0xffda),
+ .driver_info = (unsigned long)&imon_ir_raw},
+ /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */
+ { USB_DEVICE(0x15c2, 0xffda),
+ .driver_info = (unsigned long)&imon_ir_raw},
{}
};
@@ -1577,11 +1596,94 @@ static int imon_parse_press_type(struct imon_context *ictx,
/**
* Process the incoming packet
*/
-static void imon_incoming_packet(struct imon_context *ictx,
+/**
+ * Convert bit count to time duration (in us) and submit
+ * the value to lirc_dev.
+ */
+static void submit_data(struct imon_context *context)
+{
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.pulse = context->rx.prev_bit;
+ ev.duration = US_TO_NS(context->rx.count * BIT_DURATION);
+ ir_raw_event_store_with_filter(context->rdev, &ev);
+}
+
+/**
+ * Process the incoming packet
+ */
+static void imon_incoming_ir_raw(struct imon_context *context,
struct urb *urb, int intf)
{
int len = urb->actual_length;
unsigned char *buf = urb->transfer_buffer;
+ struct device *dev = context->dev;
+ int octet, bit;
+ unsigned char mask;
+
+ if (len != 8) {
+ dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
+ __func__, len, intf);
+ return;
+ }
+
+ if (debug)
+ dev_info(dev, "raw packet: %*ph\n", len, buf);
+ /*
+ * Translate received data to pulse and space lengths.
+ * Received data is active low, i.e. pulses are 0 and
+ * spaces are 1.
+ *
+ * My original algorithm was essentially similar to
+ * Changwoo Ryu's with the exception that he switched
+ * the incoming bits to active high and also fed an
+ * initial space to LIRC at the start of a new sequence
+ * if the previous bit was a pulse.
+ *
+ * I've decided to adopt his algorithm.
+ */
+
+ if (buf[7] == 1 && context->rx.initial_space) {
+ /* LIRC requires a leading space */
+ context->rx.prev_bit = 0;
+ context->rx.count = 4;
+ submit_data(context);
+ context->rx.count = 0;
+ }
+
+ for (octet = 0; octet < 5; ++octet) {
+ mask = 0x80;
+ for (bit = 0; bit < 8; ++bit) {
+ int curr_bit = !(buf[octet] & mask);
+
+ if (curr_bit != context->rx.prev_bit) {
+ if (context->rx.count) {
+ submit_data(context);
+ context->rx.count = 0;
+ }
+ context->rx.prev_bit = curr_bit;
+ }
+ ++context->rx.count;
+ mask >>= 1;
+ }
+ }
+
+ if (buf[7] == 10) {
+ if (context->rx.count) {
+ submit_data(context);
+ context->rx.count = 0;
+ }
+ context->rx.initial_space = context->rx.prev_bit;
+ }
+
+ ir_raw_event_handle(context->rdev);
+}
+
+static void imon_incoming_scancode(struct imon_context *ictx,
+ struct urb *urb, int intf)
+{
+ int len = urb->actual_length;
+ unsigned char *buf = urb->transfer_buffer;
struct device *dev = ictx->dev;
unsigned long flags;
u32 kc;
@@ -1761,7 +1863,10 @@ static void usb_rx_callback_intf0(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ if (ictx->rdev->driver_type == RC_DRIVER_IR_RAW)
+ imon_incoming_ir_raw(ictx, urb, intfnum);
+ else
+ imon_incoming_scancode(ictx, urb, intfnum);
break;
default:
@@ -1802,7 +1907,10 @@ static void usb_rx_callback_intf1(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ if (ictx->rdev->driver_type == RC_DRIVER_IR_RAW)
+ imon_incoming_ir_raw(ictx, urb, intfnum);
+ else
+ imon_incoming_scancode(ictx, urb, intfnum);
break;
default:
@@ -1910,11 +2018,14 @@ static void imon_set_display_type(struct imon_context *ictx)
case 0x0041:
case 0x0042:
case 0x0043:
+ case 0x8001:
+ case 0xff30:
configured_display_type = IMON_DISPLAY_TYPE_NONE;
ictx->display_supported = false;
break;
case 0x0036:
case 0x0044:
+ case 0xffda:
default:
configured_display_type = IMON_DISPLAY_TYPE_VFD;
break;
@@ -1939,7 +2050,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x88 };
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(ictx->dev_descr->flags & IMON_IR_RAW ?
+ RC_DRIVER_IR_RAW : RC_DRIVER_SCANCODE);
if (!rdev) {
dev_err(ictx->dev, "remote control dev allocation failed\n");
goto out;
@@ -1957,8 +2069,11 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->dev.parent = ictx->dev;
rdev->priv = ictx;
- rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protocols = RC_BIT_OTHER | RC_BIT_RC6_MCE; /* iMON PAD or MCE */
+ if (ictx->dev_descr->flags & IMON_IR_RAW)
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ else
+ /* iMON PAD or MCE */
+ rdev->allowed_protocols = RC_BIT_OTHER | RC_BIT_RC6_MCE;
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
@@ -1976,7 +2091,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
imon_set_display_type(ictx);
- if (ictx->rc_type == RC_BIT_RC6_MCE)
+ if (ictx->rc_type == RC_BIT_RC6_MCE ||
+ ictx->dev_descr->flags & IMON_IR_RAW)
rdev->map_name = RC_MAP_IMON_MCE;
else
rdev->map_name = RC_MAP_IMON_PAD;
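
The new imon_incoming_ir_raw() path run-length encodes the sampled line level (active low, so a 0 bit is a pulse and a 1 bit is a space) and reports each run as one event of count * BIT_DURATION microseconds. A simplified user-space sketch of that conversion (not part of the driver; the sample bytes and the BIT_DURATION value are assumed for illustration, and the buf[7] framing handling is omitted):

#include <stdio.h>

#define BIT_DURATION 250	/* us per sampled bit; assumed value */

int main(void)
{
	unsigned char buf[5] = { 0xf0, 0x00, 0xff, 0x0f, 0xff };
	int prev_bit = 0, count = 0, octet, bit;

	for (octet = 0; octet < 5; octet++) {
		for (bit = 7; bit >= 0; bit--) {
			/* active low: a cleared bit on the wire is a pulse */
			int curr_bit = !(buf[octet] & (1 << bit));

			if (curr_bit != prev_bit) {
				if (count)
					printf("%s %d us\n",
					       prev_bit ? "pulse" : "space",
					       count * BIT_DURATION);
				count = 0;
				prev_bit = curr_bit;
			}
			count++;
		}
	}
	/* flush the final run */
	printf("%s %d us\n", prev_bit ? "pulse" : "space",
	       count * BIT_DURATION);
	return 0;
}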
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index d26907e684dc..50951f686852 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -229,7 +229,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
return priv->irq;
}
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
return -ENOMEM;
@@ -242,8 +242,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
clk_prepare_enable(priv->clock);
priv->rate = clk_get_rate(priv->clock);
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->priv = priv;
rdev->open = hix5hd2_ir_open;
rdev->close = hix5hd2_ir_close;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 182402f7b4d1..674bf156edcb 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -170,9 +170,48 @@ out:
return -EINVAL;
}
+static const struct ir_raw_timings_pd ir_jvc_timings = {
+ .header_pulse = JVC_HEADER_PULSE,
+ .header_space = JVC_HEADER_SPACE,
+ .bit_pulse = JVC_BIT_PULSE,
+ .bit_space[0] = JVC_BIT_0_SPACE,
+ .bit_space[1] = JVC_BIT_1_SPACE,
+ .trailer_pulse = JVC_TRAILER_PULSE,
+ .trailer_space = JVC_TRAILER_SPACE,
+ .msb_first = 1,
+};
+
+/**
+ * ir_jvc_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_jvc_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int ret;
+ u32 raw = (bitrev8((scancode >> 8) & 0xff) << 8) |
+ (bitrev8((scancode >> 0) & 0xff) << 0);
+
+ ret = ir_raw_gen_pd(&e, max, &ir_jvc_timings, JVC_NBITS, raw);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler jvc_handler = {
.protocols = RC_BIT_JVC,
.decode = ir_jvc_decode,
+ .encode = ir_jvc_encode,
};
static int __init ir_jvc_decode_init(void)
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index c3277308a70b..8517d5153fcf 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -204,11 +204,17 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
/* legacy support */
case LIRC_GET_SEND_MODE:
- val = LIRC_CAN_SEND_PULSE & LIRC_CAN_SEND_MASK;
+ if (!dev->tx_ir)
+ return -ENOTTY;
+
+ val = LIRC_MODE_PULSE;
break;
case LIRC_SET_SEND_MODE:
- if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
+ if (!dev->tx_ir)
+ return -ENOTTY;
+
+ if (val != LIRC_MODE_PULSE)
return -EINVAL;
return 0;
@@ -273,7 +279,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
case LIRC_GET_MIN_TIMEOUT:
if (!dev->max_timeout)
return -ENOSYS;
- val = dev->min_timeout / 1000;
+ val = DIV_ROUND_UP(dev->min_timeout, 1000);
break;
case LIRC_GET_MAX_TIMEOUT:
@@ -341,7 +347,7 @@ static int ir_lirc_register(struct rc_dev *dev)
struct lirc_driver *drv;
struct lirc_buffer *rbuf;
int rc = -ENOMEM;
- unsigned long features;
+ unsigned long features = 0;
drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!drv)
@@ -355,7 +361,8 @@ static int ir_lirc_register(struct rc_dev *dev)
if (rc)
goto rbuf_init_failed;
- features = LIRC_CAN_REC_MODE2;
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ features |= LIRC_CAN_REC_MODE2;
if (dev->tx_ir) {
features |= LIRC_CAN_SEND_PULSE;
if (dev->s_tx_mask)
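
The LIRC_GET_MIN_TIMEOUT change above switches from truncating to rounding up when converting nanoseconds to microseconds: with a driver such as iguanair now advertising min_timeout = 1, plain integer division would report a minimum of 0 us. A small illustration (not from the patch; DIV_ROUND_UP is redefined locally to match the kernel macro):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int min_timeout = 1;	/* ns, as set by iguanair above */

	printf("truncated: %u us\n", min_timeout / 1000);		/* 0 */
	printf("rounded:   %u us\n", DIV_ROUND_UP(min_timeout, 1000));	/* 1 */
	return 0;
}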
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index d80986251ee0..5226d510e847 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -71,7 +71,7 @@ static unsigned char kbd_keycodes[256] = {
KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE,
KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH,
- KEY_RESERVED, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
+ KEY_BACKSLASH, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2,
KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,
KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 2a9d155548ab..3ce850314dca 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -170,7 +170,10 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (send_32bits) {
/* NEC transport, but modified protocol, used by at
* least Apple and TiVo remotes */
- scancode = data->bits;
+ scancode = not_address << 24 |
+ address << 16 |
+ not_command << 8 |
+ command;
IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
rc_type = RC_TYPE_NEC32;
} else if ((address ^ not_address) != 0xff) {
@@ -201,9 +204,90 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
return -EINVAL;
}
+/**
+ * ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
+ * @protocol: specific protocol to use
+ * @scancode: a single NEC scancode.
+ * @raw: raw data to be modulated.
+ */
+static u32 ir_nec_scancode_to_raw(enum rc_type protocol, u32 scancode)
+{
+ unsigned int addr, addr_inv, data, data_inv;
+
+ data = scancode & 0xff;
+
+ if (protocol == RC_TYPE_NEC32) {
+ /* 32-bit NEC (used by Apple and TiVo remotes) */
+ /* scan encoding: aaAAddDD */
+ addr_inv = (scancode >> 24) & 0xff;
+ addr = (scancode >> 16) & 0xff;
+ data_inv = (scancode >> 8) & 0xff;
+ } else if (protocol == RC_TYPE_NECX) {
+ /* Extended NEC */
+ /* scan encoding AAaaDD */
+ addr = (scancode >> 16) & 0xff;
+ addr_inv = (scancode >> 8) & 0xff;
+ data_inv = data ^ 0xff;
+ } else {
+ /* Normal NEC */
+ /* scan encoding: AADD */
+ addr = (scancode >> 8) & 0xff;
+ addr_inv = addr ^ 0xff;
+ data_inv = data ^ 0xff;
+ }
+
+ /* raw encoding: ddDDaaAA */
+ return data_inv << 24 |
+ data << 16 |
+ addr_inv << 8 |
+ addr;
+}
+
+static const struct ir_raw_timings_pd ir_nec_timings = {
+ .header_pulse = NEC_HEADER_PULSE,
+ .header_space = NEC_HEADER_SPACE,
+ .bit_pulse = NEC_BIT_PULSE,
+ .bit_space[0] = NEC_BIT_0_SPACE,
+ .bit_space[1] = NEC_BIT_1_SPACE,
+ .trailer_pulse = NEC_TRAILER_PULSE,
+ .trailer_space = NEC_TRAILER_SPACE,
+ .msb_first = 0,
+};
+
+/**
+ * ir_nec_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_nec_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int ret;
+ u32 raw;
+
+ /* Convert a NEC scancode to raw NEC data */
+ raw = ir_nec_scancode_to_raw(protocol, scancode);
+
+ /* Modulate the raw data using a pulse distance modulation */
+ ret = ir_raw_gen_pd(&e, max, &ir_nec_timings, NEC_NBITS, raw);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler nec_handler = {
.protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32,
.decode = ir_nec_decode,
+ .encode = ir_nec_encode,
};
static int __init ir_nec_decode_init(void)
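
For reference, the plain-NEC branch of ir_nec_scancode_to_raw() above maps a scancode of the form AADD to raw ddDDaaAA, with the inverted bytes derived by XOR. A tiny stand-alone sketch that mirrors that branch (not part of the patch; the scancode value is only an example):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t nec_scancode_to_raw(uint32_t scancode)
{
	uint32_t addr     = (scancode >> 8) & 0xff;	/* AA */
	uint32_t addr_inv = addr ^ 0xff;		/* aa */
	uint32_t data     = scancode & 0xff;		/* DD */
	uint32_t data_inv = data ^ 0xff;		/* dd */

	/* raw encoding: ddDDaaAA */
	return data_inv << 24 | data << 16 | addr_inv << 8 | addr;
}

int main(void)
{
	/* scancode 0x1234 -> raw 0xcb34ed12 */
	printf("0x%08" PRIx32 "\n", nec_scancode_to_raw(0x1234));
	return 0;
}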
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index a0fd4e6b2155..fcfedf95def7 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -124,7 +124,7 @@ again:
if (data->is_rc5x && data->count == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
- if (!(dev->enabled_protocols & RC_BIT_RC5X)) {
+ if (!(dev->enabled_protocols & RC_BIT_RC5X_20)) {
data->state = STATE_INACTIVE;
return 0;
}
@@ -132,9 +132,9 @@ again:
command = (data->bits & 0x00FC0) >> 6;
system = (data->bits & 0x1F000) >> 12;
toggle = (data->bits & 0x20000) ? 1 : 0;
- command += (data->bits & 0x01000) ? 0 : 0x40;
+ command += (data->bits & 0x40000) ? 0 : 0x40;
scancode = system << 16 | command << 8 | xdata;
- protocol = RC_TYPE_RC5X;
+ protocol = RC_TYPE_RC5X_20;
} else if (!data->is_rc5x && data->count == RC5_NBITS) {
/* RC5 */
@@ -181,9 +181,106 @@ out:
return -EINVAL;
}
+static const struct ir_raw_timings_manchester ir_rc5_timings = {
+ .leader = RC5_UNIT,
+ .pulse_space_start = 0,
+ .clock = RC5_UNIT,
+ .trailer_space = RC5_UNIT * 10,
+};
+
+static const struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
+ {
+ .leader = RC5_UNIT,
+ .pulse_space_start = 0,
+ .clock = RC5_UNIT,
+ .trailer_space = RC5X_SPACE,
+ },
+ {
+ .clock = RC5_UNIT,
+ .trailer_space = RC5_UNIT * 10,
+ },
+};
+
+static const struct ir_raw_timings_manchester ir_rc5_sz_timings = {
+ .leader = RC5_UNIT,
+ .pulse_space_start = 0,
+ .clock = RC5_UNIT,
+ .trailer_space = RC5_UNIT * 10,
+};
+
+/**
+ * ir_rc5_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol variant to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ * -EINVAL if the scancode is ambiguous or invalid.
+ */
+static int ir_rc5_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ int ret;
+ struct ir_raw_event *e = events;
+ unsigned int data, xdata, command, commandx, system, pre_space_data;
+
+ /* Detect protocol and convert scancode to raw data */
+ if (protocol == RC_TYPE_RC5) {
+ /* decode scancode */
+ command = (scancode & 0x003f) >> 0;
+ commandx = (scancode & 0x0040) >> 6;
+ system = (scancode & 0x1f00) >> 8;
+ /* encode data */
+ data = !commandx << 12 | system << 6 | command;
+
+ /* Modulate the data */
+ ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings,
+ RC5_NBITS, data);
+ if (ret < 0)
+ return ret;
+ } else if (protocol == RC_TYPE_RC5X_20) {
+ /* decode scancode */
+ xdata = (scancode & 0x00003f) >> 0;
+ command = (scancode & 0x003f00) >> 8;
+ commandx = !(scancode & 0x004000);
+ system = (scancode & 0x1f0000) >> 16;
+
+ /* encode data */
+ data = commandx << 18 | system << 12 | command << 6 | xdata;
+
+ /* Modulate the data */
+ pre_space_data = data >> (RC5X_NBITS - CHECK_RC5X_NBITS);
+ ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
+ CHECK_RC5X_NBITS, pre_space_data);
+ if (ret < 0)
+ return ret;
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc5x_timings[1],
+ RC5X_NBITS - CHECK_RC5X_NBITS,
+ data);
+ if (ret < 0)
+ return ret;
+ } else if (protocol == RC_TYPE_RC5_SZ) {
+ /* RC5-SZ scancode is raw enough for Manchester as it is */
+ ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
+ RC5_SZ_NBITS, scancode & 0x2fff);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ return e - events;
+}
+
static struct ir_raw_handler rc5_handler = {
- .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
+ .protocols = RC_BIT_RC5 | RC_BIT_RC5X_20 | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
+ .encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 5cc54c967a80..6fe2268dada0 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -286,11 +286,128 @@ out:
return -EINVAL;
}
+static const struct ir_raw_timings_manchester ir_rc6_timings[4] = {
+ {
+ .leader = RC6_PREFIX_PULSE,
+ .pulse_space_start = 0,
+ .clock = RC6_UNIT,
+ .invert = 1,
+ .trailer_space = RC6_PREFIX_SPACE,
+ },
+ {
+ .clock = RC6_UNIT,
+ .invert = 1,
+ },
+ {
+ .clock = RC6_UNIT * 2,
+ .invert = 1,
+ },
+ {
+ .clock = RC6_UNIT,
+ .invert = 1,
+ .trailer_space = RC6_SUFFIX_SPACE,
+ },
+};
+
+/**
+ * ir_rc6_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ * -EINVAL if the scancode is ambiguous or invalid.
+ */
+static int ir_rc6_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ int ret;
+ struct ir_raw_event *e = events;
+
+ if (protocol == RC_TYPE_RC6_0) {
+ /* Modulate the preamble */
+ ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Modulate the header (Start Bit & Mode-0) */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[1],
+ RC6_HEADER_NBITS, (1 << 3));
+ if (ret < 0)
+ return ret;
+
+ /* Modulate Trailer Bit */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[2], 1, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Modulate rest of the data */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[3], RC6_0_NBITS,
+ scancode);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ int bits;
+
+ switch (protocol) {
+ case RC_TYPE_RC6_MCE:
+ case RC_TYPE_RC6_6A_32:
+ bits = 32;
+ break;
+ case RC_TYPE_RC6_6A_24:
+ bits = 24;
+ break;
+ case RC_TYPE_RC6_6A_20:
+ bits = 20;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Modulate the preamble */
+ ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Modulate the header (Start Bit & Header-version 6) */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[1],
+ RC6_HEADER_NBITS, (1 << 3 | 6));
+ if (ret < 0)
+ return ret;
+
+ /* Modulate Trailer Bit */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[2], 1, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Modulate rest of the data */
+ ret = ir_raw_gen_manchester(&e, max - (e - events),
+ &ir_rc6_timings[3],
+ bits,
+ scancode);
+ if (ret < 0)
+ return ret;
+ }
+
+ return e - events;
+}
+
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
+ .encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index e6efa8c267a0..49265f02e772 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -15,32 +15,23 @@
*/
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
+#include <media/rc-core.h>
#include <linux/platform_data/media/ir-rx51.h>
-#define LIRC_RX51_DRIVER_FEATURES (LIRC_CAN_SET_SEND_DUTY_CYCLE | \
- LIRC_CAN_SET_SEND_CARRIER | \
- LIRC_CAN_SEND_PULSE)
-
-#define DRIVER_NAME "lirc_rx51"
-
#define WBUF_LEN 256
-struct lirc_rx51 {
+struct ir_rx51 {
+ struct rc_dev *rcdev;
struct pwm_device *pwm;
struct hrtimer timer;
struct device *dev;
- struct lirc_rx51_platform_data *pdata;
+ struct ir_rx51_platform_data *pdata;
wait_queue_head_t wqueue;
unsigned int freq; /* carrier frequency */
@@ -50,38 +41,37 @@ struct lirc_rx51 {
unsigned long device_is_open;
};
-static inline void lirc_rx51_on(struct lirc_rx51 *lirc_rx51)
+static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
- pwm_enable(lirc_rx51->pwm);
+ pwm_enable(ir_rx51->pwm);
}
-static inline void lirc_rx51_off(struct lirc_rx51 *lirc_rx51)
+static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
- pwm_disable(lirc_rx51->pwm);
+ pwm_disable(ir_rx51->pwm);
}
-static int init_timing_params(struct lirc_rx51 *lirc_rx51)
+static int init_timing_params(struct ir_rx51 *ir_rx51)
{
- struct pwm_device *pwm = lirc_rx51->pwm;
- int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, lirc_rx51->freq);
+ struct pwm_device *pwm = ir_rx51->pwm;
+ int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
- duty = DIV_ROUND_CLOSEST(lirc_rx51->duty_cycle * period, 100);
+ duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100);
pwm_config(pwm, duty, period);
return 0;
}
-static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
+static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
- struct lirc_rx51 *lirc_rx51 =
- container_of(timer, struct lirc_rx51, timer);
+ struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
ktime_t now;
- if (lirc_rx51->wbuf_index < 0) {
- dev_err_ratelimited(lirc_rx51->dev,
- "BUG wbuf_index has value of %i\n",
- lirc_rx51->wbuf_index);
+ if (ir_rx51->wbuf_index < 0) {
+ dev_err_ratelimited(ir_rx51->dev,
+ "BUG wbuf_index has value of %i\n",
+ ir_rx51->wbuf_index);
goto end;
}
@@ -92,20 +82,20 @@ static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
do {
u64 ns;
- if (lirc_rx51->wbuf_index >= WBUF_LEN)
+ if (ir_rx51->wbuf_index >= WBUF_LEN)
goto end;
- if (lirc_rx51->wbuf[lirc_rx51->wbuf_index] == -1)
+ if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
goto end;
- if (lirc_rx51->wbuf_index % 2)
- lirc_rx51_off(lirc_rx51);
+ if (ir_rx51->wbuf_index % 2)
+ ir_rx51_off(ir_rx51);
else
- lirc_rx51_on(lirc_rx51);
+ ir_rx51_on(ir_rx51);
- ns = 1000 * lirc_rx51->wbuf[lirc_rx51->wbuf_index];
+ ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
hrtimer_add_expires_ns(timer, ns);
- lirc_rx51->wbuf_index++;
+ ir_rx51->wbuf_index++;
now = timer->base->get_time();
@@ -114,203 +104,112 @@ static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
return HRTIMER_RESTART;
end:
/* Stop TX here */
- lirc_rx51_off(lirc_rx51);
- lirc_rx51->wbuf_index = -1;
+ ir_rx51_off(ir_rx51);
+ ir_rx51->wbuf_index = -1;
- wake_up_interruptible(&lirc_rx51->wqueue);
+ wake_up_interruptible(&ir_rx51->wqueue);
return HRTIMER_NORESTART;
}
-static ssize_t lirc_rx51_write(struct file *file, const char *buf,
- size_t n, loff_t *ppos)
+static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
+ unsigned int count)
{
- int count, i;
- struct lirc_rx51 *lirc_rx51 = file->private_data;
+ struct ir_rx51 *ir_rx51 = dev->priv;
- if (n % sizeof(int))
+ if (count > WBUF_LEN)
return -EINVAL;
- count = n / sizeof(int);
- if ((count > WBUF_LEN) || (count % 2 == 0))
- return -EINVAL;
+ memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));
	/* Wait for any pending transfers to finish */
- wait_event_interruptible(lirc_rx51->wqueue, lirc_rx51->wbuf_index < 0);
-
- if (copy_from_user(lirc_rx51->wbuf, buf, n))
- return -EFAULT;
-
- /* Sanity check the input pulses */
- for (i = 0; i < count; i++)
- if (lirc_rx51->wbuf[i] < 0)
- return -EINVAL;
+ wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
- init_timing_params(lirc_rx51);
+ init_timing_params(ir_rx51);
if (count < WBUF_LEN)
- lirc_rx51->wbuf[count] = -1; /* Insert termination mark */
+ ir_rx51->wbuf[count] = -1; /* Insert termination mark */
/*
	 * Adjust latency requirements so the device doesn't go into
	 * overly deep sleep states
*/
- lirc_rx51->pdata->set_max_mpu_wakeup_lat(lirc_rx51->dev, 50);
+ ir_rx51->pdata->set_max_mpu_wakeup_lat(ir_rx51->dev, 50);
- lirc_rx51_on(lirc_rx51);
- lirc_rx51->wbuf_index = 1;
- hrtimer_start(&lirc_rx51->timer,
- ns_to_ktime(1000 * lirc_rx51->wbuf[0]),
+ ir_rx51_on(ir_rx51);
+ ir_rx51->wbuf_index = 1;
+ hrtimer_start(&ir_rx51->timer,
+ ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
HRTIMER_MODE_REL);
/*
	 * Don't return to userspace until the transfer has
* finished
*/
- wait_event_interruptible(lirc_rx51->wqueue, lirc_rx51->wbuf_index < 0);
+ wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
/* We can sleep again */
- lirc_rx51->pdata->set_max_mpu_wakeup_lat(lirc_rx51->dev, -1);
+ ir_rx51->pdata->set_max_mpu_wakeup_lat(ir_rx51->dev, -1);
- return n;
+ return count;
}
-static long lirc_rx51_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
+static int ir_rx51_open(struct rc_dev *dev)
{
- int result;
- unsigned long value;
- unsigned int ivalue;
- struct lirc_rx51 *lirc_rx51 = filep->private_data;
-
- switch (cmd) {
- case LIRC_GET_SEND_MODE:
- result = put_user(LIRC_MODE_PULSE, (unsigned long *)arg);
- if (result)
- return result;
- break;
-
- case LIRC_SET_SEND_MODE:
- result = get_user(value, (unsigned long *)arg);
- if (result)
- return result;
-
- /* only LIRC_MODE_PULSE supported */
- if (value != LIRC_MODE_PULSE)
- return -ENOSYS;
- break;
-
- case LIRC_GET_REC_MODE:
- result = put_user(0, (unsigned long *) arg);
- if (result)
- return result;
- break;
-
- case LIRC_GET_LENGTH:
- return -ENOSYS;
- break;
-
- case LIRC_SET_SEND_DUTY_CYCLE:
- result = get_user(ivalue, (unsigned int *) arg);
- if (result)
- return result;
-
- if (ivalue <= 0 || ivalue > 100) {
- dev_err(lirc_rx51->dev, ": invalid duty cycle %d\n",
- ivalue);
- return -EINVAL;
- }
-
- lirc_rx51->duty_cycle = ivalue;
- break;
-
- case LIRC_SET_SEND_CARRIER:
- result = get_user(ivalue, (unsigned int *) arg);
- if (result)
- return result;
-
- if (ivalue > 500000 || ivalue < 20000) {
- dev_err(lirc_rx51->dev, ": invalid carrier freq %d\n",
- ivalue);
- return -EINVAL;
- }
-
- lirc_rx51->freq = ivalue;
- break;
-
- case LIRC_GET_FEATURES:
- result = put_user(LIRC_RX51_DRIVER_FEATURES,
- (unsigned long *) arg);
- if (result)
- return result;
- break;
-
- default:
- return -ENOIOCTLCMD;
- }
-
- return 0;
-}
+ struct ir_rx51 *ir_rx51 = dev->priv;
-static int lirc_rx51_open(struct inode *inode, struct file *file)
-{
- struct lirc_rx51 *lirc_rx51 = lirc_get_pdata(file);
- BUG_ON(!lirc_rx51);
-
- file->private_data = lirc_rx51;
-
- if (test_and_set_bit(1, &lirc_rx51->device_is_open))
+ if (test_and_set_bit(1, &ir_rx51->device_is_open))
return -EBUSY;
- lirc_rx51->pwm = pwm_get(lirc_rx51->dev, NULL);
- if (IS_ERR(lirc_rx51->pwm)) {
- int res = PTR_ERR(lirc_rx51->pwm);
+ ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
+ if (IS_ERR(ir_rx51->pwm)) {
+ int res = PTR_ERR(ir_rx51->pwm);
- dev_err(lirc_rx51->dev, "pwm_get failed: %d\n", res);
+ dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
return res;
}
return 0;
}
-static int lirc_rx51_release(struct inode *inode, struct file *file)
+static void ir_rx51_release(struct rc_dev *dev)
{
- struct lirc_rx51 *lirc_rx51 = file->private_data;
-
- hrtimer_cancel(&lirc_rx51->timer);
- lirc_rx51_off(lirc_rx51);
- pwm_put(lirc_rx51->pwm);
+ struct ir_rx51 *ir_rx51 = dev->priv;
- clear_bit(1, &lirc_rx51->device_is_open);
+ hrtimer_cancel(&ir_rx51->timer);
+ ir_rx51_off(ir_rx51);
+ pwm_put(ir_rx51->pwm);
- return 0;
+ clear_bit(1, &ir_rx51->device_is_open);
}
-static struct lirc_rx51 lirc_rx51 = {
+static struct ir_rx51 ir_rx51 = {
.duty_cycle = 50,
.wbuf_index = -1,
};
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .write = lirc_rx51_write,
- .unlocked_ioctl = lirc_rx51_ioctl,
- .read = lirc_dev_fop_read,
- .poll = lirc_dev_fop_poll,
- .open = lirc_rx51_open,
- .release = lirc_rx51_release,
-};
+static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
+{
+ struct ir_rx51 *ir_rx51 = dev->priv;
-static struct lirc_driver lirc_rx51_driver = {
- .name = DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .data = &lirc_rx51,
- .fops = &lirc_fops,
- .owner = THIS_MODULE,
-};
+ ir_rx51->duty_cycle = duty;
+
+ return 0;
+}
+
+static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+{
+ struct ir_rx51 *ir_rx51 = dev->priv;
+
+ if (carrier > 500000 || carrier < 20000)
+ return -EINVAL;
+
+ ir_rx51->freq = carrier;
+
+ return 0;
+}
#ifdef CONFIG_PM
-static int lirc_rx51_suspend(struct platform_device *dev, pm_message_t state)
+static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
/*
* In case the device is still open, do not suspend. Normally
@@ -320,34 +219,34 @@ static int lirc_rx51_suspend(struct platform_device *dev, pm_message_t state)
	 * were in the middle of a transmit. Thus, we defer any suspend
* actions until transmit has completed.
*/
- if (test_and_set_bit(1, &lirc_rx51.device_is_open))
+ if (test_and_set_bit(1, &ir_rx51.device_is_open))
return -EAGAIN;
- clear_bit(1, &lirc_rx51.device_is_open);
+ clear_bit(1, &ir_rx51.device_is_open);
return 0;
}
-static int lirc_rx51_resume(struct platform_device *dev)
+static int ir_rx51_resume(struct platform_device *dev)
{
return 0;
}
#else
-#define lirc_rx51_suspend NULL
-#define lirc_rx51_resume NULL
+#define ir_rx51_suspend NULL
+#define ir_rx51_resume NULL
#endif /* CONFIG_PM */
-static int lirc_rx51_probe(struct platform_device *dev)
+static int ir_rx51_probe(struct platform_device *dev)
{
struct pwm_device *pwm;
+ struct rc_dev *rcdev;
- lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES;
- lirc_rx51.pdata = dev->dev.platform_data;
+ ir_rx51.pdata = dev->dev.platform_data;
- if (!lirc_rx51.pdata) {
+ if (!ir_rx51.pdata) {
dev_err(&dev->dev, "Platform Data is missing\n");
return -ENXIO;
}
@@ -362,51 +261,56 @@ static int lirc_rx51_probe(struct platform_device *dev)
}
/* Use default, in case userspace does not set the carrier */
- lirc_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC);
+ ir_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC);
pwm_put(pwm);
- hrtimer_init(&lirc_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- lirc_rx51.timer.function = lirc_rx51_timer_cb;
+ hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ir_rx51.timer.function = ir_rx51_timer_cb;
- lirc_rx51.dev = &dev->dev;
- lirc_rx51_driver.dev = &dev->dev;
- lirc_rx51_driver.minor = lirc_register_driver(&lirc_rx51_driver);
- init_waitqueue_head(&lirc_rx51.wqueue);
+ ir_rx51.dev = &dev->dev;
- if (lirc_rx51_driver.minor < 0) {
- dev_err(lirc_rx51.dev, ": lirc_register_driver failed: %d\n",
- lirc_rx51_driver.minor);
- return lirc_rx51_driver.minor;
- }
+ rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
+ if (!rcdev)
+ return -ENOMEM;
- return 0;
+ rcdev->priv = &ir_rx51;
+ rcdev->open = ir_rx51_open;
+ rcdev->close = ir_rx51_release;
+ rcdev->tx_ir = ir_rx51_tx;
+ rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
+ rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
+ rcdev->driver_name = KBUILD_MODNAME;
+
+ ir_rx51.rcdev = rcdev;
+
+ return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
}
-static int lirc_rx51_remove(struct platform_device *dev)
+static int ir_rx51_remove(struct platform_device *dev)
{
- return lirc_unregister_driver(lirc_rx51_driver.minor);
+ return 0;
}
-static const struct of_device_id lirc_rx51_match[] = {
+static const struct of_device_id ir_rx51_match[] = {
{
.compatible = "nokia,n900-ir",
},
{},
};
-MODULE_DEVICE_TABLE(of, lirc_rx51_match);
+MODULE_DEVICE_TABLE(of, ir_rx51_match);
-struct platform_driver lirc_rx51_platform_driver = {
- .probe = lirc_rx51_probe,
- .remove = lirc_rx51_remove,
- .suspend = lirc_rx51_suspend,
- .resume = lirc_rx51_resume,
+static struct platform_driver ir_rx51_platform_driver = {
+ .probe = ir_rx51_probe,
+ .remove = ir_rx51_remove,
+ .suspend = ir_rx51_suspend,
+ .resume = ir_rx51_resume,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = of_match_ptr(lirc_rx51_match),
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(ir_rx51_match),
},
};
-module_platform_driver(lirc_rx51_platform_driver);
+module_platform_driver(ir_rx51_platform_driver);
-MODULE_DESCRIPTION("LIRC TX driver for Nokia RX51");
+MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index b07d9caebeb1..520bb77dcb62 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -176,9 +176,52 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
return -EINVAL;
}
+static const struct ir_raw_timings_pd ir_sanyo_timings = {
+ .header_pulse = SANYO_HEADER_PULSE,
+ .header_space = SANYO_HEADER_SPACE,
+ .bit_pulse = SANYO_BIT_PULSE,
+ .bit_space[0] = SANYO_BIT_0_SPACE,
+ .bit_space[1] = SANYO_BIT_1_SPACE,
+ .trailer_pulse = SANYO_TRAILER_PULSE,
+ .trailer_space = SANYO_TRAILER_SPACE,
+ .msb_first = 1,
+};
+
+/**
+ * ir_sanyo_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_sanyo_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int ret;
+ u64 raw;
+
+ raw = ((u64)(bitrev16(scancode >> 8) & 0xfff8) << (8 + 8 + 13 - 3)) |
+ ((u64)(bitrev16(~scancode >> 8) & 0xfff8) << (8 + 8 + 0 - 3)) |
+ ((bitrev8(scancode) & 0xff) << 8) |
+ (bitrev8(~scancode) & 0xff);
+
+ ret = ir_raw_gen_pd(&e, max, &ir_sanyo_timings, SANYO_NBITS, raw);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler sanyo_handler = {
.protocols = RC_BIT_SANYO,
.decode = ir_sanyo_decode,
+ .encode = ir_sanyo_encode,
};
static int __init ir_sanyo_decode_init(void)
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 317677f06f2c..b47e89e2c1bd 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -173,9 +173,59 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
return -EINVAL;
}
+static const struct ir_raw_timings_pd ir_sharp_timings = {
+ .header_pulse = 0,
+ .header_space = 0,
+ .bit_pulse = SHARP_BIT_PULSE,
+ .bit_space[0] = SHARP_BIT_0_PERIOD,
+ .bit_space[1] = SHARP_BIT_1_PERIOD,
+ .trailer_pulse = SHARP_BIT_PULSE,
+ .trailer_space = SHARP_ECHO_SPACE,
+ .msb_first = 1,
+};
+
+/**
+ * ir_sharp_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_sharp_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int ret;
+ u32 raw;
+
+ raw = (((bitrev8(scancode >> 8) >> 3) << 8) & 0x1f00) |
+ bitrev8(scancode);
+ ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
+ (raw << 2) | 2);
+ if (ret < 0)
+ return ret;
+
+ max -= ret;
+
+ raw = (((bitrev8(scancode >> 8) >> 3) << 8) & 0x1f00) |
+ bitrev8(~scancode);
+ ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
+ (raw << 2) | 1);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler sharp_handler = {
.protocols = RC_BIT_SHARP,
.decode = ir_sharp_decode,
+ .encode = ir_sharp_encode,
};
static int __init ir_sharp_decode_init(void)
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index baa972c76e0e..355fa8198f5a 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -169,9 +169,57 @@ finish_state_machine:
return 0;
}
+static const struct ir_raw_timings_pl ir_sony_timings = {
+ .header_pulse = SONY_HEADER_PULSE,
+ .bit_space = SONY_BIT_SPACE,
+ .bit_pulse[0] = SONY_BIT_0_PULSE,
+ .bit_pulse[1] = SONY_BIT_1_PULSE,
+ .trailer_space = SONY_TRAILER_SPACE + SONY_BIT_SPACE,
+ .msb_first = 0,
+};
+
+/**
+ * ir_sony_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_sony_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ u32 raw, len;
+ int ret;
+
+ if (protocol == RC_TYPE_SONY12) {
+ raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9);
+ len = 12;
+ } else if (protocol == RC_TYPE_SONY15) {
+ raw = (scancode & 0x7f) | ((scancode & 0xff0000) >> 9);
+ len = 15;
+ } else {
+ raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9) |
+ ((scancode & 0xff00) << 4);
+ len = 20;
+ }
+
+ ret = ir_raw_gen_pl(&e, max, &ir_sony_timings, len, raw);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler sony_handler = {
.protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
.decode = ir_sony_decode,
+ .encode = ir_sony_encode,
};
static int __init ir_sony_decode_init(void)
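/*
 * A small sketch of the SONY12 bit packing performed in ir_sony_encode()
 * above: as the masks suggest, the 7-bit function lives in bits 0-6 of the
 * scancode and the 5-bit device in bits 16-20, and the encoder folds them
 * into a 12-bit word. The example scancode is an illustrative assumption.
 */
#include <stdio.h>

int main(void)
{
	unsigned int scancode = 0x010015;	/* device 1, function 0x15 */
	unsigned int raw = (scancode & 0x7f) | ((scancode & 0x1f0000) >> 9);

	/* prints raw=0x95: function 0x15 in bits 0-6, device 1 in bit 7 */
	printf("raw=0x%x\n", raw);
	return 0;
}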
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
new file mode 100644
index 000000000000..c8863f36686a
--- /dev/null
+++ b/drivers/media/rc/ir-spi.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author: Andi Shyti <andi.shyti@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * SPI driven IR LED device driver
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <media/rc-core.h>
+
+#define IR_SPI_DRIVER_NAME "ir-spi"
+
+/* pulse value for different duty cycles */
+#define IR_SPI_PULSE_DC_50 0xff00
+#define IR_SPI_PULSE_DC_60 0xfc00
+#define IR_SPI_PULSE_DC_70 0xf800
+#define IR_SPI_PULSE_DC_75 0xf000
+#define IR_SPI_PULSE_DC_80 0xc000
+#define IR_SPI_PULSE_DC_90 0x8000
+
+#define IR_SPI_DEFAULT_FREQUENCY 38000
+#define IR_SPI_BIT_PER_WORD 8
+#define IR_SPI_MAX_BUFSIZE 4096
+
+struct ir_spi_data {
+ u32 freq;
+ u8 duty_cycle;
+ bool negated;
+
+ u16 tx_buf[IR_SPI_MAX_BUFSIZE];
+ u16 pulse;
+ u16 space;
+
+ struct rc_dev *rc;
+ struct spi_device *spi;
+ struct regulator *regulator;
+};
+
+static int ir_spi_tx(struct rc_dev *dev,
+ unsigned int *buffer, unsigned int count)
+{
+ int i;
+ int ret;
+ unsigned int len = 0;
+ struct ir_spi_data *idata = dev->priv;
+ struct spi_transfer xfer;
+
+ /* convert the pulse/space signal to raw binary signal */
+ for (i = 0; i < count; i++) {
+ int j;
+ u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
+
+ if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
+ return -EINVAL;
+
+ /*
+		 * The first value in the buffer is a pulse, so entries
+		 * 0, 2, 4, ... hold pulse durations while entries
+		 * 1, 3, 5, ... hold space durations.
+ */
+ val = (i % 2) ? idata->space : idata->pulse;
+ for (j = 0; j < buffer[i]; j++)
+ idata->tx_buf[len++] = val;
+ }
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ xfer.speed_hz = idata->freq;
+ xfer.len = len * sizeof(*idata->tx_buf);
+ xfer.tx_buf = idata->tx_buf;
+
+ ret = regulator_enable(idata->regulator);
+ if (ret)
+ return ret;
+
+ ret = spi_sync_transfer(idata->spi, &xfer, 1);
+ if (ret)
+ dev_err(&idata->spi->dev, "unable to deliver the signal\n");
+
+ regulator_disable(idata->regulator);
+
+ return ret ? ret : count;
+}
+
+static int ir_spi_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+{
+ struct ir_spi_data *idata = dev->priv;
+
+ if (!carrier)
+ return -EINVAL;
+
+ idata->freq = carrier;
+
+ return 0;
+}
+
+static int ir_spi_set_duty_cycle(struct rc_dev *dev, u32 duty_cycle)
+{
+ struct ir_spi_data *idata = dev->priv;
+
+ if (duty_cycle >= 90)
+ idata->pulse = IR_SPI_PULSE_DC_90;
+ else if (duty_cycle >= 80)
+ idata->pulse = IR_SPI_PULSE_DC_80;
+ else if (duty_cycle >= 75)
+ idata->pulse = IR_SPI_PULSE_DC_75;
+ else if (duty_cycle >= 70)
+ idata->pulse = IR_SPI_PULSE_DC_70;
+ else if (duty_cycle >= 60)
+ idata->pulse = IR_SPI_PULSE_DC_60;
+ else
+ idata->pulse = IR_SPI_PULSE_DC_50;
+
+ if (idata->negated) {
+ idata->pulse = ~idata->pulse;
+ idata->space = 0xffff;
+ } else {
+ idata->space = 0;
+ }
+
+ return 0;
+}
+
+static int ir_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ u8 dc;
+ struct ir_spi_data *idata;
+
+ idata = devm_kzalloc(&spi->dev, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ idata->regulator = devm_regulator_get(&spi->dev, "irda_regulator");
+ if (IS_ERR(idata->regulator))
+ return PTR_ERR(idata->regulator);
+
+ idata->rc = devm_rc_allocate_device(&spi->dev, RC_DRIVER_IR_RAW_TX);
+ if (!idata->rc)
+ return -ENOMEM;
+
+ idata->rc->tx_ir = ir_spi_tx;
+ idata->rc->s_tx_carrier = ir_spi_set_tx_carrier;
+ idata->rc->s_tx_duty_cycle = ir_spi_set_duty_cycle;
+ idata->rc->driver_name = IR_SPI_DRIVER_NAME;
+ idata->rc->priv = idata;
+ idata->spi = spi;
+
+ idata->negated = of_property_read_bool(spi->dev.of_node,
+ "led-active-low");
+ ret = of_property_read_u8(spi->dev.of_node, "duty-cycle", &dc);
+ if (ret)
+ dc = 50;
+
+	/*
+	 * ir_spi_set_duty_cycle() cannot fail; it returns int only to match
+	 * the rc->s_tx_duty_cycle callback signature.
+	 */
+ ir_spi_set_duty_cycle(idata->rc, dc);
+
+ idata->freq = IR_SPI_DEFAULT_FREQUENCY;
+
+ return devm_rc_register_device(&spi->dev, idata->rc);
+}
+
+static int ir_spi_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+static const struct of_device_id ir_spi_of_match[] = {
+ { .compatible = "ir-spi-led" },
+ {},
+};
+
+static struct spi_driver ir_spi_driver = {
+ .probe = ir_spi_probe,
+ .remove = ir_spi_remove,
+ .driver = {
+ .name = IR_SPI_DRIVER_NAME,
+ .of_match_table = ir_spi_of_match,
+ },
+};
+
+module_spi_driver(ir_spi_driver);
+
+MODULE_AUTHOR("Andi Shyti <andi.shyti@samsung.com>");
+MODULE_DESCRIPTION("SPI IR LED");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 367b28bed627..e9e4befbbebb 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA.
- *
* Inspired by the original lirc_it87 and lirc_ite8709 drivers, on top of the
* skeleton provided by the nuvoton-cir driver.
*
@@ -1470,7 +1465,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
return ret;
/* input device for IR remote (and tx) */
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
goto exit_free_dev_rdev;
itdev->rdev = rdev;
@@ -1561,8 +1556,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* set up ir-core props */
rdev->priv = itdev;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index aa899a0b9750..0e8ebc880d1f 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -12,11 +12,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA.
*/
/* platform driver name to register */
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index d7b13fae1267..ffe9e612f8d6 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-cec.o \
rc-cinergy-1400.o \
rc-cinergy.o \
+ rc-d680-dmb.o \
rc-delock-61959.o \
rc-dib0700-nec.o \
rc-dib0700-rc5.o \
@@ -31,6 +32,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-dntv-live-dvbt-pro.o \
rc-dtt200u.o \
rc-dvbsky.o \
+ rc-dvico-mce.o \
+ rc-dvico-portable.o \
rc-em-terratec.o \
rc-encore-enltv2.o \
rc-encore-enltv.o \
@@ -41,6 +44,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-flyvideo.o \
rc-fusionhdtv-mce.o \
rc-gadmei-rm008z.o \
+ rc-geekbox.o \
rc-genius-tvgo-a11mce.o \
rc-gotview7135.o \
rc-imon-mce.o \
diff --git a/drivers/media/rc/keymaps/rc-d680-dmb.c b/drivers/media/rc/keymaps/rc-d680-dmb.c
new file mode 100644
index 000000000000..bb5745d29d8a
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-d680-dmb.c
@@ -0,0 +1,75 @@
+/*
+ * keymap imported from cxusb.c
+ *
+ * Copyright (C) 2016 Sean Young
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table rc_map_d680_dmb_table[] = {
+ { 0x0038, KEY_SWITCHVIDEOMODE }, /* TV/AV */
+ { 0x080c, KEY_ZOOM },
+ { 0x0800, KEY_0 },
+ { 0x0001, KEY_1 },
+ { 0x0802, KEY_2 },
+ { 0x0003, KEY_3 },
+ { 0x0804, KEY_4 },
+ { 0x0005, KEY_5 },
+ { 0x0806, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0808, KEY_8 },
+ { 0x0009, KEY_9 },
+ { 0x000a, KEY_MUTE },
+ { 0x0829, KEY_BACK },
+ { 0x0012, KEY_CHANNELUP },
+ { 0x0813, KEY_CHANNELDOWN },
+ { 0x002b, KEY_VOLUMEUP },
+ { 0x082c, KEY_VOLUMEDOWN },
+ { 0x0020, KEY_UP },
+ { 0x0821, KEY_DOWN },
+ { 0x0011, KEY_LEFT },
+ { 0x0810, KEY_RIGHT },
+ { 0x000d, KEY_OK },
+ { 0x081f, KEY_RECORD },
+ { 0x0017, KEY_PLAYPAUSE },
+ { 0x0816, KEY_PLAYPAUSE },
+ { 0x000b, KEY_STOP },
+ { 0x0827, KEY_FASTFORWARD },
+ { 0x0026, KEY_REWIND },
+ { 0x081e, KEY_UNKNOWN }, /* Time Shift */
+ { 0x000e, KEY_UNKNOWN }, /* Snapshot */
+ { 0x082d, KEY_UNKNOWN }, /* Mouse Cursor */
+ { 0x000f, KEY_UNKNOWN }, /* Minimize/Maximize */
+ { 0x0814, KEY_SHUFFLE }, /* Shuffle */
+ { 0x0025, KEY_POWER },
+};
+
+static struct rc_map_list d680_dmb_map = {
+ .map = {
+ .scan = rc_map_d680_dmb_table,
+ .size = ARRAY_SIZE(rc_map_d680_dmb_table),
+ .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_D680_DMB,
+ }
+};
+
+static int __init init_rc_map_d680_dmb(void)
+{
+ return rc_map_register(&d680_dmb_map);
+}
+
+static void __exit exit_rc_map_d680_dmb(void)
+{
+ rc_map_unregister(&d680_dmb_map);
+}
+
+module_init(init_rc_map_d680_dmb)
+module_exit(exit_rc_map_d680_dmb)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dvico-mce.c b/drivers/media/rc/keymaps/rc-dvico-mce.c
new file mode 100644
index 000000000000..e5f098c50235
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-dvico-mce.c
@@ -0,0 +1,85 @@
+/*
+ * keymap imported from cxusb.c
+ *
+ * Copyright (C) 2016 Sean Young
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table rc_map_dvico_mce_table[] = {
+ { 0xfe02, KEY_TV },
+ { 0xfe0e, KEY_MP3 },
+ { 0xfe1a, KEY_DVD },
+ { 0xfe1e, KEY_FAVORITES },
+ { 0xfe16, KEY_SETUP },
+ { 0xfe46, KEY_POWER2 },
+ { 0xfe0a, KEY_EPG },
+ { 0xfe49, KEY_BACK },
+ { 0xfe4d, KEY_MENU },
+ { 0xfe51, KEY_UP },
+ { 0xfe5b, KEY_LEFT },
+ { 0xfe5f, KEY_RIGHT },
+ { 0xfe53, KEY_DOWN },
+ { 0xfe5e, KEY_OK },
+ { 0xfe59, KEY_INFO },
+ { 0xfe55, KEY_TAB },
+ { 0xfe0f, KEY_PREVIOUSSONG },/* Replay */
+ { 0xfe12, KEY_NEXTSONG }, /* Skip */
+ { 0xfe42, KEY_ENTER }, /* Windows/Start */
+ { 0xfe15, KEY_VOLUMEUP },
+ { 0xfe05, KEY_VOLUMEDOWN },
+ { 0xfe11, KEY_CHANNELUP },
+ { 0xfe09, KEY_CHANNELDOWN },
+ { 0xfe52, KEY_CAMERA },
+ { 0xfe5a, KEY_TUNER }, /* Live */
+ { 0xfe19, KEY_OPEN },
+ { 0xfe0b, KEY_1 },
+ { 0xfe17, KEY_2 },
+ { 0xfe1b, KEY_3 },
+ { 0xfe07, KEY_4 },
+ { 0xfe50, KEY_5 },
+ { 0xfe54, KEY_6 },
+ { 0xfe48, KEY_7 },
+ { 0xfe4c, KEY_8 },
+ { 0xfe58, KEY_9 },
+ { 0xfe13, KEY_ANGLE }, /* Aspect */
+ { 0xfe03, KEY_0 },
+ { 0xfe1f, KEY_ZOOM },
+ { 0xfe43, KEY_REWIND },
+ { 0xfe47, KEY_PLAYPAUSE },
+ { 0xfe4f, KEY_FASTFORWARD },
+ { 0xfe57, KEY_MUTE },
+ { 0xfe0d, KEY_STOP },
+ { 0xfe01, KEY_RECORD },
+ { 0xfe4e, KEY_POWER },
+};
+
+static struct rc_map_list dvico_mce_map = {
+ .map = {
+ .scan = rc_map_dvico_mce_table,
+ .size = ARRAY_SIZE(rc_map_dvico_mce_table),
+ .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DVICO_MCE,
+ }
+};
+
+static int __init init_rc_map_dvico_mce(void)
+{
+ return rc_map_register(&dvico_mce_map);
+}
+
+static void __exit exit_rc_map_dvico_mce(void)
+{
+ rc_map_unregister(&dvico_mce_map);
+}
+
+module_init(init_rc_map_dvico_mce)
+module_exit(exit_rc_map_dvico_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-dvico-portable.c b/drivers/media/rc/keymaps/rc-dvico-portable.c
new file mode 100644
index 000000000000..94ceeee94b3f
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-dvico-portable.c
@@ -0,0 +1,76 @@
+/*
+ * keymap imported from cxusb.c
+ *
+ * Copyright (C) 2016 Sean Young
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table rc_map_dvico_portable_table[] = {
+ { 0xfc02, KEY_SETUP }, /* Profile */
+ { 0xfc43, KEY_POWER2 },
+ { 0xfc06, KEY_EPG },
+ { 0xfc5a, KEY_BACK },
+ { 0xfc05, KEY_MENU },
+ { 0xfc47, KEY_INFO },
+ { 0xfc01, KEY_TAB },
+ { 0xfc42, KEY_PREVIOUSSONG },/* Replay */
+ { 0xfc49, KEY_VOLUMEUP },
+ { 0xfc09, KEY_VOLUMEDOWN },
+ { 0xfc54, KEY_CHANNELUP },
+ { 0xfc0b, KEY_CHANNELDOWN },
+ { 0xfc16, KEY_CAMERA },
+ { 0xfc40, KEY_TUNER }, /* ATV/DTV */
+ { 0xfc45, KEY_OPEN },
+ { 0xfc19, KEY_1 },
+ { 0xfc18, KEY_2 },
+ { 0xfc1b, KEY_3 },
+ { 0xfc1a, KEY_4 },
+ { 0xfc58, KEY_5 },
+ { 0xfc59, KEY_6 },
+ { 0xfc15, KEY_7 },
+ { 0xfc14, KEY_8 },
+ { 0xfc17, KEY_9 },
+ { 0xfc44, KEY_ANGLE }, /* Aspect */
+ { 0xfc55, KEY_0 },
+ { 0xfc07, KEY_ZOOM },
+ { 0xfc0a, KEY_REWIND },
+ { 0xfc08, KEY_PLAYPAUSE },
+ { 0xfc4b, KEY_FASTFORWARD },
+ { 0xfc5b, KEY_MUTE },
+ { 0xfc04, KEY_STOP },
+ { 0xfc56, KEY_RECORD },
+ { 0xfc57, KEY_POWER },
+ { 0xfc41, KEY_UNKNOWN }, /* INPUT */
+ { 0xfc00, KEY_UNKNOWN }, /* HD */
+};
+
+static struct rc_map_list dvico_portable_map = {
+ .map = {
+ .scan = rc_map_dvico_portable_table,
+ .size = ARRAY_SIZE(rc_map_dvico_portable_table),
+ .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DVICO_PORTABLE,
+ }
+};
+
+static int __init init_rc_map_dvico_portable(void)
+{
+ return rc_map_register(&dvico_portable_map);
+}
+
+static void __exit exit_rc_map_dvico_portable(void)
+{
+ rc_map_unregister(&dvico_portable_map);
+}
+
+module_init(init_rc_map_dvico_portable)
+module_exit(exit_rc_map_dvico_portable)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/rc/keymaps/rc-geekbox.c b/drivers/media/rc/keymaps/rc-geekbox.c
new file mode 100644
index 000000000000..affc4c481888
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-geekbox.c
@@ -0,0 +1,55 @@
+/*
+ * Keytable for the GeekBox remote controller
+ *
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table geekbox[] = {
+ { 0x01, KEY_BACK },
+ { 0x02, KEY_DOWN },
+ { 0x03, KEY_UP },
+ { 0x07, KEY_OK },
+ { 0x0b, KEY_VOLUMEUP },
+ { 0x0e, KEY_LEFT },
+ { 0x13, KEY_MENU },
+ { 0x14, KEY_POWER },
+ { 0x1a, KEY_RIGHT },
+ { 0x48, KEY_HOME },
+ { 0x58, KEY_VOLUMEDOWN },
+ { 0x5c, KEY_SCREEN },
+};
+
+static struct rc_map_list geekbox_map = {
+ .map = {
+ .scan = geekbox,
+ .size = ARRAY_SIZE(geekbox),
+ .rc_type = RC_TYPE_NEC,
+ .name = RC_MAP_GEEKBOX,
+ }
+};
+
+static int __init init_rc_map_geekbox(void)
+{
+ return rc_map_register(&geekbox_map);
+}
+
+static void __exit exit_rc_map_geekbox(void)
+{
+ rc_map_unregister(&geekbox_map);
+}
+
+module_init(init_rc_map_geekbox)
+module_exit(exit_rc_map_geekbox)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index ef4006fe4de0..5be567506bcd 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -86,6 +86,7 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f045e, KEY_BLUE },
{ 0x800f0465, KEY_POWER2 }, /* TV Power */
+ { 0x800f0469, KEY_MESSENGER },
{ 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f046f, KEY_PLAYER }, /* Start media application (NEW) */
diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c
index f9733bb289d6..02c9c243c060 100644
--- a/drivers/media/rc/keymaps/rc-technisat-usb2.c
+++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c
@@ -13,10 +13,6 @@
* License, or (at your option) any later version.
*
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND
* TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO
* THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR
diff --git a/drivers/media/rc/keymaps/rc-tivo.c b/drivers/media/rc/keymaps/rc-tivo.c
index 454e06295692..5cc1b456e329 100644
--- a/drivers/media/rc/keymaps/rc-tivo.c
+++ b/drivers/media/rc/keymaps/rc-tivo.c
@@ -15,62 +15,62 @@
* Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
* which also ships with a TiVo-branded IR transceiver, supported by the mceusb
* driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0x3085, but for some keys, the
* NEC extended checksums do pass, so the table presently has the intended
* values and the checksum-passed versions for those keys.
*/
static struct rc_map_table tivo[] = {
- { 0xa10c900f, KEY_MEDIA }, /* TiVo Button */
- { 0xa10c0807, KEY_POWER2 }, /* TV Power */
- { 0xa10c8807, KEY_TV }, /* Live TV/Swap */
- { 0xa10c2c03, KEY_VIDEO_NEXT }, /* TV Input */
- { 0xa10cc807, KEY_INFO },
- { 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
+ { 0x3085f009, KEY_MEDIA }, /* TiVo Button */
+ { 0x3085e010, KEY_POWER2 }, /* TV Power */
+ { 0x3085e011, KEY_TV }, /* Live TV/Swap */
+ { 0x3085c034, KEY_VIDEO_NEXT }, /* TV Input */
+ { 0x3085e013, KEY_INFO },
+ { 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
{ 0x0085305f, KEY_CYCLEWINDOWS },
- { 0xa10c6c03, KEY_EPG }, /* Guide */
+ { 0x3085c036, KEY_EPG }, /* Guide */
- { 0xa10c2807, KEY_UP },
- { 0xa10c6807, KEY_DOWN },
- { 0xa10ce807, KEY_LEFT },
- { 0xa10ca807, KEY_RIGHT },
+ { 0x3085e014, KEY_UP },
+ { 0x3085e016, KEY_DOWN },
+ { 0x3085e017, KEY_LEFT },
+ { 0x3085e015, KEY_RIGHT },
- { 0xa10c1807, KEY_SCROLLDOWN }, /* Red Thumbs Down */
- { 0xa10c9807, KEY_SELECT },
- { 0xa10c5807, KEY_SCROLLUP }, /* Green Thumbs Up */
+ { 0x3085e018, KEY_SCROLLDOWN }, /* Red Thumbs Down */
+ { 0x3085e019, KEY_SELECT },
+ { 0x3085e01a, KEY_SCROLLUP }, /* Green Thumbs Up */
- { 0xa10c3807, KEY_VOLUMEUP },
- { 0xa10cb807, KEY_VOLUMEDOWN },
- { 0xa10cd807, KEY_MUTE },
- { 0xa10c040b, KEY_RECORD },
- { 0xa10c7807, KEY_CHANNELUP },
- { 0xa10cf807, KEY_CHANNELDOWN },
+ { 0x3085e01c, KEY_VOLUMEUP },
+ { 0x3085e01d, KEY_VOLUMEDOWN },
+ { 0x3085e01b, KEY_MUTE },
+ { 0x3085d020, KEY_RECORD },
+ { 0x3085e01e, KEY_CHANNELUP },
+ { 0x3085e01f, KEY_CHANNELDOWN },
{ 0x0085301f, KEY_CHANNELDOWN },
- { 0xa10c840b, KEY_PLAY },
- { 0xa10cc40b, KEY_PAUSE },
- { 0xa10ca40b, KEY_SLOW },
- { 0xa10c440b, KEY_REWIND },
- { 0xa10c240b, KEY_FASTFORWARD },
- { 0xa10c640b, KEY_PREVIOUS },
- { 0xa10ce40b, KEY_NEXT }, /* ->| */
+ { 0x3085d021, KEY_PLAY },
+ { 0x3085d023, KEY_PAUSE },
+ { 0x3085d025, KEY_SLOW },
+ { 0x3085d022, KEY_REWIND },
+ { 0x3085d024, KEY_FASTFORWARD },
+ { 0x3085d026, KEY_PREVIOUS },
+ { 0x3085d027, KEY_NEXT }, /* ->| */
- { 0xa10c220d, KEY_ZOOM }, /* Aspect */
- { 0xa10c120d, KEY_STOP },
- { 0xa10c520d, KEY_DVD }, /* DVD Menu */
+ { 0x3085b044, KEY_ZOOM }, /* Aspect */
+ { 0x3085b048, KEY_STOP },
+ { 0x3085b04a, KEY_DVD }, /* DVD Menu */
- { 0xa10c140b, KEY_NUMERIC_1 },
- { 0xa10c940b, KEY_NUMERIC_2 },
- { 0xa10c540b, KEY_NUMERIC_3 },
- { 0xa10cd40b, KEY_NUMERIC_4 },
- { 0xa10c340b, KEY_NUMERIC_5 },
- { 0xa10cb40b, KEY_NUMERIC_6 },
- { 0xa10c740b, KEY_NUMERIC_7 },
- { 0xa10cf40b, KEY_NUMERIC_8 },
+ { 0x3085d028, KEY_NUMERIC_1 },
+ { 0x3085d029, KEY_NUMERIC_2 },
+ { 0x3085d02a, KEY_NUMERIC_3 },
+ { 0x3085d02b, KEY_NUMERIC_4 },
+ { 0x3085d02c, KEY_NUMERIC_5 },
+ { 0x3085d02d, KEY_NUMERIC_6 },
+ { 0x3085d02e, KEY_NUMERIC_7 },
+ { 0x3085d02f, KEY_NUMERIC_8 },
{ 0x0085302f, KEY_NUMERIC_8 },
- { 0xa10c0c03, KEY_NUMERIC_9 },
- { 0xa10c8c03, KEY_NUMERIC_0 },
- { 0xa10ccc03, KEY_ENTER },
- { 0xa10c4c03, KEY_CLEAR },
+ { 0x3085c030, KEY_NUMERIC_9 },
+ { 0x3085c031, KEY_NUMERIC_0 },
+ { 0x3085c033, KEY_ENTER },
+ { 0x3085c032, KEY_CLEAR },
};
static struct rc_map_list tivo_map = {
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 3854809e8531..a54ca531d8ef 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -472,7 +468,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
if (retval) {
module_put(cdev->owner);
ir->open--;
- } else {
+ } else if (ir->buf) {
lirc_buffer_clear(ir->buf);
}
if (ir->task)
@@ -582,7 +578,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(ir->d.features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
- if (LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(ir->d.features)) {
result = -ENOTTY;
break;
}
@@ -592,7 +588,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
(__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
- if (LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(ir->d.features)) {
result = -ENOTTY;
break;
}
@@ -651,6 +647,9 @@ ssize_t lirc_dev_fop_read(struct file *file,
return -ENODEV;
}
+ if (!LIRC_CAN_REC(ir->d.features))
+ return -EINVAL;
+
dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
buf = kzalloc(ir->chunk_size, GFP_KERNEL);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 9bf69179eee0..238d8eaf7d94 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -31,10 +31,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/device.h>
@@ -890,7 +886,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "disabling carrier modulation");
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
- return carrier;
+ return 0;
}
for (prescaler = 0; prescaler < 4; ++prescaler) {
@@ -904,7 +900,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
/* Transmit new carrier to mce device */
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
- return carrier;
+ return 0;
}
}
@@ -1181,7 +1177,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
struct rc_dev *rc;
int ret;
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc) {
dev_err(dev, "remote dev allocation failed");
goto out;
@@ -1201,8 +1197,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
usb_to_input_id(ir->usbdev, &rc->input_id);
rc->dev.parent = dev;
rc->priv = ir;
- rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_ALL;
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 7eb3f4f1ddcd..5576dbd6b1a4 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -131,7 +131,7 @@ static int meson_ir_probe(struct platform_device *pdev)
return ir->irq;
}
- ir->rc = rc_allocate_device();
+ ir->rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate rc device\n");
return -ENOMEM;
@@ -144,8 +144,7 @@ static int meson_ir_probe(struct platform_device *pdev)
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
- ir->rc->driver_type = RC_DRIVER_IR_RAW;
- ir->rc->allowed_protocols = RC_BIT_ALL;
+ ir->rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = US_TO_NS(MESON_TRATE);
ir->rc->timeout = MS_TO_NS(200);
ir->rc->driver_name = DRIVER_NAME;
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
new file mode 100644
index 000000000000..f1e164e441e8
--- /dev/null
+++ b/drivers/media/rc/mtk-cir.c
@@ -0,0 +1,335 @@
+/*
+ * Driver for Mediatek IR Receiver Controller
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <media/rc-core.h>
+
+#define MTK_IR_DEV KBUILD_MODNAME
+
+/* Register to enable PWM and IR */
+#define MTK_CONFIG_HIGH_REG 0x0c
+/* Enable IR pulse width detection */
+#define MTK_PWM_EN BIT(13)
+/* Enable IR hardware function */
+#define MTK_IR_EN BIT(0)
+
+/* Register for setting the sample period */
+#define MTK_CONFIG_LOW_REG 0x10
+/* Field to set sample period */
+#define CHK_PERIOD DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, \
+ MTK_IR_CLK_PERIOD)
+#define MTK_CHK_PERIOD (((CHK_PERIOD) << 8) & (GENMASK(20, 8)))
+#define MTK_CHK_PERIOD_MASK (GENMASK(20, 8))
+
+/* Register to clear the state of the state machine */
+#define MTK_IRCLR_REG 0x20
+/* Bit to restart IR receiving */
+#define MTK_IRCLR BIT(0)
+
+/* Register containing pulse width data */
+#define MTK_CHKDATA_REG(i) (0x88 + 4 * (i))
+#define MTK_WIDTH_MASK (GENMASK(7, 0))
+
+/* Register to enable IR interrupt */
+#define MTK_IRINT_EN_REG 0xcc
+/* Bit to enable interrupt */
+#define MTK_IRINT_EN BIT(0)
+
+/* Register to ack IR interrupt */
+#define MTK_IRINT_CLR_REG 0xd0
+/* Bit to clear interrupt status */
+#define MTK_IRINT_CLR BIT(0)
+
+/* Maximum count of samples */
+#define MTK_MAX_SAMPLES 0xff
+/* Indicate the end of IR message */
+#define MTK_IR_END(v, p) ((v) == MTK_MAX_SAMPLES && (p) == 0)
+/* Number of registers to record the pulse width */
+#define MTK_CHKDATA_SZ 17
+/* Source clock frequency */
+#define MTK_IR_BASE_CLK 273000000
+/* Frequency after IR internal divider */
+#define MTK_IR_CLK_FREQ (MTK_IR_BASE_CLK / 4)
+/* Period for MTK_IR_CLK in ns */
+#define MTK_IR_CLK_PERIOD DIV_ROUND_CLOSEST(1000000000ul, \
+ MTK_IR_CLK_FREQ)
+/* Sample period in ns */
+#define MTK_IR_SAMPLE (MTK_IR_CLK_PERIOD * 0xc00)
+
+/*
+ * struct mtk_ir - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @rc:		The rc instance
+ * @irq: The IRQ that we are using
+ * @base: The mapped register i/o base
+ * @clk: The clock that we are using
+ */
+struct mtk_ir {
+ struct device *dev;
+ struct rc_dev *rc;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+};
+
+static void mtk_w32_mask(struct mtk_ir *ir, u32 val, u32 mask, unsigned int reg)
+{
+ u32 tmp;
+
+ tmp = __raw_readl(ir->base + reg);
+ tmp = (tmp & ~mask) | val;
+ __raw_writel(tmp, ir->base + reg);
+}
+
+static void mtk_w32(struct mtk_ir *ir, u32 val, unsigned int reg)
+{
+ __raw_writel(val, ir->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_ir *ir, unsigned int reg)
+{
+ return __raw_readl(ir->base + reg);
+}
+
+static inline void mtk_irq_disable(struct mtk_ir *ir, u32 mask)
+{
+ u32 val;
+
+ val = mtk_r32(ir, MTK_IRINT_EN_REG);
+ mtk_w32(ir, val & ~mask, MTK_IRINT_EN_REG);
+}
+
+static inline void mtk_irq_enable(struct mtk_ir *ir, u32 mask)
+{
+ u32 val;
+
+ val = mtk_r32(ir, MTK_IRINT_EN_REG);
+ mtk_w32(ir, val | mask, MTK_IRINT_EN_REG);
+}
+
+static irqreturn_t mtk_ir_irq(int irqno, void *dev_id)
+{
+ struct mtk_ir *ir = dev_id;
+ u8 wid = 0;
+ u32 i, j, val;
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ /*
+	 * An explicit reset of the decoder state machine is required
+	 * because 1) the longest space duration the MTK IR hardware can
+	 * record is not safely long (e.g. 12ms when the rx resolution is
+	 * the default 46us), so there is a risk that the trailing space is
+	 * not long enough for every decoder to reset itself, and 2) the
+	 * IRQ handler guarantees that the start of the IR message is
+	 * always contained in and starts from register MTK_CHKDATA_REG(0).
+ */
+ ir_raw_event_reset(ir->rc);
+
+ /* First message must be pulse */
+ rawir.pulse = false;
+
+ /* Handle all pulse and space IR controller captures */
+ for (i = 0 ; i < MTK_CHKDATA_SZ ; i++) {
+ val = mtk_r32(ir, MTK_CHKDATA_REG(i));
+ dev_dbg(ir->dev, "@reg%d=0x%08x\n", i, val);
+
+ for (j = 0 ; j < 4 ; j++) {
+ wid = (val & (MTK_WIDTH_MASK << j * 8)) >> j * 8;
+ rawir.pulse = !rawir.pulse;
+ rawir.duration = wid * (MTK_IR_SAMPLE + 1);
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+ }
+ }
+
+ /*
+	 * The maximum number of edges the IR controller can hold is
+	 * MTK_CHKDATA_SZ * 4. If the received IR message exceeds that
+	 * limit, the last, incomplete message gets a trailing space
+	 * appended and is still sent to ir-rc-raw for decoding; it may
+	 * contain enough information to decode a scancode even though
+	 * the trailing end of the message is missing.
+ */
+ if (!MTK_IR_END(wid, rawir.pulse)) {
+ rawir.pulse = false;
+ rawir.duration = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1);
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+ }
+
+ ir_raw_event_handle(ir->rc);
+
+ /*
+	 * Restart the controller for the next receive; this also
+	 * clears all CHKDATA registers
+ */
+ mtk_w32_mask(ir, 0x1, MTK_IRCLR, MTK_IRCLR_REG);
+
+ /* Clear interrupt status */
+ mtk_w32_mask(ir, 0x1, MTK_IRINT_CLR, MTK_IRINT_CLR_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_ir_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct resource *res;
+ struct mtk_ir *ir;
+ u32 val;
+ int ret = 0;
+ const char *map_name;
+
+ ir = devm_kzalloc(dev, sizeof(struct mtk_ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ ir->dev = dev;
+
+ if (!of_device_is_compatible(dn, "mediatek,mt7623-cir"))
+ return -ENODEV;
+
+ ir->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(ir->clk)) {
+		dev_err(dev, "failed to get an ir clock.\n");
+ return PTR_ERR(ir->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ir->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ir->base)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(ir->base);
+ }
+
+ ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+ if (!ir->rc) {
+ dev_err(dev, "failed to allocate device\n");
+ return -ENOMEM;
+ }
+
+ ir->rc->priv = ir;
+ ir->rc->input_name = MTK_IR_DEV;
+ ir->rc->input_phys = MTK_IR_DEV "/input0";
+ ir->rc->input_id.bustype = BUS_HOST;
+ ir->rc->input_id.vendor = 0x0001;
+ ir->rc->input_id.product = 0x0001;
+ ir->rc->input_id.version = 0x0001;
+ map_name = of_get_property(dn, "linux,rc-map-name", NULL);
+ ir->rc->map_name = map_name ?: RC_MAP_EMPTY;
+ ir->rc->dev.parent = dev;
+ ir->rc->driver_name = MTK_IR_DEV;
+ ir->rc->allowed_protocols = RC_BIT_ALL;
+ ir->rc->rx_resolution = MTK_IR_SAMPLE;
+ ir->rc->timeout = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1);
+
+ ret = devm_rc_register_device(dev, ir->rc);
+ if (ret) {
+ dev_err(dev, "failed to register rc device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ir);
+
+ ir->irq = platform_get_irq(pdev, 0);
+ if (ir->irq < 0) {
+ dev_err(dev, "no irq resource\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Enable interrupt after proper hardware
+ * setup and IRQ handler registration
+ */
+ if (clk_prepare_enable(ir->clk)) {
+ dev_err(dev, "try to enable ir_clk failed\n");
+ ret = -EINVAL;
+ goto exit_clkdisable_clk;
+ }
+
+ mtk_irq_disable(ir, MTK_IRINT_EN);
+
+ ret = devm_request_irq(dev, ir->irq, mtk_ir_irq, 0, MTK_IR_DEV, ir);
+ if (ret) {
+ dev_err(dev, "failed request irq\n");
+ goto exit_clkdisable_clk;
+ }
+
+ /* Enable IR and PWM */
+ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
+ val |= MTK_PWM_EN | MTK_IR_EN;
+ mtk_w32(ir, val, MTK_CONFIG_HIGH_REG);
+
+ /* Setting sample period */
+ mtk_w32_mask(ir, MTK_CHK_PERIOD, MTK_CHK_PERIOD_MASK,
+ MTK_CONFIG_LOW_REG);
+
+ mtk_irq_enable(ir, MTK_IRINT_EN);
+
+ dev_info(dev, "Initialized MT7623 IR driver, sample period = %luus\n",
+ DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
+
+ return 0;
+
+exit_clkdisable_clk:
+ clk_disable_unprepare(ir->clk);
+
+ return ret;
+}
+
+static int mtk_ir_remove(struct platform_device *pdev)
+{
+ struct mtk_ir *ir = platform_get_drvdata(pdev);
+
+ /*
+	 * Avoid contention between the remove handler and the IRQ
+	 * handler by disabling the IR interrupt and waiting for any
+	 * pending IRQ handler to complete
+ */
+ mtk_irq_disable(ir, MTK_IRINT_EN);
+ synchronize_irq(ir->irq);
+
+ clk_disable_unprepare(ir->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_ir_match[] = {
+ { .compatible = "mediatek,mt7623-cir" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_ir_match);
+
+static struct platform_driver mtk_ir_driver = {
+ .probe = mtk_ir_probe,
+ .remove = mtk_ir_remove,
+ .driver = {
+ .name = MTK_IR_DEV,
+ .of_match_table = mtk_ir_match,
+ },
+};
+
+module_platform_driver(mtk_ir_driver);
+
+MODULE_DESCRIPTION("Mediatek IR Receiver Controller Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 4b78c891eb77..b109f8246b96 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -18,11 +18,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -176,6 +171,41 @@ static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
}
}
+static void nvt_write_wakeup_codes(struct rc_dev *dev,
+ const u8 *wbuf, int count)
+{
+ u8 tolerance, config;
+ struct nvt_dev *nvt = dev->priv;
+ int i;
+
+ /* hardcode the tolerance to 10% */
+ tolerance = DIV_ROUND_UP(count, 10);
+
+ spin_lock(&nvt->lock);
+
+ nvt_clear_cir_wake_fifo(nvt);
+ nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
+ nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
+
+ config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+ /* enable writes to wake fifo */
+ nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
+ CIR_WAKE_IRCON);
+
+ if (count)
+ pr_info("Wake samples (%d) =", count);
+ else
+ pr_info("Wake sample fifo cleared");
+
+ for (i = 0; i < count; i++)
+ nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);
+
+ nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
+
+ spin_unlock(&nvt->lock);
+}
+
static ssize_t wakeup_data_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -214,9 +244,7 @@ static ssize_t wakeup_data_store(struct device *dev,
const char *buf, size_t len)
{
struct rc_dev *rc_dev = to_rc_dev(dev);
- struct nvt_dev *nvt = rc_dev->priv;
- unsigned long flags;
- u8 tolerance, config, wake_buf[WAKEUP_MAX_SIZE];
+ u8 wake_buf[WAKEUP_MAX_SIZE];
char **argv;
int i, count;
unsigned int val;
@@ -245,27 +273,7 @@ static ssize_t wakeup_data_store(struct device *dev,
wake_buf[i] |= BUF_PULSE_BIT;
}
- /* hardcode the tolerance to 10% */
- tolerance = DIV_ROUND_UP(count, 10);
-
- spin_lock_irqsave(&nvt->lock, flags);
-
- nvt_clear_cir_wake_fifo(nvt);
- nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
- nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
-
- config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-
- /* enable writes to wake fifo */
- nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
- CIR_WAKE_IRCON);
-
- for (i = 0; i < count; i++)
- nvt_cir_wake_reg_write(nvt, wake_buf[i], CIR_WAKE_WR_FIFO_DATA);
-
- nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
-
- spin_unlock_irqrestore(&nvt->lock, flags);
+ nvt_write_wakeup_codes(rc_dev, wake_buf, count);
ret = len;
out:
@@ -662,6 +670,62 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
+static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
+ struct rc_scancode_filter *sc_filter)
+{
+ u8 buf_val;
+ int i, ret, count;
+ unsigned int val;
+ struct ir_raw_event *raw;
+ u8 wake_buf[WAKEUP_MAX_SIZE];
+ bool complete;
+
+ /* Require mask to be set */
+ if (!sc_filter->mask)
+ return 0;
+
+ raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return -ENOMEM;
+
+ ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
+ raw, WAKEUP_MAX_SIZE);
+ complete = (ret != -ENOBUFS);
+ if (!complete)
+ ret = WAKEUP_MAX_SIZE;
+ else if (ret < 0)
+ goto out_raw;
+
+ /* Inspect the ir samples */
+ for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
+ /* NS to US */
+ val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;
+
+ /* Split too large values into several smaller ones */
+ while (val > 0 && count < WAKEUP_MAX_SIZE) {
+ /* Skip last value for better comparison tolerance */
+ if (complete && i == ret - 1 && val < BUF_LEN_MASK)
+ break;
+
+ /* Clamp values to BUF_LEN_MASK at most */
+ buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
+
+ wake_buf[count] = buf_val;
+ val -= buf_val;
+ if ((raw[i]).pulse)
+ wake_buf[count] |= BUF_PULSE_BIT;
+ count++;
+ }
+ }
+
+ nvt_write_wakeup_codes(dev, wake_buf, count);
+ ret = 0;
+out_raw:
+ kfree(raw);
+
+ return ret;
+}
+
/*
* nvt_tx_ir
*
@@ -998,7 +1062,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
return -ENOMEM;
/* input device for IR remote (and tx) */
- nvt->rdev = devm_rc_allocate_device(&pdev->dev);
+ nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
if (!nvt->rdev)
return -ENOMEM;
rdev = nvt->rdev;
@@ -1061,12 +1125,14 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rdev->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER;
+ rdev->encode_wakeup = true;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
+ rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index c41c5765e1d2..88a29df38a57 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -18,11 +18,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
#include <linux/spinlock.h>
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 585d5e52118d..a70a5c557434 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -20,7 +20,6 @@
#define MAX_IR_EVENT_SIZE 512
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <media/rc-core.h>
struct ir_raw_handler {
@@ -28,6 +27,8 @@ struct ir_raw_handler {
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
+ int (*encode)(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -37,7 +38,6 @@ struct ir_raw_handler {
struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */
struct task_struct *thread;
- spinlock_t lock;
/* fifo for the pulse/space durations */
DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
ktime_t last_event; /* when last event occurred */
@@ -154,6 +154,111 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
+/* functions for IR encoders */
+
+static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
+ unsigned int pulse,
+ u32 duration)
+{
+ init_ir_raw_event(ev);
+ ev->duration = duration;
+ ev->pulse = pulse;
+}
+
+/**
+ * struct ir_raw_timings_manchester - Manchester coding timings
+ * @leader: duration of leader pulse (if any) 0 if continuing
+ * existing signal (see @pulse_space_start)
+ * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
+ * @clock: duration of each pulse/space in ns
+ * @invert: if set clock logic is inverted
+ * (0 = space + pulse, 1 = pulse + space)
+ * @trailer_space: duration of trailer space in ns
+ */
+struct ir_raw_timings_manchester {
+ unsigned int leader;
+ unsigned int pulse_space_start:1;
+ unsigned int clock;
+ unsigned int invert:1;
+ unsigned int trailer_space;
+};
+
+int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_manchester *timings,
+ unsigned int n, unsigned int data);
+
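+/*
+ * A minimal usage sketch for ir_raw_gen_manchester(); the RC-5-like 888888 ns
+ * half-bit clock, the 14 bit frame length and the buffer size are assumptions
+ * for illustration only:
+ *
+ *	static const struct ir_raw_timings_manchester sketch = {
+ *		.leader		= 888888,
+ *		.pulse_space_start = 0,
+ *		.clock		= 888888,
+ *		.trailer_space	= 888888,
+ *	};
+ *	struct ir_raw_event buf[32], *e = buf;
+ *	int ret = ir_raw_gen_manchester(&e, ARRAY_SIZE(buf), &sketch, 14, bits);
+ *
+ * On success ret is 0 and e points past the last event written; -ENOBUFS
+ * means buf filled up before the whole frame was encoded.
+ */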
+/**
+ * ir_raw_gen_pulse_space() - generate pulse and space raw events.
+ * @ev: Pointer to pointer to next free raw event.
+ * Will be incremented for each raw event written.
+ * @max: Pointer to number of raw events available in buffer.
+ * Will be decremented for each raw event written.
+ * @pulse_width: Width of pulse in ns.
+ * @space_width: Width of space in ns.
+ *
+ * Returns: 0 on success.
+ * -ENOBUFS if there isn't enough buffer space to write both raw
+ * events. In this case @max events will have been written.
+ */
+static inline int ir_raw_gen_pulse_space(struct ir_raw_event **ev,
+ unsigned int *max,
+ unsigned int pulse_width,
+ unsigned int space_width)
+{
+ if (!*max)
+ return -ENOBUFS;
+ init_ir_raw_event_duration((*ev)++, 1, pulse_width);
+ if (!--*max)
+ return -ENOBUFS;
+ init_ir_raw_event_duration((*ev)++, 0, space_width);
+ --*max;
+ return 0;
+}
+
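+/*
+ * A short usage sketch for ir_raw_gen_pulse_space(); the 560 us / 1690 us
+ * NEC-style durations are assumptions for illustration only:
+ *
+ *	struct ir_raw_event buf[2], *e = buf;
+ *	unsigned int max = ARRAY_SIZE(buf);
+ *	int ret = ir_raw_gen_pulse_space(&e, &max, 560000, 1690000);
+ *
+ * On success ret is 0, e points past the two events written and max has been
+ * decremented by two; -ENOBUFS means the buffer filled up first.
+ */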
+/**
+ * struct ir_raw_timings_pd - pulse-distance modulation timings
+ * @header_pulse: duration of header pulse in ns (0 for none)
+ * @header_space: duration of header space in ns
+ * @bit_pulse: duration of bit pulse in ns
+ * @bit_space: duration of bit space (for logic 0 and 1) in ns
+ * @trailer_pulse: duration of trailer pulse in ns
+ * @trailer_space: duration of trailer space in ns
+ * @msb_first: 1 if most significant bit is sent first
+ */
+struct ir_raw_timings_pd {
+ unsigned int header_pulse;
+ unsigned int header_space;
+ unsigned int bit_pulse;
+ unsigned int bit_space[2];
+ unsigned int trailer_pulse;
+ unsigned int trailer_space;
+ unsigned int msb_first:1;
+};
+
+int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_pd *timings,
+ unsigned int n, u64 data);
+
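+/*
+ * A minimal sketch of encoding with ir_raw_gen_pd(); the NEC-like durations
+ * (9 ms / 4.5 ms header, 560 us pulses, 560 us / 1690 us spaces) and the
+ * 32 bit length are assumptions for illustration only:
+ *
+ *	static const struct ir_raw_timings_pd sketch = {
+ *		.header_pulse	= 9000000,
+ *		.header_space	= 4500000,
+ *		.bit_pulse	= 560000,
+ *		.bit_space	= { 560000, 1690000 },
+ *		.trailer_pulse	= 560000,
+ *		.trailer_space	= 560000,
+ *		.msb_first	= 0,
+ *	};
+ *	struct ir_raw_event buf[68], *e = buf;
+ *	int ret = ir_raw_gen_pd(&e, ARRAY_SIZE(buf), &sketch, 32, raw_bits);
+ */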
+/**
+ * struct ir_raw_timings_pl - pulse-length modulation timings
+ * @header_pulse: duration of header pulse in ns (0 for none)
+ * @bit_space: duration of bit space in ns
+ * @bit_pulse: duration of bit pulse (for logic 0 and 1) in ns
+ * @trailer_space: duration of trailer space in ns
+ * @msb_first: 1 if most significant bit is sent first
+ */
+struct ir_raw_timings_pl {
+ unsigned int header_pulse;
+ unsigned int bit_space;
+ unsigned int bit_pulse[2];
+ unsigned int trailer_space;
+ unsigned int msb_first:1;
+};
+
+int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_pl *timings,
+ unsigned int n, u64 data);
+
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 1c42a9f2f290..7fa84b64a2ae 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -17,7 +17,6 @@
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
-#include <linux/freezer.h>
#include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
@@ -34,32 +33,26 @@ static int ir_raw_event_thread(void *data)
struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
- while (!kthread_should_stop()) {
-
- spin_lock_irq(&raw->lock);
-
- if (!kfifo_len(&raw->kfifo)) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- set_current_state(TASK_RUNNING);
-
- spin_unlock_irq(&raw->lock);
- schedule();
- continue;
+ while (1) {
+ mutex_lock(&ir_raw_handler_lock);
+ while (kfifo_out(&raw->kfifo, &ev, 1)) {
+ list_for_each_entry(handler, &ir_raw_handler_list, list)
+ if (raw->dev->enabled_protocols &
+ handler->protocols || !handler->protocols)
+ handler->decode(raw->dev, ev);
+ raw->prev_ev = ev;
}
+ mutex_unlock(&ir_raw_handler_lock);
- if(!kfifo_out(&raw->kfifo, &ev, 1))
- dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
- spin_unlock_irq(&raw->lock);
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- if (raw->dev->enabled_protocols & handler->protocols ||
- !handler->protocols)
- handler->decode(raw->dev, ev);
- raw->prev_ev = ev;
- mutex_unlock(&ir_raw_handler_lock);
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ } else if (!kfifo_is_empty(&raw->kfifo))
+ set_current_state(TASK_RUNNING);
+
+ schedule();
}
return 0;
@@ -218,14 +211,10 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
*/
void ir_raw_event_handle(struct rc_dev *dev)
{
- unsigned long flags;
-
if (!dev->raw)
return;
- spin_lock_irqsave(&dev->raw->lock, flags);
wake_up_process(dev->raw->thread);
- spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -246,10 +235,254 @@ static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
mutex_lock(&dev->lock);
dev->enabled_protocols &= ~protocols;
- dev->enabled_wakeup_protocols &= ~protocols;
mutex_unlock(&dev->lock);
}
+/**
+ * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
+ * @ev: Pointer to pointer to next free event. *@ev is incremented for
+ * each raw event filled.
+ * @max: Maximum number of raw events to fill.
+ * @timings: Manchester modulation timings.
+ * @n: Number of bits of data.
+ * @data: Data bits to encode.
+ *
+ * Encodes the @n least significant bits of @data using Manchester (bi-phase)
+ * modulation with the timing characteristics described by @timings, writing up
+ * to @max raw IR events using the *@ev pointer.
+ *
+ * Returns: 0 on success.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * full encoded data. In this case all @max events will have been
+ * written.
+ */
+int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_manchester *timings,
+ unsigned int n, unsigned int data)
+{
+ bool need_pulse;
+ unsigned int i;
+ int ret = -ENOBUFS;
+
+ i = 1 << (n - 1);
+
+ if (timings->leader) {
+ if (!max--)
+ return ret;
+ if (timings->pulse_space_start) {
+ init_ir_raw_event_duration((*ev)++, 1, timings->leader);
+
+ if (!max--)
+ return ret;
+ init_ir_raw_event_duration((*ev), 0, timings->leader);
+ } else {
+ init_ir_raw_event_duration((*ev), 1, timings->leader);
+ }
+ i >>= 1;
+ } else {
+ /* continue existing signal */
+ --(*ev);
+ }
+ /* from here on *ev will point to the last event rather than the next */
+
+ while (n && i > 0) {
+ need_pulse = !(data & i);
+ if (timings->invert)
+ need_pulse = !need_pulse;
+ if (need_pulse == !!(*ev)->pulse) {
+ (*ev)->duration += timings->clock;
+ } else {
+ if (!max--)
+ goto nobufs;
+ init_ir_raw_event_duration(++(*ev), need_pulse,
+ timings->clock);
+ }
+
+ if (!max--)
+ goto nobufs;
+ init_ir_raw_event_duration(++(*ev), !need_pulse,
+ timings->clock);
+ i >>= 1;
+ }
+
+ if (timings->trailer_space) {
+ if (!(*ev)->pulse)
+ (*ev)->duration += timings->trailer_space;
+ else if (!max--)
+ goto nobufs;
+ else
+ init_ir_raw_event_duration(++(*ev), 0,
+ timings->trailer_space);
+ }
+
+ ret = 0;
+nobufs:
+ /* point to the next event rather than last event before returning */
+ ++(*ev);
+ return ret;
+}
+EXPORT_SYMBOL(ir_raw_gen_manchester);
+
+/**
+ * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
+ * @ev: Pointer to pointer to next free event. *@ev is incremented for
+ * each raw event filled.
+ * @max: Maximum number of raw events to fill.
+ * @timings: Pulse distance modulation timings.
+ * @n: Number of bits of data.
+ * @data: Data bits to encode.
+ *
+ * Encodes the @n least significant bits of @data using pulse-distance
+ * modulation with the timing characteristics described by @timings, writing up
+ * to @max raw IR events using the *@ev pointer.
+ *
+ * Returns: 0 on success.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * full encoded data. In this case all @max events will have been
+ * written.
+ */
+int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_pd *timings,
+ unsigned int n, u64 data)
+{
+ int i;
+ int ret;
+ unsigned int space;
+
+ if (timings->header_pulse) {
+ ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
+ timings->header_space);
+ if (ret)
+ return ret;
+ }
+
+ if (timings->msb_first) {
+ for (i = n - 1; i >= 0; --i) {
+ space = timings->bit_space[(data >> i) & 1];
+ ret = ir_raw_gen_pulse_space(ev, &max,
+ timings->bit_pulse,
+ space);
+ if (ret)
+ return ret;
+ }
+ } else {
+ for (i = 0; i < n; ++i, data >>= 1) {
+ space = timings->bit_space[data & 1];
+ ret = ir_raw_gen_pulse_space(ev, &max,
+ timings->bit_pulse,
+ space);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
+ timings->trailer_space);
+ return ret;
+}
+EXPORT_SYMBOL(ir_raw_gen_pd);
+
+/**
+ * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
+ * @ev: Pointer to pointer to next free event. *@ev is incremented for
+ * each raw event filled.
+ * @max: Maximum number of raw events to fill.
+ * @timings:	Pulse-length modulation timings.
+ * @n: Number of bits of data.
+ * @data: Data bits to encode.
+ *
+ * Encodes the @n least significant bits of @data using pulse-length
+ * modulation with the timing characteristics described by @timings, writing up
+ * to @max raw IR events using the *@ev pointer.
+ *
+ * Returns: 0 on success.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * full encoded data. In this case all @max events will have been
+ * written.
+ */
+int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
+ const struct ir_raw_timings_pl *timings,
+ unsigned int n, u64 data)
+{
+ int i;
+ int ret = -ENOBUFS;
+ unsigned int pulse;
+
+ if (!max--)
+ return ret;
+
+ init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);
+
+ if (timings->msb_first) {
+ for (i = n - 1; i >= 0; --i) {
+ if (!max--)
+ return ret;
+ init_ir_raw_event_duration((*ev)++, 0,
+ timings->bit_space);
+ if (!max--)
+ return ret;
+ pulse = timings->bit_pulse[(data >> i) & 1];
+ init_ir_raw_event_duration((*ev)++, 1, pulse);
+ }
+ } else {
+ for (i = 0; i < n; ++i, data >>= 1) {
+ if (!max--)
+ return ret;
+ init_ir_raw_event_duration((*ev)++, 0,
+ timings->bit_space);
+ if (!max--)
+ return ret;
+ pulse = timings->bit_pulse[data & 1];
+ init_ir_raw_event_duration((*ev)++, 1, pulse);
+ }
+ }
+
+ if (!max--)
+ return ret;
+
+ init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);
+
+ return 0;
+}
+EXPORT_SYMBOL(ir_raw_gen_pl);
+
+/**
+ * ir_raw_encode_scancode() - Encode a scancode as raw events
+ *
+ * @protocol: protocol
+ * @scancode: scancode filter describing a single scancode
+ * @events: array of raw events to write into
+ * @max: max number of raw events
+ *
+ * Attempts to encode the scancode as raw events.
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ * -EINVAL if the scancode is ambiguous or invalid, or if no
+ * compatible encoder was found.
+ */
+int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_handler *handler;
+ int ret = -EINVAL;
+ u64 mask = 1ULL << protocol;
+
+ mutex_lock(&ir_raw_handler_lock);
+ list_for_each_entry(handler, &ir_raw_handler_list, list) {
+ if (handler->protocols & mask && handler->encode) {
+ ret = handler->encode(protocol, scancode, events, max);
+ if (ret >= 0 || ret == -ENOBUFS)
+ break;
+ }
+ }
+ mutex_unlock(&ir_raw_handler_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ir_raw_encode_scancode);
+
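+/*
+ * Sketch of how a protocol module can take part in the lookup above by
+ * filling in .encode in its ir_raw_handler; the foo_* names and timings are
+ * placeholders, not part of any existing decoder:
+ *
+ *	static int foo_encode(enum rc_type protocol, u32 scancode,
+ *			      struct ir_raw_event *events, unsigned int max)
+ *	{
+ *		struct ir_raw_event *e = events;
+ *		int ret = ir_raw_gen_pd(&e, max, &foo_timings, 16, scancode);
+ *
+ *		return ret ? ret : e - events;
+ *	}
+ *
+ *	static struct ir_raw_handler foo_handler = {
+ *		.protocols	= RC_BIT_OTHER,
+ *		.decode		= foo_decode,
+ *		.encode		= foo_encode,
+ *	};
+ */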
/*
* Used to (un)register raw event clients
*/
@@ -269,13 +502,18 @@ int ir_raw_event_register(struct rc_dev *dev)
dev->change_protocol = change_protocol;
INIT_KFIFO(dev->raw->kfifo);
- spin_lock_init(&dev->raw->lock);
- dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
- "rc%u", dev->minor);
+ /*
+ * raw transmitters do not need any event registration
+ * because the event is coming from userspace
+ */
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
+ "rc%u", dev->minor);
- if (IS_ERR(dev->raw->thread)) {
- rc = PTR_ERR(dev->raw->thread);
- goto out;
+ if (IS_ERR(dev->raw->thread)) {
+ rc = PTR_ERR(dev->raw->thread);
+ goto out;
+ }
}
mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index 63dace8198b0..62195af24fbe 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -17,15 +17,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -176,12 +173,47 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}
+static int loop_set_wakeup_filter(struct rc_dev *dev,
+ struct rc_scancode_filter *sc)
+{
+ static const unsigned int max = 512;
+ struct ir_raw_event *raw;
+ int ret;
+ int i;
+
+ /* fine to disable filter */
+ if (!sc->mask)
+ return 0;
+
+ /* encode the specified filter and loop it back */
+ raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return -ENOMEM;
+
+ ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc->data, raw, max);
+ /* still loop back the partial raw IR even if it's incomplete */
+ if (ret == -ENOBUFS)
+ ret = max;
+ if (ret >= 0) {
+ /* do the loopback */
+ for (i = 0; i < ret; ++i)
+ ir_raw_event_store(dev, &raw[i]);
+ ir_raw_event_handle(dev);
+
+ ret = 0;
+ }
+
+ kfree(raw);
+
+ return ret;
+}
+
static int __init loop_init(void)
{
struct rc_dev *rc;
int ret;
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc) {
printk(KERN_ERR DRIVER_NAME ": rc_dev allocation failed\n");
return -ENOMEM;
@@ -194,8 +226,9 @@ static int __init loop_init(void)
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
- rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_ALL;
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rc->allowed_wakeup_protocols = RC_BIT_ALL_IR_ENCODER;
+ rc->encode_wakeup = true;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
@@ -209,6 +242,7 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
+ rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index dedaf38c5ff6..2424946740e6 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -724,6 +724,72 @@ void rc_keydown_notimeout(struct rc_dev *dev, enum rc_type protocol,
}
EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
+/**
+ * rc_validate_filter() - checks that the scancode and mask are valid and
+ * provides sensible defaults
+ * @dev: the struct rc_dev descriptor of the device
+ * @filter: the scancode and mask
+ * Return: 0 on success, or -EINVAL if the filter is not valid
+ */
+static int rc_validate_filter(struct rc_dev *dev,
+ struct rc_scancode_filter *filter)
+{
+ static u32 masks[] = {
+ [RC_TYPE_RC5] = 0x1f7f,
+ [RC_TYPE_RC5X_20] = 0x1f7f3f,
+ [RC_TYPE_RC5_SZ] = 0x2fff,
+ [RC_TYPE_SONY12] = 0x1f007f,
+ [RC_TYPE_SONY15] = 0xff007f,
+ [RC_TYPE_SONY20] = 0x1fff7f,
+ [RC_TYPE_JVC] = 0xffff,
+ [RC_TYPE_NEC] = 0xffff,
+ [RC_TYPE_NECX] = 0xffffff,
+ [RC_TYPE_NEC32] = 0xffffffff,
+ [RC_TYPE_SANYO] = 0x1fffff,
+ [RC_TYPE_RC6_0] = 0xffff,
+ [RC_TYPE_RC6_6A_20] = 0xfffff,
+ [RC_TYPE_RC6_6A_24] = 0xffffff,
+ [RC_TYPE_RC6_6A_32] = 0xffffffff,
+ [RC_TYPE_RC6_MCE] = 0xffff7fff,
+ [RC_TYPE_SHARP] = 0x1fff,
+ };
+ u32 s = filter->data;
+ enum rc_type protocol = dev->wakeup_protocol;
+
+ switch (protocol) {
+ case RC_TYPE_NECX:
+ if ((((s >> 16) ^ ~(s >> 8)) & 0xff) == 0)
+ return -EINVAL;
+ break;
+ case RC_TYPE_NEC32:
+ if ((((s >> 24) ^ ~(s >> 16)) & 0xff) == 0)
+ return -EINVAL;
+ break;
+ case RC_TYPE_RC6_MCE:
+ if ((s & 0xffff0000) != 0x800f0000)
+ return -EINVAL;
+ break;
+ case RC_TYPE_RC6_6A_32:
+ if ((s & 0xffff0000) == 0x800f0000)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ filter->data &= masks[protocol];
+ filter->mask &= masks[protocol];
+
+ /*
+ * If we have to raw encode the IR for wakeup, we cannot support a
+ * partial mask: only no filter or an exact match is allowed
+ */
+ if (dev->encode_wakeup &&
+ filter->mask != 0 && filter->mask != masks[protocol])
+ return -EINVAL;
+
+ return 0;
+}
+
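+/*
+ * A worked example of the validation above: with dev->wakeup_protocol set to
+ * RC_TYPE_NEC, a filter of data = 0x12345678, mask = 0xffffffff is trimmed to
+ * data = 0x5678, mask = 0xffff by the masks[] table; if the device has to
+ * raw encode the wakeup IR (encode_wakeup), only mask 0 or the full 0xffff
+ * is then accepted.
+ */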
int rc_open(struct rc_dev *rdev)
{
int rval = 0;
@@ -796,7 +862,7 @@ static const struct {
{ RC_BIT_OTHER, "other", NULL },
{ RC_BIT_UNKNOWN, "unknown", NULL },
{ RC_BIT_RC5 |
- RC_BIT_RC5X, "rc-5", "ir-rc5-decoder" },
+ RC_BIT_RC5X_20, "rc-5", "ir-rc5-decoder" },
{ RC_BIT_NEC |
RC_BIT_NECX |
RC_BIT_NEC32, "nec", "ir-nec-decoder" },
@@ -830,11 +896,6 @@ struct rc_filter_attribute {
};
#define to_rc_filter_attr(a) container_of(a, struct rc_filter_attribute, attr)
-#define RC_PROTO_ATTR(_name, _mode, _show, _store, _type) \
- struct rc_filter_attribute dev_attr_##_name = { \
- .attr = __ATTR(_name, _mode, _show, _store), \
- .type = (_type), \
- }
#define RC_FILTER_ATTR(_name, _mode, _show, _store, _type, _mask) \
struct rc_filter_attribute dev_attr_##_name = { \
.attr = __ATTR(_name, _mode, _show, _store), \
@@ -860,13 +921,13 @@ static bool lirc_is_present(void)
}
/**
- * show_protocols() - shows the current/wakeup IR protocol(s)
+ * show_protocols() - shows the current IR protocol(s)
* @device: the device descriptor
* @mattr: the device attribute struct
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type(s).
- * it is trigged by reading /sys/class/rc/rc?/[wakeup_]protocols.
+ * It is triggered by reading /sys/class/rc/rc?/protocols.
* It returns the protocol names of supported protocols.
* Enabled protocols are printed in brackets.
*
@@ -877,7 +938,6 @@ static ssize_t show_protocols(struct device *device,
struct device_attribute *mattr, char *buf)
{
struct rc_dev *dev = to_rc_dev(device);
- struct rc_filter_attribute *fattr = to_rc_filter_attr(mattr);
u64 allowed, enabled;
char *tmp = buf;
int i;
@@ -891,15 +951,10 @@ static ssize_t show_protocols(struct device *device,
mutex_lock(&dev->lock);
- if (fattr->type == RC_FILTER_NORMAL) {
- enabled = dev->enabled_protocols;
- allowed = dev->allowed_protocols;
- if (dev->raw && !allowed)
- allowed = ir_raw_get_allowed_protocols();
- } else {
- enabled = dev->enabled_wakeup_protocols;
- allowed = dev->allowed_wakeup_protocols;
- }
+ enabled = dev->enabled_protocols;
+ allowed = dev->allowed_protocols;
+ if (dev->raw && !allowed)
+ allowed = ir_raw_get_allowed_protocols();
mutex_unlock(&dev->lock);
@@ -997,7 +1052,6 @@ static int parse_protocol_change(u64 *protocols, const char *buf)
}
static void ir_raw_load_modules(u64 *protocols)
-
{
u64 available;
int i, ret;
@@ -1030,8 +1084,7 @@ static void ir_raw_load_modules(u64 *protocols)
if (!(*protocols & proto_names[i].type & ~available))
continue;
- pr_err("Loaded IR protocol module %s, \
- but protocol %s still not available\n",
+ pr_err("Loaded IR protocol module %s, but protocol %s still not available\n",
proto_names[i].module_name,
proto_names[i].name);
*protocols &= ~proto_names[i].type;
@@ -1058,11 +1111,8 @@ static ssize_t store_protocols(struct device *device,
const char *buf, size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
- struct rc_filter_attribute *fattr = to_rc_filter_attr(mattr);
u64 *current_protocols;
- int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
struct rc_scancode_filter *filter;
- int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
u64 old_protocols, new_protocols;
ssize_t rc;
@@ -1073,21 +1123,11 @@ static ssize_t store_protocols(struct device *device,
if (!atomic_read(&dev->initialized))
return -ERESTARTSYS;
- if (fattr->type == RC_FILTER_NORMAL) {
- IR_dprintk(1, "Normal protocol change requested\n");
- current_protocols = &dev->enabled_protocols;
- change_protocol = dev->change_protocol;
- filter = &dev->scancode_filter;
- set_filter = dev->s_filter;
- } else {
- IR_dprintk(1, "Wakeup protocol change requested\n");
- current_protocols = &dev->enabled_wakeup_protocols;
- change_protocol = dev->change_wakeup_protocol;
- filter = &dev->scancode_wakeup_filter;
- set_filter = dev->s_wakeup_filter;
- }
+ IR_dprintk(1, "Normal protocol change requested\n");
+ current_protocols = &dev->enabled_protocols;
+ filter = &dev->scancode_filter;
- if (!change_protocol) {
+ if (!dev->change_protocol) {
IR_dprintk(1, "Protocol switching not supported\n");
return -EINVAL;
}
@@ -1100,7 +1140,7 @@ static ssize_t store_protocols(struct device *device,
if (rc < 0)
goto out;
- rc = change_protocol(dev, &new_protocols);
+ rc = dev->change_protocol(dev, &new_protocols);
if (rc < 0) {
IR_dprintk(1, "Error setting protocols to 0x%llx\n",
(long long)new_protocols);
@@ -1123,16 +1163,16 @@ static ssize_t store_protocols(struct device *device,
* Try setting the same filter with the new protocol (if any).
* Fall back to clearing the filter.
*/
- if (set_filter && filter->mask) {
+ if (dev->s_filter && filter->mask) {
if (new_protocols)
- rc = set_filter(dev, filter);
+ rc = dev->s_filter(dev, filter);
else
rc = -1;
if (rc < 0) {
filter->data = 0;
filter->mask = 0;
- set_filter(dev, filter);
+ dev->s_filter(dev, filter);
}
}
@@ -1221,7 +1261,6 @@ static ssize_t store_filter(struct device *device,
int ret;
unsigned long val;
int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
- u64 *enabled_protocols;
/* Device is being removed */
if (!dev)
@@ -1236,11 +1275,9 @@ static ssize_t store_filter(struct device *device,
if (fattr->type == RC_FILTER_NORMAL) {
set_filter = dev->s_filter;
- enabled_protocols = &dev->enabled_protocols;
filter = &dev->scancode_filter;
} else {
set_filter = dev->s_wakeup_filter;
- enabled_protocols = &dev->enabled_wakeup_protocols;
filter = &dev->scancode_wakeup_filter;
}
@@ -1255,7 +1292,22 @@ static ssize_t store_filter(struct device *device,
else
new_filter.data = val;
- if (!*enabled_protocols && val) {
+ if (fattr->type == RC_FILTER_WAKEUP) {
+ /*
+ * Refuse to set a filter unless a protocol is enabled
+ * and the filter is valid for that protocol
+ */
+ if (dev->wakeup_protocol != RC_TYPE_UNKNOWN)
+ ret = rc_validate_filter(dev, &new_filter);
+ else
+ ret = -EINVAL;
+
+ if (ret != 0)
+ goto unlock;
+ }
+
+ if (fattr->type == RC_FILTER_NORMAL && !dev->enabled_protocols &&
+ val) {
/* refuse to set a filter unless a protocol is enabled */
ret = -EINVAL;
goto unlock;
@@ -1272,6 +1324,182 @@ unlock:
return (ret < 0) ? ret : len;
}
+/*
+ * This is the list of all variants of all protocols, which is used by
+ * the wakeup_protocols sysfs entry. In the protocols sysfs entry some
+ * protocols are grouped together (e.g. nec = nec + necx + nec32).
+ *
+ * For wakeup we need to know the exact protocol variant so the hardware
+ * can be programmed exactly what to expect.
+ */
+static const char * const proto_variant_names[] = {
+ [RC_TYPE_UNKNOWN] = "unknown",
+ [RC_TYPE_OTHER] = "other",
+ [RC_TYPE_RC5] = "rc-5",
+ [RC_TYPE_RC5X_20] = "rc-5x-20",
+ [RC_TYPE_RC5_SZ] = "rc-5-sz",
+ [RC_TYPE_JVC] = "jvc",
+ [RC_TYPE_SONY12] = "sony-12",
+ [RC_TYPE_SONY15] = "sony-15",
+ [RC_TYPE_SONY20] = "sony-20",
+ [RC_TYPE_NEC] = "nec",
+ [RC_TYPE_NECX] = "nec-x",
+ [RC_TYPE_NEC32] = "nec-32",
+ [RC_TYPE_SANYO] = "sanyo",
+ [RC_TYPE_MCE_KBD] = "mce_kbd",
+ [RC_TYPE_RC6_0] = "rc-6-0",
+ [RC_TYPE_RC6_6A_20] = "rc-6-6a-20",
+ [RC_TYPE_RC6_6A_24] = "rc-6-6a-24",
+ [RC_TYPE_RC6_6A_32] = "rc-6-6a-32",
+ [RC_TYPE_RC6_MCE] = "rc-6-mce",
+ [RC_TYPE_SHARP] = "sharp",
+ [RC_TYPE_XMP] = "xmp",
+ [RC_TYPE_CEC] = "cec",
+};
+
+/**
+ * show_wakeup_protocols() - shows the wakeup IR protocol
+ * @device: the device descriptor
+ * @mattr: the device attribute struct
+ * @buf: a pointer to the output buffer
+ *
+ * This routine is a callback for reading the wakeup IR protocol.
+ * It is triggered by reading /sys/class/rc/rc?/wakeup_protocols.
+ * It returns the names of the supported wakeup protocols.
+ * The enabled protocol is printed in brackets.
+ *
+ * dev->lock is taken to guard against races between device
+ * registration, store_wakeup_protocols and show_wakeup_protocols.
+ */
+static ssize_t show_wakeup_protocols(struct device *device,
+ struct device_attribute *mattr,
+ char *buf)
+{
+ struct rc_dev *dev = to_rc_dev(device);
+ u64 allowed;
+ enum rc_type enabled;
+ char *tmp = buf;
+ int i;
+
+ /* Device is being removed */
+ if (!dev)
+ return -EINVAL;
+
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ allowed = dev->allowed_wakeup_protocols;
+ enabled = dev->wakeup_protocol;
+
+ mutex_unlock(&dev->lock);
+
+ IR_dprintk(1, "%s: allowed - 0x%llx, enabled - %d\n",
+ __func__, (long long)allowed, enabled);
+
+ for (i = 0; i < ARRAY_SIZE(proto_variant_names); i++) {
+ if (allowed & (1ULL << i)) {
+ if (i == enabled)
+ tmp += sprintf(tmp, "[%s] ",
+ proto_variant_names[i]);
+ else
+ tmp += sprintf(tmp, "%s ",
+ proto_variant_names[i]);
+ }
+ }
+
+ if (tmp != buf)
+ tmp--;
+ *tmp = '\n';
+
+ return tmp + 1 - buf;
+}
+
+/**
+ * store_wakeup_protocols() - changes the wakeup IR protocol
+ * @device: the device descriptor
+ * @mattr: the device attribute struct
+ * @buf: a pointer to the input buffer
+ * @len: length of the input buffer
+ *
+ * This routine is for changing the wakeup IR protocol.
+ * It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols.
+ * Returns @len on success or a negative error code.
+ *
+ * dev->lock is taken to guard against races between device
+ * registration, store_wakeup_protocols and show_wakeup_protocols.
+ */
+static ssize_t store_wakeup_protocols(struct device *device,
+ struct device_attribute *mattr,
+ const char *buf, size_t len)
+{
+ struct rc_dev *dev = to_rc_dev(device);
+ enum rc_type protocol;
+ ssize_t rc;
+ u64 allowed;
+ int i;
+
+ /* Device is being removed */
+ if (!dev)
+ return -EINVAL;
+
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ allowed = dev->allowed_wakeup_protocols;
+
+ if (sysfs_streq(buf, "none")) {
+ protocol = RC_TYPE_UNKNOWN;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(proto_variant_names); i++) {
+ if ((allowed & (1ULL << i)) &&
+ sysfs_streq(buf, proto_variant_names[i])) {
+ protocol = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(proto_variant_names)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (dev->encode_wakeup) {
+ u64 mask = 1ULL << protocol;
+
+ ir_raw_load_modules(&mask);
+ if (!mask) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ if (dev->wakeup_protocol != protocol) {
+ dev->wakeup_protocol = protocol;
+ IR_dprintk(1, "Wakeup protocol changed to %d\n", protocol);
+
+ if (protocol == RC_TYPE_RC6_MCE)
+ dev->scancode_wakeup_filter.data = 0x800f0000;
+ else
+ dev->scancode_wakeup_filter.data = 0;
+ dev->scancode_wakeup_filter.mask = 0;
+
+ rc = dev->s_wakeup_filter(dev, &dev->scancode_wakeup_filter);
+ if (rc == 0)
+ rc = len;
+ } else {
+ rc = len;
+ }
+
+out:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
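+/*
+ * A usage sketch from userspace, assuming the device is rc0 and supports the
+ * rc-6-mce variant (device name and values are illustrative only):
+ *
+ *	# echo rc-6-mce > /sys/class/rc/rc0/wakeup_protocols
+ *	# echo 0x800f040c > /sys/class/rc/rc0/wakeup_filter
+ *	# echo 0xffff7fff > /sys/class/rc/rc0/wakeup_filter_mask
+ *
+ * Writing "none" disables the wakeup protocol again.
+ */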
static void rc_dev_release(struct device *device)
{
struct rc_dev *dev = to_rc_dev(device);
@@ -1301,10 +1529,9 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static RC_PROTO_ATTR(protocols, S_IRUGO | S_IWUSR,
- show_protocols, store_protocols, RC_FILTER_NORMAL);
-static RC_PROTO_ATTR(wakeup_protocols, S_IRUGO | S_IWUSR,
- show_protocols, store_protocols, RC_FILTER_WAKEUP);
+static DEVICE_ATTR(protocols, 0644, show_protocols, store_protocols);
+static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols,
+ store_wakeup_protocols);
static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_NORMAL, false);
static RC_FILTER_ATTR(filter_mask, S_IRUGO|S_IWUSR,
@@ -1315,7 +1542,7 @@ static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
static struct attribute *rc_dev_protocol_attrs[] = {
- &dev_attr_protocols.attr.attr,
+ &dev_attr_protocols.attr,
NULL,
};
@@ -1323,15 +1550,6 @@ static struct attribute_group rc_dev_protocol_attr_grp = {
.attrs = rc_dev_protocol_attrs,
};
-static struct attribute *rc_dev_wakeup_protocol_attrs[] = {
- &dev_attr_wakeup_protocols.attr.attr,
- NULL,
-};
-
-static struct attribute_group rc_dev_wakeup_protocol_attr_grp = {
- .attrs = rc_dev_wakeup_protocol_attrs,
-};
-
static struct attribute *rc_dev_filter_attrs[] = {
&dev_attr_filter.attr.attr,
&dev_attr_filter_mask.attr.attr,
@@ -1345,6 +1563,7 @@ static struct attribute_group rc_dev_filter_attr_grp = {
static struct attribute *rc_dev_wakeup_filter_attrs[] = {
&dev_attr_wakeup_filter.attr.attr,
&dev_attr_wakeup_filter_mask.attr.attr,
+ &dev_attr_wakeup_protocols.attr,
NULL,
};
@@ -1357,7 +1576,7 @@ static struct device_type rc_dev_type = {
.uevent = rc_dev_uevent,
};
-struct rc_dev *rc_allocate_device(void)
+struct rc_dev *rc_allocate_device(enum rc_driver_type type)
{
struct rc_dev *dev;
@@ -1365,25 +1584,31 @@ struct rc_dev *rc_allocate_device(void)
if (!dev)
return NULL;
- dev->input_dev = input_allocate_device();
- if (!dev->input_dev) {
- kfree(dev);
- return NULL;
- }
+ if (type != RC_DRIVER_IR_RAW_TX) {
+ dev->input_dev = input_allocate_device();
+ if (!dev->input_dev) {
+ kfree(dev);
+ return NULL;
+ }
- dev->input_dev->getkeycode = ir_getkeycode;
- dev->input_dev->setkeycode = ir_setkeycode;
- input_set_drvdata(dev->input_dev, dev);
+ dev->input_dev->getkeycode = ir_getkeycode;
+ dev->input_dev->setkeycode = ir_setkeycode;
+ input_set_drvdata(dev->input_dev, dev);
- spin_lock_init(&dev->rc_map.lock);
- spin_lock_init(&dev->keylock);
+ setup_timer(&dev->timer_keyup, ir_timer_keyup,
+ (unsigned long)dev);
+
+ spin_lock_init(&dev->rc_map.lock);
+ spin_lock_init(&dev->keylock);
+ }
mutex_init(&dev->lock);
- setup_timer(&dev->timer_keyup, ir_timer_keyup, (unsigned long)dev);
dev->dev.type = &rc_dev_type;
dev->dev.class = &rc_class;
device_initialize(&dev->dev);
+ dev->driver_type = type;
+
__module_get(THIS_MODULE);
return dev;
}
@@ -1410,7 +1635,8 @@ static void devm_rc_alloc_release(struct device *dev, void *res)
rc_free_device(*(struct rc_dev **)res);
}
-struct rc_dev *devm_rc_allocate_device(struct device *dev)
+struct rc_dev *devm_rc_allocate_device(struct device *dev,
+ enum rc_driver_type type)
{
struct rc_dev **dr, *rc;
@@ -1418,7 +1644,7 @@ struct rc_dev *devm_rc_allocate_device(struct device *dev)
if (!dr)
return NULL;
- rc = rc_allocate_device();
+ rc = rc_allocate_device(type);
if (!rc) {
devres_free(dr);
return NULL;
@@ -1433,16 +1659,12 @@ struct rc_dev *devm_rc_allocate_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
-int rc_register_device(struct rc_dev *dev)
+static int rc_setup_rx_device(struct rc_dev *dev)
{
- static bool raw_init = false; /* raw decoders loaded? */
- struct rc_map *rc_map;
- const char *path;
- int attr = 0;
- int minor;
int rc;
+ struct rc_map *rc_map;
- if (!dev || !dev->map_name)
+ if (!dev->map_name)
return -EINVAL;
rc_map = rc_map_get(dev->map_name);
@@ -1451,6 +1673,19 @@ int rc_register_device(struct rc_dev *dev)
if (!rc_map || !rc_map->scan || rc_map->size == 0)
return -EINVAL;
+ rc = ir_setkeytable(dev, rc_map);
+ if (rc)
+ return rc;
+
+ if (dev->change_protocol) {
+ u64 rc_type = (1ll << rc_map->rc_type);
+
+ rc = dev->change_protocol(dev, &rc_type);
+ if (rc < 0)
+ goto out_table;
+ dev->enabled_protocols = rc_type;
+ }
+
set_bit(EV_KEY, dev->input_dev->evbit);
set_bit(EV_REP, dev->input_dev->evbit);
set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1460,6 +1695,61 @@ int rc_register_device(struct rc_dev *dev)
if (dev->close)
dev->input_dev->close = ir_close;
+ /*
+ * Default delay of 250ms is too short for some protocols, especially
+ * since the timeout is currently set to 250ms. Increase it to 500ms,
+ * to avoid wrong repetition of the keycodes. Note that this must be
+ * set after the call to input_register_device().
+ */
+ dev->input_dev->rep[REP_DELAY] = 500;
+
+ /*
+ * As a repeat event on protocols like RC-5 and NEC take as long as
+ * 110/114ms, using 33ms as a repeat period is not the right thing
+ * to do.
+ */
+ dev->input_dev->rep[REP_PERIOD] = 125;
+
+ dev->input_dev->dev.parent = &dev->dev;
+ memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
+ dev->input_dev->phys = dev->input_phys;
+ dev->input_dev->name = dev->input_name;
+
+ /* rc_open will be called here */
+ rc = input_register_device(dev->input_dev);
+ if (rc)
+ goto out_table;
+
+ return 0;
+
+out_table:
+ ir_free_table(&dev->rc_map);
+
+ return rc;
+}
+
+static void rc_free_rx_device(struct rc_dev *dev)
+{
+ if (!dev || dev->driver_type == RC_DRIVER_IR_RAW_TX)
+ return;
+
+ ir_free_table(&dev->rc_map);
+
+ input_unregister_device(dev->input_dev);
+ dev->input_dev = NULL;
+}
+
+int rc_register_device(struct rc_dev *dev)
+{
+ static bool raw_init; /* 'false' default value, raw decoders loaded? */
+ const char *path;
+ int attr = 0;
+ int minor;
+ int rc;
+
+ if (!dev)
+ return -EINVAL;
+
minor = ida_simple_get(&rc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
if (minor < 0)
return minor;
@@ -1470,89 +1760,51 @@ int rc_register_device(struct rc_dev *dev)
atomic_set(&dev->initialized, 0);
dev->dev.groups = dev->sysfs_groups;
- dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
if (dev->s_filter)
dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
if (dev->s_wakeup_filter)
dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
- if (dev->change_wakeup_protocol)
- dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
dev->sysfs_groups[attr++] = NULL;
rc = device_add(&dev->dev);
if (rc)
goto out_unlock;
- rc = ir_setkeytable(dev, rc_map);
- if (rc)
- goto out_dev;
-
- dev->input_dev->dev.parent = &dev->dev;
- memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
- dev->input_dev->phys = dev->input_phys;
- dev->input_dev->name = dev->input_name;
-
- rc = input_register_device(dev->input_dev);
- if (rc)
- goto out_table;
-
- /*
- * Default delay of 250ms is too short for some protocols, especially
- * since the timeout is currently set to 250ms. Increase it to 500ms,
- * to avoid wrong repetition of the keycodes. Note that this must be
- * set after the call to input_register_device().
- */
- dev->input_dev->rep[REP_DELAY] = 500;
-
- /*
- * As a repeat event on protocols like RC-5 and NEC take as long as
- * 110/114ms, using 33ms as a repeat period is not the right thing
- * to do.
- */
- dev->input_dev->rep[REP_PERIOD] = 125;
-
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->input_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW) {
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_setup_rx_device(dev);
+ if (rc)
+ goto out_dev;
+ }
+
+ if (dev->driver_type == RC_DRIVER_IR_RAW ||
+ dev->driver_type == RC_DRIVER_IR_RAW_TX) {
if (!raw_init) {
request_module_nowait("ir-lirc-codec");
raw_init = true;
}
rc = ir_raw_event_register(dev);
if (rc < 0)
- goto out_input;
- }
-
- if (dev->change_protocol) {
- u64 rc_type = (1ll << rc_map->rc_type);
- rc = dev->change_protocol(dev, &rc_type);
- if (rc < 0)
- goto out_raw;
- dev->enabled_protocols = rc_type;
+ goto out_rx;
}
/* Allow the RC sysfs nodes to be accessible */
atomic_set(&dev->initialized, 1);
- IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
+ IR_dprintk(1, "Registered rc%u (driver: %s)\n",
dev->minor,
- dev->driver_name ? dev->driver_name : "unknown",
- rc_map->name ? rc_map->name : "unknown",
- dev->driver_type == RC_DRIVER_IR_RAW ? "raw" : "cooked");
+ dev->driver_name ? dev->driver_name : "unknown");
return 0;
-out_raw:
- if (dev->driver_type == RC_DRIVER_IR_RAW)
- ir_raw_event_unregister(dev);
-out_input:
- input_unregister_device(dev->input_dev);
- dev->input_dev = NULL;
-out_table:
- ir_free_table(&dev->rc_map);
+out_rx:
+ rc_free_rx_device(dev);
out_dev:
device_del(&dev->dev);
out_unlock:
@@ -1598,12 +1850,7 @@ void rc_unregister_device(struct rc_dev *dev)
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
- /* Freeing the table should also call the stop callback */
- ir_free_table(&dev->rc_map);
- IR_dprintk(1, "Freed keycode table\n");
-
- input_unregister_device(dev->input_dev);
- dev->input_dev = NULL;
+ rc_free_rx_device(dev);
device_del(&dev->dev);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 2784f5dae398..56d43be2756b 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -39,10 +39,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <asm/unaligned.h>
@@ -945,7 +941,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
int ret;
u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc)
return NULL;
@@ -960,8 +956,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
usb_to_input_id(rr3->udev, &rc->input_id);
rc->dev.parent = dev;
rc->priv = rr3;
- rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_ALL;
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
rc->timeout = US_TO_NS(redrat3_get_timeout(rr3));
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 436bd58b5f05..923fb2299553 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -137,6 +137,7 @@ struct serial_ir {
ktime_t lastkt;
struct rc_dev *rcdev;
struct platform_device *pdev;
+ struct timer_list timeout_timer;
unsigned int freq;
unsigned int duty_cycle;
@@ -395,9 +396,14 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
frbwrite(data, !(dcd ^ sense));
serial_ir.lastkt = kt;
last_dcd = dcd;
- ir_raw_event_handle(serial_ir.rcdev);
}
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
+
+ mod_timer(&serial_ir.timeout_timer,
+ jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+
+ ir_raw_event_handle(serial_ir.rcdev);
+
return IRQ_HANDLED;
}
@@ -471,6 +477,16 @@ static int hardware_init_port(void)
return 0;
}
+static void serial_ir_timeout(unsigned long arg)
+{
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.timeout = true;
+ ev.duration = serial_ir.rcdev->timeout;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ir_raw_event_handle(serial_ir.rcdev);
+}
+
static int serial_ir_probe(struct platform_device *dev)
{
int i, nlow, nhigh, result;
@@ -500,6 +516,9 @@ static int serial_ir_probe(struct platform_device *dev)
return -EBUSY;
}
+ setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
+ (unsigned long)&serial_ir);
+
result = hardware_init_port();
if (result < 0)
return result;
@@ -738,7 +757,7 @@ static int __init serial_ir_init_module(void)
if (result)
return result;
- rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev);
+ rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
if (!rcdev) {
result = -ENOMEM;
goto serial_cleanup;
@@ -777,11 +796,12 @@ static int __init serial_ir_init_module(void)
rcdev->open = serial_ir_open;
rcdev->close = serial_ir_close;
rcdev->dev.parent = &serial_ir.pdev->dev;
- rcdev->driver_type = RC_DRIVER_IR_RAW;
- rcdev->allowed_protocols = RC_BIT_ALL;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rcdev->driver_name = KBUILD_MODNAME;
rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rcdev->rx_resolution = 250000;
serial_ir.rcdev = rcdev;
@@ -797,6 +817,7 @@ serial_cleanup:
static void __exit serial_ir_exit_module(void)
{
+ del_timer_sync(&serial_ir.timeout_timer);
rc_unregister_device(serial_ir.rcdev);
serial_ir_exit();
}
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 1fa0c9d1c508..f0d7190e3919 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -235,7 +235,7 @@ static int st_rc_probe(struct platform_device *pdev)
if (!rc_dev)
return -ENOMEM;
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
return -ENOMEM;
@@ -290,8 +290,7 @@ static int st_rc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rc_dev);
st_rc_hardware_init(rc_dev);
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
/* rx sampling rate is 10Mhz */
rdev->rx_resolution = 100;
rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 53f9b0af358a..b09c45abb5f3 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -25,10 +25,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/device.h>
@@ -291,7 +287,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
struct device *dev = sz->dev;
int ret;
- rdev = rc_allocate_device();
+ rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev) {
dev_err(dev, "remote dev allocation failed\n");
goto out;
@@ -308,8 +304,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
usb_to_input_id(sz->usbdev, &rdev->input_id);
rdev->dev.parent = dev;
rdev->priv = sz;
- rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index eaadc081760a..25b006167810 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -212,7 +212,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
goto exit_clkdisable_clk;
}
- ir->rc = rc_allocate_device();
+ ir->rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate device\n");
ret = -ENOMEM;
@@ -229,8 +229,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
ir->map_name = of_get_property(dn, "linux,rc-map-name", NULL);
ir->rc->map_name = ir->map_name ?: RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
- ir->rc->driver_type = RC_DRIVER_IR_RAW;
- ir->rc->allowed_protocols = RC_BIT_ALL;
+ ir->rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = SUNXI_IR_SAMPLE;
ir->rc->timeout = MS_TO_NS(SUNXI_IR_TIMEOUT);
ir->rc->driver_name = SUNXI_IR_DEV;
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index bc214e2b3a36..23be7702e2df 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
@@ -205,7 +201,7 @@ static int ttusbir_probe(struct usb_interface *intf,
int altsetting = -1;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!tt || !rc) {
ret = -ENOMEM;
goto out;
@@ -317,12 +313,14 @@ static int ttusbir_probe(struct usb_interface *intf,
rc->input_phys = tt->phys;
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
- rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_ALL;
+ rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
- rc->timeout = MS_TO_NS(100);
+ rc->min_timeout = 1;
+ rc->timeout = IR_DEFAULT_TIMEOUT;
+ rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+
/*
* The precision is NS_PER_BIT, but since every 8th bit can be
* overwritten with garbage the accuracy is at best 2 * NS_PER_BIT.
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 78491ed48d92..dc1c8305ad23 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -34,10 +34,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -194,7 +190,6 @@ enum wbcir_txstate {
#define WBCIR_NAME "Winbond CIR"
#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
-#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
#define WAKEUP_IOMEM_LEN 0x10 /* Wake-Up I/O Reg Len */
#define EHFUNC_IOMEM_LEN 0x10 /* Enhanced Func I/O Reg Len */
#define SP_IOMEM_LEN 0x08 /* Serial Port 3 (IR) Reg Len */
@@ -225,10 +220,6 @@ struct wbcir_data {
u32 txcarrier;
};
-static enum wbcir_protocol protocol = IR_PROTOCOL_RC6;
-module_param(protocol, uint, 0444);
-MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command (0 = RC5, 1 = NEC, 2 = RC6A, default)");
-
static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
@@ -237,15 +228,6 @@ static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
-static unsigned int wake_sc = 0x800F040C;
-module_param(wake_sc, uint, 0644);
-MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command");
-
-static unsigned int wake_rc6mode = 6;
-module_param(wake_rc6mode, uint, 0644);
-MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command (0 = 0, 6 = 6A, default)");
-
-
/*****************************************************************************
*
@@ -696,138 +678,153 @@ wbcir_shutdown(struct pnp_dev *device)
{
struct device *dev = &device->dev;
struct wbcir_data *data = pnp_get_drvdata(device);
+ struct rc_dev *rc = data->dev;
bool do_wake = true;
u8 match[11];
u8 mask[11];
u8 rc6_csl = 0;
+ u8 proto;
+ u32 wake_sc = rc->scancode_wakeup_filter.data;
+ u32 mask_sc = rc->scancode_wakeup_filter.mask;
int i;
memset(match, 0, sizeof(match));
memset(mask, 0, sizeof(mask));
- if (wake_sc == INVALID_SCANCODE || !device_may_wakeup(dev)) {
+ if (!mask_sc || !device_may_wakeup(dev)) {
do_wake = false;
goto finish;
}
- switch (protocol) {
- case IR_PROTOCOL_RC5:
- if (wake_sc > 0xFFF) {
- do_wake = false;
- dev_err(dev, "RC5 - Invalid wake scancode\n");
- break;
- }
-
+ switch (rc->wakeup_protocol) {
+ case RC_TYPE_RC5:
/* Mask = 13 bits, ex toggle */
- mask[0] = 0xFF;
- mask[1] = 0x17;
+ mask[0] = (mask_sc & 0x003f);
+ mask[0] |= (mask_sc & 0x0300) >> 2;
+ mask[1] = (mask_sc & 0x1c00) >> 10;
+ if (mask_sc & 0x0040) /* 2nd start bit */
+ match[1] |= 0x10;
- match[0] = (wake_sc & 0x003F); /* 6 command bits */
- match[0] |= (wake_sc & 0x0180) >> 1; /* 2 address bits */
- match[1] = (wake_sc & 0x0E00) >> 9; /* 3 address bits */
- if (!(wake_sc & 0x0040)) /* 2nd start bit */
+ match[0] = (wake_sc & 0x003F); /* 6 command bits */
+ match[0] |= (wake_sc & 0x0300) >> 2; /* 2 address bits */
+ match[1] = (wake_sc & 0x1c00) >> 10; /* 3 address bits */
+ if (!(wake_sc & 0x0040)) /* 2nd start bit */
match[1] |= 0x10;
+ proto = IR_PROTOCOL_RC5;
break;
- case IR_PROTOCOL_NEC:
- if (wake_sc > 0xFFFFFF) {
- do_wake = false;
- dev_err(dev, "NEC - Invalid wake scancode\n");
- break;
- }
+ case RC_TYPE_NEC:
+ mask[1] = bitrev8(mask_sc);
+ mask[0] = mask[1];
+ mask[3] = bitrev8(mask_sc >> 8);
+ mask[2] = mask[3];
- mask[0] = mask[1] = mask[2] = mask[3] = 0xFF;
-
- match[1] = bitrev8((wake_sc & 0xFF));
+ match[1] = bitrev8(wake_sc);
match[0] = ~match[1];
+ match[3] = bitrev8(wake_sc >> 8);
+ match[2] = ~match[3];
- match[3] = bitrev8((wake_sc & 0xFF00) >> 8);
- if (wake_sc > 0xFFFF)
- match[2] = bitrev8((wake_sc & 0xFF0000) >> 16);
- else
- match[2] = ~match[3];
+ proto = IR_PROTOCOL_NEC;
+ break;
+
+ case RC_TYPE_NECX:
+ mask[1] = bitrev8(mask_sc);
+ mask[0] = mask[1];
+ mask[2] = bitrev8(mask_sc >> 8);
+ mask[3] = bitrev8(mask_sc >> 16);
+ match[1] = bitrev8(wake_sc);
+ match[0] = ~match[1];
+ match[2] = bitrev8(wake_sc >> 8);
+ match[3] = bitrev8(wake_sc >> 16);
+
+ proto = IR_PROTOCOL_NEC;
break;
- case IR_PROTOCOL_RC6:
+ case RC_TYPE_NEC32:
+ mask[0] = bitrev8(mask_sc);
+ mask[1] = bitrev8(mask_sc >> 8);
+ mask[2] = bitrev8(mask_sc >> 16);
+ mask[3] = bitrev8(mask_sc >> 24);
- if (wake_rc6mode == 0) {
- if (wake_sc > 0xFFFF) {
- do_wake = false;
- dev_err(dev, "RC6 - Invalid wake scancode\n");
- break;
- }
+ match[0] = bitrev8(wake_sc);
+ match[1] = bitrev8(wake_sc >> 8);
+ match[2] = bitrev8(wake_sc >> 16);
+ match[3] = bitrev8(wake_sc >> 24);
- /* Command */
- match[0] = wbcir_to_rc6cells(wake_sc >> 0);
- mask[0] = 0xFF;
- match[1] = wbcir_to_rc6cells(wake_sc >> 4);
- mask[1] = 0xFF;
-
- /* Address */
- match[2] = wbcir_to_rc6cells(wake_sc >> 8);
- mask[2] = 0xFF;
- match[3] = wbcir_to_rc6cells(wake_sc >> 12);
- mask[3] = 0xFF;
-
- /* Header */
- match[4] = 0x50; /* mode1 = mode0 = 0, ignore toggle */
- mask[4] = 0xF0;
- match[5] = 0x09; /* start bit = 1, mode2 = 0 */
- mask[5] = 0x0F;
-
- rc6_csl = 44;
-
- } else if (wake_rc6mode == 6) {
- i = 0;
-
- /* Command */
- match[i] = wbcir_to_rc6cells(wake_sc >> 0);
- mask[i++] = 0xFF;
- match[i] = wbcir_to_rc6cells(wake_sc >> 4);
- mask[i++] = 0xFF;
-
- /* Address + Toggle */
- match[i] = wbcir_to_rc6cells(wake_sc >> 8);
- mask[i++] = 0xFF;
- match[i] = wbcir_to_rc6cells(wake_sc >> 12);
- mask[i++] = 0x3F;
-
- /* Customer bits 7 - 0 */
- match[i] = wbcir_to_rc6cells(wake_sc >> 16);
- mask[i++] = 0xFF;
+ proto = IR_PROTOCOL_NEC;
+ break;
+
+ case RC_TYPE_RC6_0:
+ /* Command */
+ match[0] = wbcir_to_rc6cells(wake_sc >> 0);
+ mask[0] = wbcir_to_rc6cells(mask_sc >> 0);
+ match[1] = wbcir_to_rc6cells(wake_sc >> 4);
+ mask[1] = wbcir_to_rc6cells(mask_sc >> 4);
+
+ /* Address */
+ match[2] = wbcir_to_rc6cells(wake_sc >> 8);
+ mask[2] = wbcir_to_rc6cells(mask_sc >> 8);
+ match[3] = wbcir_to_rc6cells(wake_sc >> 12);
+ mask[3] = wbcir_to_rc6cells(mask_sc >> 12);
+
+ /* Header */
+ match[4] = 0x50; /* mode1 = mode0 = 0, ignore toggle */
+ mask[4] = 0xF0;
+ match[5] = 0x09; /* start bit = 1, mode2 = 0 */
+ mask[5] = 0x0F;
+
+ rc6_csl = 44;
+ proto = IR_PROTOCOL_RC6;
+ break;
+
+ case RC_TYPE_RC6_6A_24:
+ case RC_TYPE_RC6_6A_32:
+ case RC_TYPE_RC6_MCE:
+ i = 0;
+
+ /* Command */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 0);
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 0);
+ match[i] = wbcir_to_rc6cells(wake_sc >> 4);
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 4);
+
+ /* Address + Toggle */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 8);
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 8);
+ match[i] = wbcir_to_rc6cells(wake_sc >> 12);
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 12);
+
+ /* Customer bits 7 - 0 */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 16);
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 16);
+
+ if (rc->wakeup_protocol == RC_TYPE_RC6_6A_20) {
+ rc6_csl = 52;
+ } else {
match[i] = wbcir_to_rc6cells(wake_sc >> 20);
- mask[i++] = 0xFF;
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 20);
- if (wake_sc & 0x80000000) {
+ if (rc->wakeup_protocol == RC_TYPE_RC6_6A_24) {
+ rc6_csl = 60;
+ } else {
/* Customer range bit and bits 15 - 8 */
match[i] = wbcir_to_rc6cells(wake_sc >> 24);
- mask[i++] = 0xFF;
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 24);
match[i] = wbcir_to_rc6cells(wake_sc >> 28);
- mask[i++] = 0xFF;
+ mask[i++] = wbcir_to_rc6cells(mask_sc >> 28);
rc6_csl = 76;
- } else if (wake_sc <= 0x007FFFFF) {
- rc6_csl = 60;
- } else {
- do_wake = false;
- dev_err(dev, "RC6 - Invalid wake scancode\n");
- break;
}
-
- /* Header */
- match[i] = 0x93; /* mode1 = mode0 = 1, submode = 0 */
- mask[i++] = 0xFF;
- match[i] = 0x0A; /* start bit = 1, mode2 = 1 */
- mask[i++] = 0x0F;
-
- } else {
- do_wake = false;
- dev_err(dev, "RC6 - Invalid wake mode\n");
}
+ /* Header */
+ match[i] = 0x93; /* mode1 = mode0 = 1, submode = 0 */
+ mask[i++] = 0xFF;
+ match[i] = 0x0A; /* start bit = 1, mode2 = 1 */
+ mask[i++] = 0x0F;
+ proto = IR_PROTOCOL_RC6;
break;
-
default:
do_wake = false;
break;
@@ -855,7 +852,8 @@ finish:
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x01, 0x07);
/* Set CEIR_EN */
- wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x01, 0x01);
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL,
+ (proto << 4) | 0x01, 0x31);
} else {
/* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
@@ -875,6 +873,15 @@ finish:
disable_irq(data->irq);
}
+/*
+ * Wakeup handling is done on shutdown.
+ */
+static int
+wbcir_set_wakeup_filter(struct rc_dev *rc, struct rc_scancode_filter *filter)
+{
+ return 0;
+}
+
static int
wbcir_suspend(struct pnp_dev *device, pm_message_t state)
{
@@ -887,16 +894,11 @@ wbcir_suspend(struct pnp_dev *device, pm_message_t state)
static void
wbcir_init_hw(struct wbcir_data *data)
{
- u8 tmp;
-
/* Disable interrupts */
wbcir_set_irqmask(data, WBCIR_IRQ_NONE);
- /* Set PROT_SEL, RX_INV, Clear CEIR_EN (needed for the led) */
- tmp = protocol << 4;
- if (invert)
- tmp |= 0x08;
- outb(tmp, data->wbase + WBCIR_REG_WCEIR_CTL);
+ /* Set RX_INV, Clear CEIR_EN (needed for the led) */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, invert ? 8 : 0, 0x09);
/* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
@@ -1059,13 +1061,12 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
if (err)
goto exit_free_data;
- data->dev = rc_allocate_device();
+ data->dev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!data->dev) {
err = -ENOMEM;
goto exit_unregister_led;
}
- data->dev->driver_type = RC_DRIVER_IR_RAW;
data->dev->driver_name = DRVNAME;
data->dev->input_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
@@ -1083,7 +1084,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->dev.parent = &device->dev;
data->dev->timeout = MS_TO_NS(100);
data->dev->rx_resolution = US_TO_NS(2);
- data->dev->allowed_protocols = RC_BIT_ALL;
+ data->dev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ data->dev->allowed_wakeup_protocols = RC_BIT_NEC | RC_BIT_NECX |
+ RC_BIT_NEC32 | RC_BIT_RC5 | RC_BIT_RC6_0 |
+ RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE;
+ data->dev->wakeup_protocol = RC_TYPE_RC6_MCE;
+ data->dev->scancode_wakeup_filter.data = 0x800f040c;
+ data->dev->scancode_wakeup_filter.mask = 0xffff7fff;
+ data->dev->s_wakeup_filter = wbcir_set_wakeup_filter;
err = rc_register_device(data->dev);
if (err)
@@ -1199,15 +1208,6 @@ wbcir_init(void)
{
int ret;
- switch (protocol) {
- case IR_PROTOCOL_RC5:
- case IR_PROTOCOL_NEC:
- case IR_PROTOCOL_RC6:
- break;
- default:
- pr_err("Invalid power-on protocol\n");
- }
-
ret = pnp_register_driver(&wbcir_driver);
if (ret)
pr_err("Unable to register driver\n");
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 00489a9df4e4..192b1c7740df 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "fc0011.h"
diff --git a/drivers/media/tuners/fc0012-priv.h b/drivers/media/tuners/fc0012-priv.h
index 1a86ce1d3fcf..0fbf0114bdcd 100644
--- a/drivers/media/tuners/fc0012-priv.h
+++ b/drivers/media/tuners/fc0012-priv.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _FC0012_PRIV_H_
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 30508f44e5f9..dcc323ffbde7 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "fc0012.h"
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 4a23e418daf0..64d07a2adb2e 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _FC0012_H_
diff --git a/drivers/media/tuners/fc0013-priv.h b/drivers/media/tuners/fc0013-priv.h
index bfd49dedea22..2eeaca8abae5 100644
--- a/drivers/media/tuners/fc0013-priv.h
+++ b/drivers/media/tuners/fc0013-priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _FC0013_PRIV_H_
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index f7cf0e9e7c99..91dfa770a5cc 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "fc0013.h"
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index 8c34105c9383..4431e7ceb43d 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _FC0013_H_
diff --git a/drivers/media/tuners/fc001x-common.h b/drivers/media/tuners/fc001x-common.h
index 718818156934..3a96ff76c195 100644
--- a/drivers/media/tuners/fc001x-common.h
+++ b/drivers/media/tuners/fc001x-common.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _FC001X_COMMON_H_
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index 6c3ef2181fcd..27e5bc1c3cb5 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -14,17 +14,14 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#include "it913x.h"
+#include <linux/platform_device.h>
#include <linux/regmap.h>
struct it913x_dev {
- struct i2c_client *client;
+ struct platform_device *pdev;
struct regmap *regmap;
struct dvb_frontend *fe;
u8 chip_ver:2;
@@ -39,13 +36,14 @@ struct it913x_dev {
static int it913x_init(struct dvb_frontend *fe)
{
struct it913x_dev *dev = fe->tuner_priv;
+ struct platform_device *pdev = dev->pdev;
int ret;
unsigned int utmp;
u8 iqik_m_cal, nv_val, buf[2];
static const u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2};
unsigned long timeout;
- dev_dbg(&dev->client->dev, "role %u\n", dev->role);
+ dev_dbg(&pdev->dev, "role %u\n", dev->role);
ret = regmap_write(dev->regmap, 0x80ec4c, 0x68);
if (ret)
@@ -73,7 +71,7 @@ static int it913x_init(struct dvb_frontend *fe)
iqik_m_cal = 6;
break;
default:
- dev_err(&dev->client->dev, "unknown clock identifier %d\n", utmp);
+ dev_err(&pdev->dev, "unknown clock identifier %d\n", utmp);
goto err;
}
@@ -98,14 +96,14 @@ static int it913x_init(struct dvb_frontend *fe)
break;
}
- dev_dbg(&dev->client->dev, "r_fbc_m_bdry took %u ms, val %u\n",
+ dev_dbg(&pdev->dev, "r_fbc_m_bdry took %u ms, val %u\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT), utmp);
dev->fn_min = dev->xtal * utmp;
dev->fn_min /= (dev->fdiv * nv_val);
dev->fn_min *= 1000;
- dev_dbg(&dev->client->dev, "fn_min %u\n", dev->fn_min);
+ dev_dbg(&pdev->dev, "fn_min %u\n", dev->fn_min);
/*
* Chip version BX never sets that flag so we just wait 50ms in that
@@ -125,7 +123,7 @@ static int it913x_init(struct dvb_frontend *fe)
break;
}
- dev_dbg(&dev->client->dev, "p_tsm_init_mode took %u ms, val %u\n",
+ dev_dbg(&pdev->dev, "p_tsm_init_mode took %u ms, val %u\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT), utmp);
} else {
@@ -152,16 +150,17 @@ static int it913x_init(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ dev_dbg(&pdev->dev, "failed %d\n", ret);
return ret;
}
static int it913x_sleep(struct dvb_frontend *fe)
{
struct it913x_dev *dev = fe->tuner_priv;
+ struct platform_device *pdev = dev->pdev;
int ret, len;
- dev_dbg(&dev->client->dev, "role %u\n", dev->role);
+ dev_dbg(&pdev->dev, "role %u\n", dev->role);
dev->active = false;
@@ -178,7 +177,7 @@ static int it913x_sleep(struct dvb_frontend *fe)
else
len = 15;
- dev_dbg(&dev->client->dev, "role %u, len %d\n", dev->role, len);
+ dev_dbg(&pdev->dev, "role %u, len %d\n", dev->role, len);
ret = regmap_bulk_write(dev->regmap, 0x80ec02,
"\x3f\x1f\x3f\x3e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -210,13 +209,14 @@ static int it913x_sleep(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ dev_dbg(&pdev->dev, "failed %d\n", ret);
return ret;
}
static int it913x_set_params(struct dvb_frontend *fe)
{
struct it913x_dev *dev = fe->tuner_priv;
+ struct platform_device *pdev = dev->pdev;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
unsigned int utmp;
@@ -224,7 +224,7 @@ static int it913x_set_params(struct dvb_frontend *fe)
u16 iqik_m_cal, n_div;
u8 u8tmp, n, l_band, lna_band;
- dev_dbg(&dev->client->dev, "role=%u, frequency %u, bandwidth_hz %u\n",
+ dev_dbg(&pdev->dev, "role=%u, frequency %u, bandwidth_hz %u\n",
dev->role, c->frequency, c->bandwidth_hz);
if (!dev->active) {
@@ -290,7 +290,7 @@ static int it913x_set_params(struct dvb_frontend *fe)
pre_lo_freq += (u32) n << 13;
/* Frequency OMEGA_IQIK_M_CAL_MID*/
t_cal_freq = pre_lo_freq + (u32)iqik_m_cal;
- dev_dbg(&dev->client->dev, "t_cal_freq %u, pre_lo_freq %u\n",
+ dev_dbg(&pdev->dev, "t_cal_freq %u, pre_lo_freq %u\n",
t_cal_freq, pre_lo_freq);
if (c->frequency <= 440000000) {
@@ -369,7 +369,7 @@ static int it913x_set_params(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ dev_dbg(&pdev->dev, "failed %d\n", ret);
return ret;
}
@@ -385,40 +385,32 @@ static const struct dvb_tuner_ops it913x_tuner_ops = {
.set_params = it913x_set_params,
};
-static int it913x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int it913x_probe(struct platform_device *pdev)
{
- struct it913x_config *cfg = client->dev.platform_data;
- struct dvb_frontend *fe = cfg->fe;
+ struct it913x_platform_data *pdata = pdev->dev.platform_data;
+ struct dvb_frontend *fe = pdata->fe;
struct it913x_dev *dev;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
int ret;
char *chip_ver_str;
- static const struct regmap_config regmap_config = {
- .reg_bits = 24,
- .val_bits = 8,
- };
dev = kzalloc(sizeof(struct it913x_dev), GFP_KERNEL);
if (dev == NULL) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
+ dev_err(&pdev->dev, "kzalloc() failed\n");
goto err;
}
- dev->client = client;
- dev->fe = cfg->fe;
- dev->chip_ver = cfg->chip_ver;
- dev->role = cfg->role;
- dev->regmap = regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(dev->regmap)) {
- ret = PTR_ERR(dev->regmap);
- goto err_kfree;
- }
+ dev->pdev = pdev;
+ dev->regmap = pdata->regmap;
+ dev->fe = pdata->fe;
+ dev->chip_ver = id->driver_data;
+ dev->role = pdata->role;
fe->tuner_priv = dev;
memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops,
sizeof(struct dvb_tuner_ops));
- i2c_set_clientdata(client, dev);
+ platform_set_drvdata(pdev, dev);
if (dev->chip_ver == 1)
chip_ver_str = "AX";
@@ -427,41 +419,37 @@ static int it913x_probe(struct i2c_client *client,
else
chip_ver_str = "??";
- dev_info(&dev->client->dev, "ITE IT913X %s successfully attached\n",
- chip_ver_str);
- dev_dbg(&dev->client->dev, "chip_ver %u, role %u\n",
- dev->chip_ver, dev->role);
+ dev_info(&pdev->dev, "ITE IT913X %s successfully attached\n",
+ chip_ver_str);
+ dev_dbg(&pdev->dev, "chip_ver %u, role %u\n", dev->chip_ver, dev->role);
return 0;
-
-err_kfree:
- kfree(dev);
err:
- dev_dbg(&client->dev, "failed %d\n", ret);
+ dev_dbg(&pdev->dev, "failed %d\n", ret);
return ret;
}
-static int it913x_remove(struct i2c_client *client)
+static int it913x_remove(struct platform_device *pdev)
{
- struct it913x_dev *dev = i2c_get_clientdata(client);
+ struct it913x_dev *dev = platform_get_drvdata(pdev);
struct dvb_frontend *fe = dev->fe;
- dev_dbg(&client->dev, "\n");
+ dev_dbg(&pdev->dev, "\n");
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
- regmap_exit(dev->regmap);
kfree(dev);
return 0;
}
-static const struct i2c_device_id it913x_id_table[] = {
- {"it913x", 0},
- {}
+static const struct platform_device_id it913x_id_table[] = {
+ {"it9133ax-tuner", 1},
+ {"it9133bx-tuner", 2},
+ {},
};
-MODULE_DEVICE_TABLE(i2c, it913x_id_table);
+MODULE_DEVICE_TABLE(platform, it913x_id_table);
-static struct i2c_driver it913x_driver = {
+static struct platform_driver it913x_driver = {
.driver = {
.name = "it913x",
.suppress_bind_attrs = true,
@@ -471,7 +459,7 @@ static struct i2c_driver it913x_driver = {
.id_table = it913x_id_table,
};
-module_i2c_driver(it913x_driver);
+module_platform_driver(it913x_driver);
MODULE_DESCRIPTION("ITE IT913X silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/it913x.h b/drivers/media/tuners/it913x.h
index 33de53d4a566..226f657228fb 100644
--- a/drivers/media/tuners/it913x.h
+++ b/drivers/media/tuners/it913x.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef IT913X_H
@@ -25,26 +21,16 @@
#include "dvb_frontend.h"
-/*
- * I2C address
- * 0x38, 0x3a, 0x3c, 0x3e
+/**
+ * struct it913x_platform_data - Platform data for the it913x driver
+ * @regmap: af9033 demod driver regmap.
+ * @fe: af9033 demod driver DVB frontend.
+ * @role: Chip role, single or dual configuration.
*/
-struct it913x_config {
- /*
- * pointer to DVB frontend
- */
- struct dvb_frontend *fe;
- /*
- * chip version
- * 1 = IT9135 AX
- * 2 = IT9135 BX
- */
- unsigned int chip_ver:2;
-
- /*
- * tuner role
- */
+struct it913x_platform_data {
+ struct regmap *regmap;
+ struct dvb_frontend *fe;
#define IT913X_ROLE_SINGLE 0
#define IT913X_ROLE_DUAL_MASTER 1
#define IT913X_ROLE_DUAL_SLAVE 2
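A minimal sketch of how a bridge/demod driver might hand this platform data to the new it913x platform driver, assuming it already owns a regmap and a DVB frontend. attach_it913x_tuner, my_regmap and my_fe are illustrative names, and the role member is assumed to remain in the struct per the @role kernel-doc line:

#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "it913x.h"

/* Illustrative helper; the "it9133ax-tuner"/"it9133bx-tuner" id strings come
 * from it913x_id_table and select the chip version via driver_data. */
static struct platform_device *attach_it913x_tuner(struct device *parent,
						   struct regmap *my_regmap,
						   struct dvb_frontend *my_fe)
{
	struct it913x_platform_data pdata = {
		.regmap = my_regmap,
		.fe = my_fe,
		.role = IT913X_ROLE_SINGLE,
	};

	/* platform_device_register_data() copies pdata, so a stack copy is fine */
	return platform_device_register_data(parent, "it9133ax-tuner",
					     PLATFORM_DEVID_AUTO,
					     &pdata, sizeof(pdata));
}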
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index c3f10925b0d4..a86c08114915 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/max2165.h b/drivers/media/tuners/max2165.h
index aadd9fea59e4..3120c54ec154 100644
--- a/drivers/media/tuners/max2165.h
+++ b/drivers/media/tuners/max2165.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MAX2165_H__
diff --git a/drivers/media/tuners/max2165_priv.h b/drivers/media/tuners/max2165_priv.h
index 91bbe021a08d..20d7751881a3 100644
--- a/drivers/media/tuners/max2165_priv.h
+++ b/drivers/media/tuners/max2165_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MAX2165_PRIV_H__
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index aba580b4ac2c..12f545ef1243 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/mc44s803.h b/drivers/media/tuners/mc44s803.h
index 6b40df339284..f68133fb9760 100644
--- a/drivers/media/tuners/mc44s803.h
+++ b/drivers/media/tuners/mc44s803.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef MC44S803_H
diff --git a/drivers/media/tuners/mc44s803_priv.h b/drivers/media/tuners/mc44s803_priv.h
index 14a92780906d..52325395dfe7 100644
--- a/drivers/media/tuners/mc44s803_priv.h
+++ b/drivers/media/tuners/mc44s803_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef MC44S803_PRIV_H
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 94077ea78dde..2e487f9a2cc3 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
/* In that file, frequencies are expressed in kiloHertz to avoid 32 bits overflows */
@@ -71,13 +67,24 @@ static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
// Writes a set of consecutive registers
static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
{
+ int rem, val_len;
+ u8 xfer_buf[16];
struct i2c_msg msg = {
- .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = len
+ .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
};
- if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
- printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n",(int)len);
- return -EREMOTEIO;
+
+ for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
+ val_len = min_t(int, rem, priv->i2c_max_regs);
+ msg.len = 1 + val_len;
+ xfer_buf[0] = buf[0] + len - 1 - rem;
+ memcpy(&xfer_buf[1], &buf[1 + len - 1 - rem], val_len);
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
+ return -EREMOTEIO;
+ }
}
+
return 0;
}
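For illustration, a worked pass through the chunking arithmetic above under assumed values buf = {0x10, a, b, c, d, e}, len = 6 and priv->i2c_max_regs = 2 (a..e are placeholder register values; 0x10 is an arbitrary start register):
  pass 1: rem = 5, val_len = 2, transfer = {0x10, a, b}
  pass 2: rem = 3, val_len = 2, transfer = {0x12, c, d}
  pass 3: rem = 1, val_len = 1, transfer = {0x14, e}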
@@ -306,9 +313,16 @@ static int mt2060_init(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */
+ if (priv->sleep) {
+ ret = mt2060_writereg(priv, REG_MISC_CTRL, 0x20);
+ if (ret)
+ goto err_i2c_gate_ctrl;
+ }
+
ret = mt2060_writereg(priv, REG_VGAG,
(priv->cfg->clock_out << 6) | 0x33);
+err_i2c_gate_ctrl:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */
@@ -325,7 +339,13 @@ static int mt2060_sleep(struct dvb_frontend *fe)
ret = mt2060_writereg(priv, REG_VGAG,
(priv->cfg->clock_out << 6) | 0x30);
+ if (ret)
+ goto err_i2c_gate_ctrl;
+
+ if (priv->sleep)
+ ret = mt2060_writereg(priv, REG_MISC_CTRL, 0xe8);
+err_i2c_gate_ctrl:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */
@@ -369,6 +389,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
priv->cfg = cfg;
priv->i2c = i2c;
priv->if1_freq = if1;
+ priv->i2c_max_regs = ~0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */
@@ -396,6 +417,98 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
}
EXPORT_SYMBOL(mt2060_attach);
+static int mt2060_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mt2060_platform_data *pdata = client->dev.platform_data;
+ struct dvb_frontend *fe;
+ struct mt2060_priv *dev;
+ int ret;
+ u8 chip_id;
+
+ dev_dbg(&client->dev, "\n");
+
+ if (!pdata) {
+ dev_err(&client->dev, "Cannot proceed without platform data\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ fe = pdata->dvb_frontend;
+ dev->config.i2c_address = client->addr;
+ dev->config.clock_out = pdata->clock_out;
+ dev->cfg = &dev->config;
+ dev->i2c = client->adapter;
+ dev->if1_freq = pdata->if1 ? pdata->if1 : 1220;
+ dev->client = client;
+ dev->i2c_max_regs = pdata->i2c_write_max ? pdata->i2c_write_max - 1 : ~0;
+ dev->sleep = true;
+
+ ret = mt2060_readreg(dev, REG_PART_REV, &chip_id);
+ if (ret) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ dev_dbg(&client->dev, "chip id=%02x\n", chip_id);
+
+ if (chip_id != PART_REV) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Power on, calibrate, sleep */
+ ret = mt2060_writereg(dev, REG_MISC_CTRL, 0x20);
+ if (ret)
+ goto err;
+ mt2060_calibrate(dev);
+ ret = mt2060_writereg(dev, REG_MISC_CTRL, 0xe8);
+ if (ret)
+ goto err;
+
+ dev_info(&client->dev, "Microtune MT2060 successfully identified\n");
+ memcpy(&fe->ops.tuner_ops, &mt2060_tuner_ops, sizeof(fe->ops.tuner_ops));
+ fe->ops.tuner_ops.release = NULL;
+ fe->tuner_priv = dev;
+ i2c_set_clientdata(client, dev);
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mt2060_remove(struct i2c_client *client)
+{
+ dev_dbg(&client->dev, "\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id mt2060_id_table[] = {
+ {"mt2060", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mt2060_id_table);
+
+static struct i2c_driver mt2060_driver = {
+ .driver = {
+ .name = "mt2060",
+ .suppress_bind_attrs = true,
+ },
+ .probe = mt2060_probe,
+ .remove = mt2060_remove,
+ .id_table = mt2060_id_table,
+};
+
+module_i2c_driver(mt2060_driver);
+
MODULE_AUTHOR("Olivier DANET");
MODULE_DESCRIPTION("Microtune MT2060 silicon tuner driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/mt2060.h b/drivers/media/tuners/mt2060.h
index 6efed359a24f..cc534eb41378 100644
--- a/drivers/media/tuners/mt2060.h
+++ b/drivers/media/tuners/mt2060.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef MT2060_H
@@ -25,6 +21,29 @@
struct dvb_frontend;
struct i2c_adapter;
+/*
+ * I2C address
+ * 0x60, ...
+ */
+
+/**
+ * struct mt2060_platform_data - Platform data for the mt2060 driver
+ * @clock_out: Clock output setting. 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1.
+ * @if1: First IF used [MHz]. 0 defaults to 1220.
+ * @i2c_write_max: Maximum number of bytes I2C adapter can write at once.
+ * 0 defaults to maximum.
+ * @dvb_frontend: DVB frontend.
+ */
+
+struct mt2060_platform_data {
+ u8 clock_out;
+ u16 if1;
+ unsigned int i2c_write_max:5;
+ struct dvb_frontend *dvb_frontend;
+};
+
+
+/* configuration struct for mt2060_attach() */
struct mt2060_config {
u8 i2c_address;
u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
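A hedged sketch of how a bridge driver could bind the tuner through the new I2C driver model using this platform data. The adapter, frontend, 0x60 address and the i2c_write_max value are placeholders chosen only for illustration; the request_module-then-i2c_new_device pattern mirrors the cx231xx hunk later in this series, so probing runs while pdata is still in scope:

#include <linux/i2c.h>
#include <linux/kmod.h>
#include "mt2060.h"

static struct i2c_client *attach_mt2060(struct i2c_adapter *adapter,
					struct dvb_frontend *fe)
{
	struct mt2060_platform_data pdata = {
		.clock_out = 0,		/* clock output disabled */
		.if1 = 1220,		/* first IF in MHz */
		.i2c_write_max = 9,	/* adapter limited to 9-byte writes */
		.dvb_frontend = fe,
	};
	struct i2c_board_info board_info = {
		.type = "mt2060",
		.addr = 0x60,
		.platform_data = &pdata,
	};

	/* Load the driver first so the probe happens synchronously */
	request_module(board_info.type);
	return i2c_new_device(adapter, &board_info);
}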
diff --git a/drivers/media/tuners/mt2060_priv.h b/drivers/media/tuners/mt2060_priv.h
index 2b60de6c707d..a6c931c1a5a7 100644
--- a/drivers/media/tuners/mt2060_priv.h
+++ b/drivers/media/tuners/mt2060_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
*/
#ifndef MT2060_PRIV_H
@@ -95,10 +91,21 @@
struct mt2060_priv {
struct mt2060_config *cfg;
struct i2c_adapter *i2c;
+ struct i2c_client *client;
+ struct mt2060_config config;
+ u8 i2c_max_regs;
u32 frequency;
u16 if1_freq;
u8 fmfreq;
+
+ /*
+ * Use REG_MISC_CTRL register for sleep. That drops sleep power usage
+ * about 0.9W (huge!). Register bit meanings are unknown, so let it be
+ * disabled by default to avoid possible regression. Convert driver to
+ * i2c model in order to enable it.
+ */
+ bool sleep;
};
#endif
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index e7790e4afcfe..dd85d58fa8d0 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/mt2131.h b/drivers/media/tuners/mt2131.h
index 8267a6ae5d84..050da5540b15 100644
--- a/drivers/media/tuners/mt2131.h
+++ b/drivers/media/tuners/mt2131.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MT2131_H__
diff --git a/drivers/media/tuners/mt2131_priv.h b/drivers/media/tuners/mt2131_priv.h
index 91283b599cb3..d2b6f29182cc 100644
--- a/drivers/media/tuners/mt2131_priv.h
+++ b/drivers/media/tuners/mt2131_priv.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MT2131_PRIV_H__
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index b16dfa5e85fb..4081fd97c3b2 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/i2c.h>
diff --git a/drivers/media/tuners/mxl5007t.h b/drivers/media/tuners/mxl5007t.h
index e786d1f23ff1..273f61aeb8be 100644
--- a/drivers/media/tuners/mxl5007t.h
+++ b/drivers/media/tuners/mxl5007t.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MXL5007T_H__
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index a2c6cd1c3923..ee33b7cc7682 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "qt1010.h"
#include "qt1010_priv.h"
diff --git a/drivers/media/tuners/qt1010.h b/drivers/media/tuners/qt1010.h
index e3198f23437c..276e59e85032 100644
--- a/drivers/media/tuners/qt1010.h
+++ b/drivers/media/tuners/qt1010.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef QT1010_H
diff --git a/drivers/media/tuners/qt1010_priv.h b/drivers/media/tuners/qt1010_priv.h
index 2c42d3f01636..4cb78ecc8985 100644
--- a/drivers/media/tuners/qt1010_priv.h
+++ b/drivers/media/tuners/qt1010_priv.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef QT1010_PRIV_H
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 8357a3c08a70..c56fcf5d48e3 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "tda18218_priv.h"
diff --git a/drivers/media/tuners/tda18218.h b/drivers/media/tuners/tda18218.h
index 076b5f2e888d..9c0e3fd7ed7f 100644
--- a/drivers/media/tuners/tda18218.h
+++ b/drivers/media/tuners/tda18218.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef TDA18218_H
diff --git a/drivers/media/tuners/tda18218_priv.h b/drivers/media/tuners/tda18218_priv.h
index 285b77366c8d..9d04781966e7 100644
--- a/drivers/media/tuners/tda18218_priv.h
+++ b/drivers/media/tuners/tda18218_priv.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef TDA18218_PRIV_H
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 2137eadf30f1..8400808f8f7f 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 03eef9b87a24..e30948e4ff87 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/xc4000.h b/drivers/media/tuners/xc4000.h
index 40517860cf67..8af93b63ff9e 100644
--- a/drivers/media/tuners/xc4000.h
+++ b/drivers/media/tuners/xc4000.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __XC4000_H__
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 796e7638b3b2..0345b274eccc 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 336bd49eb09b..42bbec2409fd 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __XC5000_H__
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 6b469e8c4c6e..313f659f0bfb 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au0828.h"
diff --git a/drivers/media/usb/au0828/au0828-cards.h b/drivers/media/usb/au0828/au0828-cards.h
index 48a1882c2b6b..1f4412ee6da4 100644
--- a/drivers/media/usb/au0828/au0828-cards.h
+++ b/drivers/media/usb/au0828/au0828-cards.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define AU0828_BOARD_UNKNOWN 0
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bf53553d2624..739df61cec4f 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au0828.h"
@@ -153,9 +149,11 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
}
/* clear enable_source, disable_source */
+ mutex_lock(&mdev->graph_mutex);
dev->media_dev->source_priv = NULL;
dev->media_dev->enable_source = NULL;
dev->media_dev->disable_source = NULL;
+ mutex_unlock(&mdev->graph_mutex);
media_device_unregister(dev->media_dev);
media_device_cleanup(dev->media_dev);
@@ -278,6 +276,7 @@ create_link:
}
}
+/* Callers should hold graph_mutex */
static int au0828_enable_source(struct media_entity *entity,
struct media_pipeline *pipe)
{
@@ -291,8 +290,6 @@ static int au0828_enable_source(struct media_entity *entity,
if (!mdev)
return -ENODEV;
- mutex_lock(&mdev->graph_mutex);
-
dev = mdev->source_priv;
/*
@@ -397,7 +394,7 @@ static int au0828_enable_source(struct media_entity *entity,
goto end;
}
- ret = __media_entity_pipeline_start(entity, pipe);
+ ret = __media_pipeline_start(entity, pipe);
if (ret) {
pr_err("Start Pipeline: %s->%s Error %d\n",
source->name, entity->name, ret);
@@ -419,12 +416,12 @@ static int au0828_enable_source(struct media_entity *entity,
dev->active_source->name, dev->active_sink->name,
dev->active_link_owner->name, ret);
end:
- mutex_unlock(&mdev->graph_mutex);
pr_debug("au0828_enable_source() end %s %d %d\n",
entity->name, entity->function, ret);
return ret;
}
+/* Callers should hold graph_mutex */
static void au0828_disable_source(struct media_entity *entity)
{
int ret = 0;
@@ -434,13 +431,10 @@ static void au0828_disable_source(struct media_entity *entity)
if (!mdev)
return;
- mutex_lock(&mdev->graph_mutex);
dev = mdev->source_priv;
- if (!dev->active_link) {
- ret = -ENODEV;
- goto end;
- }
+ if (!dev->active_link)
+ return;
/* link is active - stop pipeline from source (tuner) */
if (dev->active_link->sink->entity == dev->active_sink &&
@@ -450,8 +444,8 @@ static void au0828_disable_source(struct media_entity *entity)
* has active pipeline
*/
if (dev->active_link_owner != entity)
- goto end;
- __media_entity_pipeline_stop(entity);
+ return;
+ __media_pipeline_stop(entity);
ret = __media_entity_setup_link(dev->active_link, 0);
if (ret)
pr_err("Deactivate link Error %d\n", ret);
@@ -465,9 +459,6 @@ static void au0828_disable_source(struct media_entity *entity)
dev->active_source = NULL;
dev->active_sink = NULL;
}
-
-end:
- mutex_unlock(&mdev->graph_mutex);
}
#endif
@@ -549,9 +540,11 @@ static int au0828_media_device_register(struct au0828_dev *dev,
return ret;
}
/* set enable_source */
+ mutex_lock(&dev->media_dev->graph_mutex);
dev->media_dev->source_priv = (void *) dev;
dev->media_dev->enable_source = au0828_enable_source;
dev->media_dev->disable_source = au0828_disable_source;
+ mutex_unlock(&dev->media_dev->graph_mutex);
#endif
return 0;
}
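The au0828 hunks above move graph_mutex handling out of the enable/disable hooks and into the registration/unregistration paths. A minimal sketch of the resulting calling convention, assuming the media core (or any other caller) dereferences the hooks like this; start_tuner_pipeline is an illustrative name, not part of this patch:

#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/media-entity.h>

/* Illustrative caller: the hooks no longer take graph_mutex themselves,
 * so whoever invokes enable_source/disable_source must hold it. */
static int start_tuner_pipeline(struct media_device *mdev,
				struct media_entity *entity,
				struct media_pipeline *pipe)
{
	int ret = 0;

	mutex_lock(&mdev->graph_mutex);
	if (mdev->enable_source)
		ret = mdev->enable_source(entity, pipe);
	mutex_unlock(&mdev->graph_mutex);

	return ret;
}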
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 0e174e860614..7e0c9b795e52 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au0828.h"
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index ae7ac6669769..42b352bb4f02 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au0828.h"
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 1e66e7828d8f..9ec919c68482 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -298,7 +298,7 @@ int au0828_rc_register(struct au0828_dev *dev)
return -ENODEV;
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir || !rc)
goto error;
@@ -343,7 +343,6 @@ int au0828_rc_register(struct au0828_dev *dev)
rc->input_id.product = le16_to_cpu(dev->usbdev->descriptor.idProduct);
rc->dev.parent = &dev->usbdev->dev;
rc->driver_name = "au0828-input";
- rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 |
RC_BIT_RC5;
diff --git a/drivers/media/usb/au0828/au0828-reg.h b/drivers/media/usb/au0828/au0828-reg.h
index 2140f4cfb645..7aaf10739c8b 100644
--- a/drivers/media/usb/au0828/au0828-reg.h
+++ b/drivers/media/usb/au0828/au0828-reg.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* We'll start to rename these registers once we have a better
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 7a10eaa38f67..16f9125a985a 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -13,11 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
/* Developer Notes:
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index dd7b378fe070..88e59748ebc2 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index cdef677d57ec..81f72c0b561f 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -22,10 +22,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
****************************************************************************/
#ifndef __CPIA2_H__
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 0310fd6ed103..431dd0b4b332 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Stripped of 2.4 stuff ready for main kernel submit by
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
diff --git a/drivers/media/usb/cpia2/cpia2_registers.h b/drivers/media/usb/cpia2/cpia2_registers.h
index 3bbec514a967..eebe46ea9c01 100644
--- a/drivers/media/usb/cpia2/cpia2_registers.h
+++ b/drivers/media/usb/cpia2/cpia2_registers.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
****************************************************************************/
#ifndef CPIA2_REGISTER_HEADER
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 37f9b30b0abc..1c7e16e5d88b 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Stripped of 2.4 stuff ready for main kernel submit by
* Alan Cox <alan@lxorguk.ukuu.org.uk>
****************************************************************************/
@@ -551,12 +547,10 @@ static int write_packet(struct usb_device *udev,
if (!registers || size <= 0)
return -EINVAL;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(registers, size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, registers, size);
-
ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 9caea8344547..7122023e7004 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Stripped of 2.4 stuff ready for main kernel submit by
* Alan Cox <alan@lxorguk.ukuu.org.uk>
****************************************************************************/
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 0cced3e5b040..58de80bff4c7 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -50,6 +50,7 @@ config VIDEO_CX231XX_DVB
select DVB_LGDT3306A if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2165 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
---help---
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 29d450c15f29..509d9711d590 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx231xx.h"
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 8263c4b0610b..cf80842dfa08 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx231xx.h"
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 36bc25494319..f730fdbc9156 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -841,6 +841,33 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD] = {
+ .name = "Evromedia USB Full Hybrid Full HD",
+ .tuner_type = TUNER_ABSENT,
+ .demod_addr = 0x64, /* 0xc8 >> 1 */
+ .demod_i2c_master = I2C_1_MUX_3,
+ .has_dvb = 1,
+ .ir_i2c_master = I2C_0,
+ .norm = V4L2_STD_PAL,
+ .output_mode = OUT_MODE_VIP11,
+ .tuner_addr = 0x60, /* 0xc0 >> 1 */
+ .tuner_i2c_master = I2C_2,
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = 0,
+ .amux = CX231XX_AMUX_VIDEO,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -908,6 +935,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_OTG102},
{USB_DEVICE(USB_VID_TERRATEC, 0x00a6),
.driver_info = CX231XX_BOARD_TERRATEC_GRABBY},
+ {USB_DEVICE(0x1b80, 0xd3b2),
+ .driver_info = CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD},
{},
};
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 550ec932f931..46646ecd2dbc 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -355,7 +355,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
*/
if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
(ven_req->bRequest == 0x5) ||
- (ven_req->bRequest == 0x6))) {
+ (ven_req->bRequest == 0x6) ||
+
+ /* Internal Master 3 Bus can send
+ * and receive only 4 bytes at a time
+ */
+ (ven_req->bRequest == 0x2))) {
unsend_size = 0;
pdata = ven_req->pBuff;
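For illustration, a sketch of the idea behind the extra bRequest 0x2 case above: payloads longer than 4 bytes are split into at most 4-byte pieces before being sent. send_piece is a caller-supplied callback standing in for the per-chunk control transfer; it is not a function from this driver:

#include <linux/kernel.h>
#include <linux/types.h>

static int send_in_4byte_pieces(u8 *buf, int len,
				int (*send_piece)(u8 *piece, int piece_len))
{
	int off, ret;

	for (off = 0; off < len; off += 4) {
		int piece_len = min(4, len - off);

		ret = send_piece(buf + off, piece_len);
		if (ret < 0)
			return ret;
	}
	return 0;
}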
diff --git a/drivers/media/usb/cx231xx/cx231xx-dif.h b/drivers/media/usb/cx231xx/cx231xx-dif.h
index 2b63c2f6d3b0..2b9eb9fd7c52 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dif.h
+++ b/drivers/media/usb/cx231xx/cx231xx-dif.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY, without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program, if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _CX231XX_DIF_H
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 2868546999ca..46427fd3b220 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -33,6 +33,7 @@
#include "s5h1411.h"
#include "lgdt3305.h"
#include "si2165.h"
+#include "si2168.h"
#include "mb86a20s.h"
#include "si2157.h"
#include "lgdt3306a.h"
@@ -949,6 +950,75 @@ static int dvb_init(struct cx231xx *dev)
&pv_tda18271_config);
break;
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
+ {
+ struct si2157_config si2157_config = {};
+ struct si2168_config si2168_config = {};
+ struct i2c_board_info info = {};
+ struct i2c_client *client;
+ struct i2c_adapter *adapter;
+
+ /* attach demodulator chip */
+ si2168_config.ts_mode = SI2168_TS_SERIAL; /* from *.inf file */
+ si2168_config.fe = &dev->dvb->frontend;
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.ts_clock_inv = true;
+
+ strlcpy(info.type, "si2168", sizeof(info.type));
+ info.addr = dev->board.demod_addr;
+ info.platform_data = &si2168_config;
+
+ request_module(info.type);
+ client = i2c_new_device(demod_i2c, &info);
+
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ /* attach tuner chip */
+ si2157_config.fe = dev->dvb->frontend;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
+ si2157_config.if_port = 1;
+ si2157_config.inversion = false;
+
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, "si2157", sizeof(info.type));
+ info.addr = dev->board.tuner_addr;
+ info.platform_data = &si2157_config;
+
+ request_module(info.type);
+ client = i2c_new_device(tuner_i2c, &info);
+
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dev->cx231xx_reset_analog_tuner = NULL;
+ dev->dvb->i2c_client_tuner = client;
+ break;
+ }
default:
dev_err(dev->dev,
"%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 15d8d1b5f05c..6e80f3c573f3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -72,7 +72,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
memset(&info, 0, sizeof(struct i2c_board_info));
memset(&dev->init_data, 0, sizeof(dev->init_data));
- dev->init_data.rc_dev = rc_allocate_device();
+ dev->init_data.rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!dev->init_data.rc_dev)
return -ENOMEM;
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 90c867683076..d9792ea4bbc6 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -78,6 +78,7 @@
#define CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx 20
#define CX231XX_BOARD_HAUPPAUGE_955Q 21
#define CX231XX_BOARD_TERRATEC_GRABBY 22
+#define CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD 23
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 524533d3eb29..0e4944b2b0f4 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -156,3 +156,11 @@ config DVB_USB_DVBSKY
select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the USB receivers from DVBSky.
+
+config DVB_USB_ZD1301
+ tristate "ZyDAS ZD1301"
+ depends on DVB_USB_V2
+ select DVB_ZD1301_DEMOD if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the ZyDAS ZD1301 DVB USB receiver.
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index f10d4df0eae5..969f68e55265 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -40,6 +40,9 @@ obj-$(CONFIG_DVB_USB_RTL28XXU) += dvb-usb-rtl28xxu.o
dvb-usb-dvbsky-objs := dvbsky.o
obj-$(CONFIG_DVB_USB_DVBSKY) += dvb-usb-dvbsky.o
+dvb-usb-zd1301-objs := zd1301.o
+obj-$(CONFIG_DVB_USB_ZD1301) += zd1301.o
+
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
ccflags-y += -I$(srctree)/drivers/media/tuners
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 29011dfabb11..caa1e6101f58 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "af9015.h"
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.h b/drivers/media/usb/dvb-usb-v2/af9015.h
index 1db1bb0d57bc..2dd9231a8ece 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.h
+++ b/drivers/media/usb/dvb-usb-v2/af9015.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef AF9015_H
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c673726d9b70..4df9486e19b9 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -335,14 +335,12 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
- (msg[0].addr == state->af9033_i2c_addr[1]) ||
- (state->chip_type == 0x9135)) {
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
- if (msg[0].addr == state->af9033_i2c_addr[1] ||
- msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
+ if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
@@ -396,14 +394,12 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
- (msg[0].addr == state->af9033_i2c_addr[1]) ||
- (state->chip_type == 0x9135)) {
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
- if (msg[0].addr == state->af9033_i2c_addr[1] ||
- msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
+ if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
@@ -496,7 +492,8 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
{
struct state *state = d_to_priv(d);
struct usb_interface *intf = d->intf;
- int ret, ts_mode_invalid;
+ int ret, i, ts_mode_invalid;
+ unsigned int utmp, eeprom_addr;
u8 tmp;
u8 wbuf[1] = { 1 };
u8 rbuf[4];
@@ -518,25 +515,48 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
state->prechip_version, state->chip_version, state->chip_type);
if (state->chip_type == 0x9135) {
- if (state->chip_version == 0x02)
+ if (state->chip_version == 0x02) {
*name = AF9035_FIRMWARE_IT9135_V2;
- else
+ utmp = 0x00461d;
+ } else {
*name = AF9035_FIRMWARE_IT9135_V1;
- state->eeprom_addr = EEPROM_BASE_IT9135;
+ utmp = 0x00461b;
+ }
+
+ /* Check if eeprom exists */
+ ret = af9035_rd_reg(d, utmp, &tmp);
+ if (ret < 0)
+ goto err;
+
+ if (tmp == 0x00) {
+ dev_dbg(&intf->dev, "no eeprom\n");
+ state->no_eeprom = true;
+ goto check_firmware_status;
+ }
+
+ eeprom_addr = EEPROM_BASE_IT9135;
} else if (state->chip_type == 0x9306) {
*name = AF9035_FIRMWARE_IT9303;
- state->eeprom_addr = EEPROM_BASE_IT9135;
+ state->no_eeprom = true;
+ goto check_firmware_status;
} else {
*name = AF9035_FIRMWARE_AF9035;
- state->eeprom_addr = EEPROM_BASE_AF9035;
+ eeprom_addr = EEPROM_BASE_AF9035;
+ }
+
+ /* Read and store eeprom */
+ for (i = 0; i < 256; i += 32) {
+ ret = af9035_rd_regs(d, eeprom_addr + i, &state->eeprom[i], 32);
+ if (ret < 0)
+ goto err;
}
+ dev_dbg(&intf->dev, "eeprom dump:\n");
+ for (i = 0; i < 256; i += 16)
+ dev_dbg(&intf->dev, "%*ph\n", 16, &state->eeprom[i]);
/* check for dual tuner mode */
- ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
- if (ret < 0)
- goto err;
-
+ tmp = state->eeprom[EEPROM_TS_MODE];
ts_mode_invalid = 0;
switch (tmp) {
case 0:
@@ -560,7 +580,7 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
if (ts_mode_invalid)
dev_info(&intf->dev, "ts mode=%d not supported, defaulting to single tuner mode!", tmp);
-
+check_firmware_status:
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
@@ -750,15 +770,11 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
goto err;
/* tell the slave I2C address */
- ret = af9035_rd_reg(d,
- state->eeprom_addr + EEPROM_2ND_DEMOD_ADDR,
- &tmp);
- if (ret < 0)
- goto err;
+ tmp = state->eeprom[EEPROM_2ND_DEMOD_ADDR];
- /* use default I2C address if eeprom has no address set */
+ /* Use default I2C address if eeprom has no address set */
if (!tmp)
- tmp = 0x3a;
+ tmp = 0x1d << 1; /* 8-bit format used by chip */
if ((state->chip_type == 0x9135) ||
(state->chip_type == 0x9306)) {
@@ -819,11 +835,11 @@ static int af9035_read_config(struct dvb_usb_device *d)
struct state *state = d_to_priv(d);
int ret, i;
u8 tmp;
- u16 tmp16, addr;
+ u16 tmp16;
- /* demod I2C "address" */
- state->af9033_i2c_addr[0] = 0x38;
- state->af9033_i2c_addr[1] = 0x3a;
+ /* Demod I2C address */
+ state->af9033_i2c_addr[0] = 0x1c;
+ state->af9033_i2c_addr[1] = 0x1d;
state->af9033_config[0].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
@@ -837,20 +853,16 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (state->chip_version == 0x02) {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_60;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_60;
- tmp16 = 0x00461d; /* eeprom memory mapped location */
} else {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_38;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_38;
- tmp16 = 0x00461b; /* eeprom memory mapped location */
}
- /* check if eeprom exists */
- ret = af9035_rd_reg(d, tmp16, &tmp);
- if (ret < 0)
- goto err;
+ if (state->no_eeprom) {
+ /* Remote controller to NEC polling by default */
+ state->ir_mode = 0x05;
+ state->ir_type = 0x00;
- if (tmp == 0x00) {
- dev_dbg(&intf->dev, "no eeprom\n");
goto skip_eeprom;
}
} else if (state->chip_type == 0x9306) {
@@ -861,29 +873,25 @@ static int af9035_read_config(struct dvb_usb_device *d)
return 0;
}
+ /* Remote controller */
+ state->ir_mode = state->eeprom[EEPROM_IR_MODE];
+ state->ir_type = state->eeprom[EEPROM_IR_TYPE];
if (state->dual_mode) {
- /* read 2nd demodulator I2C address */
- ret = af9035_rd_reg(d,
- state->eeprom_addr + EEPROM_2ND_DEMOD_ADDR,
- &tmp);
- if (ret < 0)
- goto err;
-
+ /* Read 2nd demodulator I2C address. 8-bit format on eeprom */
+ tmp = state->eeprom[EEPROM_2ND_DEMOD_ADDR];
if (tmp)
- state->af9033_i2c_addr[1] = tmp;
+ state->af9033_i2c_addr[1] = tmp >> 1;
- dev_dbg(&intf->dev, "2nd demod I2C addr=%02x\n", tmp);
+ dev_dbg(&intf->dev, "2nd demod I2C addr=%02x\n",
+ state->af9033_i2c_addr[1]);
}
- addr = state->eeprom_addr;
-
for (i = 0; i < state->dual_mode + 1; i++) {
- /* tuner */
- ret = af9035_rd_reg(d, addr + EEPROM_1_TUNER_ID, &tmp);
- if (ret < 0)
- goto err;
+ unsigned int eeprom_offset = 0;
+ /* tuner */
+ tmp = state->eeprom[EEPROM_1_TUNER_ID + eeprom_offset];
dev_dbg(&intf->dev, "[%d]tuner=%02x\n", i, tmp);
/* tuner sanity check */
@@ -956,21 +964,13 @@ static int af9035_read_config(struct dvb_usb_device *d)
}
/* tuner IF frequency */
- ret = af9035_rd_reg(d, addr + EEPROM_1_IF_L, &tmp);
- if (ret < 0)
- goto err;
-
- tmp16 = tmp;
-
- ret = af9035_rd_reg(d, addr + EEPROM_1_IF_H, &tmp);
- if (ret < 0)
- goto err;
-
+ tmp = state->eeprom[EEPROM_1_IF_L + eeprom_offset];
+ tmp16 = tmp << 0;
+ tmp = state->eeprom[EEPROM_1_IF_H + eeprom_offset];
tmp16 |= tmp << 8;
-
dev_dbg(&intf->dev, "[%d]IF=%d\n", i, tmp16);
- addr += 0x10; /* shift for the 2nd tuner params */
+ eeprom_offset += 0x10; /* shift for the 2nd tuner params */
}
skip_eeprom:
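
These hunks switch af9035 from issuing one register read per eeprom field to reading the whole eeprom once into state->eeprom and indexing the cached copy afterwards. A compressed sketch of that pattern, reusing af9035_rd_regs() from this file (the helper name is illustrative only):

	static int example_cache_eeprom(struct dvb_usb_device *d, u32 base, u8 *cache)
	{
		unsigned int i;
		int ret;

		/* one pass over the 256-byte eeprom, 32 bytes per register read */
		for (i = 0; i < 256; i += 32) {
			ret = af9035_rd_regs(d, base + i, &cache[i], 32);
			if (ret < 0)
				return ret;
		}
		return 0;
	}

	/* later lookups become plain array reads instead of USB round-trips:
	 *	tmp = state->eeprom[EEPROM_TS_MODE];
	 */
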
@@ -1247,30 +1247,11 @@ static int af9035_frontend_detach(struct dvb_usb_adapter *adap)
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
struct usb_interface *intf = d->intf;
- int demod2;
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /*
- * For dual tuner devices we have to resolve 2nd demod client, as there
- * is two different kind of tuner drivers; one is using I2C binding
- * and the other is using DVB attach/detach binding.
- */
- switch (state->af9033_config[adap->id].tuner) {
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
- demod2 = 2;
- break;
- default:
- demod2 = 1;
- }
-
if (adap->id == 1) {
- if (state->i2c_client[demod2])
+ if (state->i2c_client[1])
af9035_del_i2c_dev(d);
} else if (adap->id == 0) {
if (state->i2c_client[0])
@@ -1510,50 +1491,58 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
- {
- struct it913x_config it913x_config = {
- .fe = adap->fe[0],
- .chip_ver = 1,
- };
-
- if (state->dual_mode) {
- if (adap->id == 0)
- it913x_config.role = IT913X_ROLE_DUAL_MASTER;
- else
- it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
- }
-
- ret = af9035_add_i2c_dev(d, "it913x",
- state->af9033_i2c_addr[adap->id] >> 1,
- &it913x_config, &d->i2c_adap);
- if (ret)
- goto err;
-
- fe = adap->fe[0];
- break;
- }
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
{
- struct it913x_config it913x_config = {
+ struct platform_device *pdev;
+ const char *name;
+ struct it913x_platform_data it913x_pdata = {
+ .regmap = state->af9033_config[adap->id].regmap,
.fe = adap->fe[0],
- .chip_ver = 2,
};
+ switch (state->af9033_config[adap->id].tuner) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ name = "it9133ax-tuner";
+ break;
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ name = "it9133bx-tuner";
+ break;
+ default:
+ ret = -ENODEV;
+ goto err;
+ }
+
if (state->dual_mode) {
if (adap->id == 0)
- it913x_config.role = IT913X_ROLE_DUAL_MASTER;
+ it913x_pdata.role = IT913X_ROLE_DUAL_MASTER;
else
- it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
+ it913x_pdata.role = IT913X_ROLE_DUAL_SLAVE;
+ } else {
+ it913x_pdata.role = IT913X_ROLE_SINGLE;
}
- ret = af9035_add_i2c_dev(d, "it913x",
- state->af9033_i2c_addr[adap->id] >> 1,
- &it913x_config, &d->i2c_adap);
- if (ret)
+ request_module("%s", "it913x");
+ pdev = platform_device_register_data(&d->intf->dev, name,
+ PLATFORM_DEVID_AUTO,
+ &it913x_pdata,
+ sizeof(it913x_pdata));
+ if (IS_ERR(pdev) || !pdev->dev.driver) {
+ ret = -ENODEV;
+ goto err;
+ }
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ platform_device_unregister(pdev);
+ ret = -ENODEV;
goto err;
+ }
+ state->platform_device_tuner[adap->id] = pdev;
fe = adap->fe[0];
break;
}
@@ -1675,12 +1664,6 @@ static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_TUA9001:
case AF9033_TUNER_FC2580:
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
if (adap->id == 1) {
if (state->i2c_client[3])
af9035_del_i2c_dev(d);
@@ -1688,6 +1671,23 @@ static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
if (state->i2c_client[1])
af9035_del_i2c_dev(d);
}
+ break;
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ {
+ struct platform_device *pdev;
+
+ pdev = state->platform_device_tuner[adap->id];
+ if (pdev) {
+ module_put(pdev->dev.driver->owner);
+ platform_device_unregister(pdev);
+ }
+ break;
+ }
}
return 0;
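
The tuner attach/detach hunks move the it913x tuner from an I2C client to a platform device bound by name, with a module reference pinning the tuner driver while the frontend uses it. A condensed sketch of that bind/unbind sequence; the helper names are illustrative, the calls are the ones used above:

	#include <linux/kmod.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int bind_platform_tuner(struct device *parent, const char *name,
				       void *pdata, size_t pdata_size,
				       struct platform_device **out)
	{
		struct platform_device *pdev;

		request_module("%s", name);	/* best effort; binding is checked below */
		pdev = platform_device_register_data(parent, name, PLATFORM_DEVID_AUTO,
						     pdata, pdata_size);
		if (IS_ERR(pdev) || !pdev->dev.driver) {
			if (!IS_ERR(pdev))
				platform_device_unregister(pdev);
			return -ENODEV;
		}
		if (!try_module_get(pdev->dev.driver->owner)) {
			platform_device_unregister(pdev);
			return -ENODEV;
		}
		*out = pdev;
		return 0;
	}

	static void unbind_platform_tuner(struct platform_device *pdev)
	{
		/* teardown in reverse order */
		module_put(pdev->dev.driver->owner);
		platform_device_unregister(pdev);
	}
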
@@ -1872,25 +1872,13 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
struct state *state = d_to_priv(d);
struct usb_interface *intf = d->intf;
- int ret;
- u8 tmp;
-
- ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_IR_MODE, &tmp);
- if (ret < 0)
- goto err;
- dev_dbg(&intf->dev, "ir_mode=%02x\n", tmp);
+ dev_dbg(&intf->dev, "ir_mode=%02x ir_type=%02x\n",
+ state->ir_mode, state->ir_type);
/* don't activate rc if in HID mode or if not available */
- if (tmp == 5) {
- ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_IR_TYPE,
- &tmp);
- if (ret < 0)
- goto err;
-
- dev_dbg(&intf->dev, "ir_type=%02x\n", tmp);
-
- switch (tmp) {
+ if (state->ir_mode == 0x05) {
+ switch (state->ir_type) {
case 0: /* NEC */
default:
rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX |
@@ -1910,11 +1898,6 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
}
return 0;
-
-err:
- dev_dbg(&intf->dev, "failed=%d\n", ret);
-
- return ret;
}
#else
#define af9035_get_rc_config NULL
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index 1f83c9218ad0..a76e6bf0ab1e 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -22,6 +22,7 @@
#ifndef AF9035_H
#define AF9035_H
+#include <linux/platform_device.h>
#include "dvb_usb.h"
#include "af9033.h"
#include "tua9001.h"
@@ -61,15 +62,19 @@ struct state {
u8 prechip_version;
u8 chip_version;
u16 chip_type;
+ u8 eeprom[256];
+ bool no_eeprom;
+ u8 ir_mode;
+ u8 ir_type;
u8 dual_mode:1;
u8 no_read:1;
- u16 eeprom_addr;
u8 af9033_i2c_addr[2];
struct af9033_config af9033_config[2];
struct af9033_ops ops;
#define AF9035_I2C_CLIENT_MAX 4
struct i2c_client *i2c_client[AF9035_I2C_CLIENT_MAX];
struct i2c_adapter *i2c_adapter_demod;
+ struct platform_device *platform_device_tuner[2];
};
static const u32 clock_lut_af9035[] = {
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index ae917c042a52..6795c0c609b1 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* TODO:
* - add smart card reader support for Conditional Access (CA)
*
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.h b/drivers/media/usb/dvb-usb-v2/anysee.h
index 3ca2bca4ebaf..393e2fce2aed 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.h
+++ b/drivers/media/usb/dvb-usb-v2/anysee.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* TODO:
* - add smart card reader support for Conditional Access (CA)
*
diff --git a/drivers/media/usb/dvb-usb-v2/au6610.c b/drivers/media/usb/dvb-usb-v2/au6610.c
index ae6a671b7fd5..6ee01cb64ca5 100644
--- a/drivers/media/usb/dvb-usb-v2/au6610.c
+++ b/drivers/media/usb/dvb-usb-v2/au6610.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "au6610.h"
diff --git a/drivers/media/usb/dvb-usb-v2/au6610.h b/drivers/media/usb/dvb-usb-v2/au6610.h
index ea337bfc00b1..aacfcc6fa0f5 100644
--- a/drivers/media/usb/dvb-usb-v2/au6610.h
+++ b/drivers/media/usb/dvb-usb-v2/au6610.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef AU6610_H
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
index f67b14bc32e3..e596031a708d 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "ce6230.h"
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.h b/drivers/media/usb/dvb-usb-v2/ce6230.h
index 299e57e3390b..b25b3b938e49 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.h
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef CE6230_H
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index a8e6624fbe83..955fb0d07507 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -147,7 +147,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
if (!d->rc.map_name)
return 0;
- dev = rc_allocate_device();
+ dev = rc_allocate_device(d->rc.driver_type);
if (!dev) {
ret = -ENOMEM;
goto err;
@@ -162,7 +162,6 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
/* TODO: likely RC-core should take const char * */
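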
dev->driver_name = (char *) d->props->driver_name;
dev->map_name = d->rc.map_name;
- dev->driver_type = d->rc.driver_type;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
dev->priv = d;
@@ -1013,8 +1012,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe);
void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = d->name;
- struct device dev = d->udev->dev;
+ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
+ const char *drvname = d->name;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -1024,8 +1023,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
dvb_usbv2_exit(d);
- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
- KBUILD_MODNAME, name);
+ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
+ KBUILD_MODNAME, drvname, devname);
+ kfree(devname);
}
EXPORT_SYMBOL(dvb_usbv2_disconnect);
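
The disconnect hunk duplicates the device name up front because dvb_usbv2_exit() frees the dvb_usb_device, so the final log message can no longer dereference d->name or d->udev. A small sketch of that ordering (the function name is illustrative):

	#include <linux/device.h>
	#include <linux/slab.h>

	static void example_disconnect(struct dvb_usb_device *d)
	{
		/* copy the name while d->udev is still valid */
		const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);

		dvb_usbv2_exit(d);		/* after this, d and d->udev are gone */

		pr_info("%s: '%s' disconnected\n", KBUILD_MODNAME,
			devname ? devname : "(unknown)");
		kfree(devname);			/* kfree(NULL) is a no-op */
	}
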
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 0636eac37bbb..5730760e4e93 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "dvb_usb.h"
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 0c2b377704ff..1db8aeef3655 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "ec168.h"
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.h b/drivers/media/usb/dvb-usb-v2/ec168.h
index 615a6569421f..704955bcaa10 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.h
+++ b/drivers/media/usb/dvb-usb-v2/ec168.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef EC168_H
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 5fea02672685..924adfdb660d 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -48,10 +48,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*
* see Documentation/dvb/README.dvb-usb for more information
*
@@ -99,9 +95,7 @@ static int dvb_usb_lme2510_debug;
} while (0)
#define deb_info(level, args...) lme_debug(dvb_usb_lme2510_debug, level, args)
#define debug_data_snipet(level, name, p) \
- deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
- *p, *(p+1), *(p+2), *(p+3), *(p+4), \
- *(p+5), *(p+6), *(p+7));
+ deb_info(level, name" (%8phN)", p);
#define info(args...) pr_info(DVB_USB_LOG_PREFIX": "args)
module_param_named(debug, dvb_usb_lme2510_debug, int, 0644);
@@ -315,7 +309,7 @@ static void lme2510_int_response(struct urb *lme_urb)
{
struct dvb_usb_adapter *adap = lme_urb->context;
struct lme2510_state *st = adap_to_priv(adap);
- static u8 *ibuf, *rbuf;
+ u8 *ibuf, *rbuf;
int i = 0, offset;
u32 key;
u8 signal_lock = 0;
@@ -1002,8 +996,9 @@ static int lme_name(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *st = adap_to_priv(adap);
const char *desc = d->name;
- char *fe_name[] = {"", " LG TDQY-P001F", " SHARP:BS2F7HZ7395",
- " SHARP:BS2F7HZ0194", " RS2000"};
+ static const char * const fe_name[] = {
+ "", " LG TDQY-P001F", " SHARP:BS2F7HZ7395",
+ " SHARP:BS2F7HZ0194", " RS2000"};
char *name = adap->fe[0]->ops.info.name;
strlcpy(name, desc, 128);
@@ -1124,7 +1119,7 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *st = adap_to_priv(adap);
- char *tun_msg[] = {"", "TDA8263", "IX2505V", "DVB_PLL_OPERA", "RS2000"};
+ static const char * const tun_msg[] = {"", "TDA8263", "IX2505V", "DVB_PLL_OPERA", "RS2000"};
int ret = 0;
switch (st->tuner_config) {
@@ -1178,10 +1173,7 @@ static int lme2510_powerup(struct dvb_usb_device *d, int onoff)
mutex_lock(&d->i2c_mutex);
- if (onoff)
- ret = lme2510_usb_talk(d, lnb_on, len, rbuf, rlen);
- else
- ret = lme2510_usb_talk(d, lnb_off, len, rbuf, rlen);
+ ret = lme2510_usb_talk(d, onoff ? lnb_on : lnb_off, len, rbuf, rlen);
st->i2c_talk_onoff = 1;
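
The lmedm04 hunk replaces the eight-argument hex dump with the %ph printk extension, which prints small buffers directly. A tiny sketch of the two common forms (the values are arbitrary):

	#include <linux/printk.h>

	static void dump_example(void)
	{
		u8 buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

		pr_debug("msg (%8phN)\n", buf);		/* "deadbeef00112233" - no separators */
		pr_debug("msg (%*ph)\n", 8, buf);	/* "de ad be ef ..."  - space separated */
	}
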
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 639e156e0c1b..f0ed37da73d4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-demod.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index e6eae9d88e9f..9cb4972ce7a3 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MXL111SF_DEMOD_H__
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index 2180c13a6dcc..c66861c9342b 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-gpio.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 16fa4d4daf88..af2c7bc8f301 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DVB_USB_MXL111SF_GPIO_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 6427137a09ef..ffb49c28b15a 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-i2c.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index c486fe02f018..28877c7a8175 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DVB_USB_MXL111SF_I2C_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index 5b0191178f9f..ffb6e7c72f57 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-phy.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index 25aa4a1ea755..0a61e8a56584 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DVB_USB_MXL111SF_PHY_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 1f4bfbcdbabb..ad3f806dcc7a 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DVB_USB_MXL111SF_REG_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index f84bef6034dc..240d736bf1bb 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-tuner.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index e96d9a444ed1..11ea07a73271 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MXL111SF_TUNER_H__
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c583c638e468..e16ca07acf1d 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1778,7 +1778,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
/* load empty to enable rc */
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_BIT_ALL;
+ rc->allowed_protos = RC_BIT_ALL_IR_DECODER;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
rc->interval = 200;
diff --git a/drivers/media/usb/dvb-usb-v2/zd1301.c b/drivers/media/usb/dvb-usb-v2/zd1301.c
new file mode 100644
index 000000000000..d1eb4b7bc051
--- /dev/null
+++ b/drivers/media/usb/dvb-usb-v2/zd1301.c
@@ -0,0 +1,298 @@
+/*
+ * ZyDAS ZD1301 driver (USB interface)
+ *
+ * Copyright (C) 2015 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dvb_usb.h"
+#include "zd1301_demod.h"
+#include "mt2060.h"
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+struct zd1301_dev {
+ #define BUF_LEN 8
+ u8 buf[BUF_LEN]; /* bulk USB control message */
+ struct zd1301_demod_platform_data demod_pdata;
+ struct mt2060_platform_data mt2060_pdata;
+ struct platform_device *platform_device_demod;
+ struct i2c_client *i2c_client_tuner;
+};
+
+static int zd1301_ctrl_msg(struct dvb_usb_device *d, const u8 *wbuf,
+ unsigned int wlen, u8 *rbuf, unsigned int rlen)
+{
+ struct zd1301_dev *dev = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ int ret, actual_length;
+
+ mutex_lock(&d->usb_mutex);
+
+ memcpy(&dev->buf, wbuf, wlen);
+
+ dev_dbg(&intf->dev, ">>> %*ph\n", wlen, dev->buf);
+
+ ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev, 0x04), dev->buf,
+ wlen, &actual_length, 1000);
+ if (ret) {
+ dev_err(&intf->dev, "1st usb_bulk_msg() failed %d\n", ret);
+ goto err_mutex_unlock;
+ }
+
+ if (rlen) {
+ ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, 0x83),
+ dev->buf, rlen, &actual_length, 1000);
+ if (ret) {
+ dev_err(&intf->dev,
+ "2nd usb_bulk_msg() failed %d\n", ret);
+ goto err_mutex_unlock;
+ }
+
+ dev_dbg(&intf->dev, "<<< %*ph\n", actual_length, dev->buf);
+
+ if (actual_length != rlen) {
+ /*
+ * The chip often replies first with a 3-byte stub; in that
+ * case the reply has to be queried again.
+ */
+ dev_dbg(&intf->dev, "repeating reply message\n");
+
+ ret = usb_bulk_msg(d->udev,
+ usb_rcvbulkpipe(d->udev, 0x83),
+ dev->buf, rlen, &actual_length,
+ 1000);
+ if (ret) {
+ dev_err(&intf->dev,
+ "3rd usb_bulk_msg() failed %d\n", ret);
+ goto err_mutex_unlock;
+ }
+
+ dev_dbg(&intf->dev,
+ "<<< %*ph\n", actual_length, dev->buf);
+ }
+
+ memcpy(rbuf, dev->buf, rlen);
+ }
+
+err_mutex_unlock:
+ mutex_unlock(&d->usb_mutex);
+ return ret;
+}
+
+static int zd1301_demod_wreg(void *reg_priv, u16 reg, u8 val)
+{
+ struct dvb_usb_device *d = reg_priv;
+ struct usb_interface *intf = d->intf;
+ int ret;
+ u8 buf[7] = {0x07, 0x00, 0x03, 0x01,
+ (reg >> 0) & 0xff, (reg >> 8) & 0xff, val};
+
+ ret = zd1301_ctrl_msg(d, buf, 7, NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_demod_rreg(void *reg_priv, u16 reg, u8 *val)
+{
+ struct dvb_usb_device *d = reg_priv;
+ struct usb_interface *intf = d->intf;
+ int ret;
+ u8 buf[7] = {0x07, 0x00, 0x04, 0x01,
+ (reg >> 0) & 0xff, (reg >> 8) & 0xff, 0};
+
+ ret = zd1301_ctrl_msg(d, buf, 7, buf, 7);
+ if (ret)
+ goto err;
+
+ *val = buf[6];
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct zd1301_dev *dev = adap_to_priv(adap);
+ struct usb_interface *intf = d->intf;
+ struct platform_device *pdev;
+ struct i2c_client *client;
+ struct i2c_board_info board_info;
+ struct i2c_adapter *adapter;
+ struct dvb_frontend *frontend;
+ int ret;
+
+ dev_dbg(&intf->dev, "\n");
+
+ /* Add platform demod */
+ dev->demod_pdata.reg_priv = d;
+ dev->demod_pdata.reg_read = zd1301_demod_rreg;
+ dev->demod_pdata.reg_write = zd1301_demod_wreg;
+ request_module("%s", "zd1301_demod");
+ pdev = platform_device_register_data(&intf->dev,
+ "zd1301_demod",
+ PLATFORM_DEVID_AUTO,
+ &dev->demod_pdata,
+ sizeof(dev->demod_pdata));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto err;
+ }
+ if (!pdev->dev.driver) {
+ ret = -ENODEV;
+ goto err;
+ }
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ ret = -ENODEV;
+ goto err_platform_device_unregister;
+ }
+
+ adapter = zd1301_demod_get_i2c_adapter(pdev);
+ frontend = zd1301_demod_get_dvb_frontend(pdev);
+ if (!adapter || !frontend) {
+ ret = -ENODEV;
+ goto err_module_put_demod;
+ }
+
+ /* Add I2C tuner */
+ dev->mt2060_pdata.i2c_write_max = 9;
+ dev->mt2060_pdata.dvb_frontend = frontend;
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "mt2060", I2C_NAME_SIZE);
+ board_info.addr = 0x60;
+ board_info.platform_data = &dev->mt2060_pdata;
+ request_module("%s", "mt2060");
+ client = i2c_new_device(adapter, &board_info);
+ if (!client || !client->dev.driver) {
+ ret = -ENODEV;
+ goto err_module_put_demod;
+ }
+ if (!try_module_get(client->dev.driver->owner)) {
+ ret = -ENODEV;
+ goto err_i2c_unregister_device;
+ }
+
+ dev->platform_device_demod = pdev;
+ dev->i2c_client_tuner = client;
+ adap->fe[0] = frontend;
+
+ return 0;
+err_i2c_unregister_device:
+ i2c_unregister_device(client);
+err_module_put_demod:
+ module_put(pdev->dev.driver->owner);
+err_platform_device_unregister:
+ platform_device_unregister(pdev);
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int zd1301_frontend_detach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct zd1301_dev *dev = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ struct platform_device *pdev;
+ struct i2c_client *client;
+
+ dev_dbg(&intf->dev, "\n");
+
+ client = dev->i2c_client_tuner;
+ pdev = dev->platform_device_demod;
+
+ /* Remove I2C tuner */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ /* Remove platform demod */
+ if (pdev) {
+ module_put(pdev->dev.driver->owner);
+ platform_device_unregister(pdev);
+ }
+
+ return 0;
+}
+
+static int zd1301_streaming_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct usb_interface *intf = d->intf;
+ int ret;
+ u8 buf[3] = {0x03, 0x00, onoff ? 0x07 : 0x08};
+
+ dev_dbg(&intf->dev, "onoff=%d\n", onoff);
+
+ ret = zd1301_ctrl_msg(d, buf, 3, NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static const struct dvb_usb_device_properties zd1301_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct zd1301_dev),
+
+ .frontend_attach = zd1301_frontend_attach,
+ .frontend_detach = zd1301_frontend_detach,
+ .streaming_ctrl = zd1301_streaming_ctrl,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x81, 6, 21 * 188),
+ },
+ },
+};
+
+static const struct usb_device_id zd1301_id_table[] = {
+ {DVB_USB_DEVICE(USB_VID_ZYDAS, 0x13a1, &zd1301_props,
+ "ZyDAS ZD1301 reference design", NULL)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, zd1301_id_table);
+
+/* Usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver zd1301_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = zd1301_id_table,
+ .probe = dvb_usbv2_probe,
+ .disconnect = dvb_usbv2_disconnect,
+ .suspend = dvb_usbv2_suspend,
+ .resume = dvb_usbv2_resume,
+ .reset_resume = dvb_usbv2_reset_resume,
+ .no_dynamic_id = 1,
+ .soft_unbind = 1,
+};
+module_usb_driver(zd1301_usb_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("ZyDAS ZD1301 driver");
+MODULE_LICENSE("GPL");
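
zd1301 control messages above are two bulk transfers, with a second read when the device answers with a short stub first. A stripped-down sketch of that exchange; the endpoint numbers come from the driver, the function name is illustrative, and buf must be a kmalloc'ed (DMA-safe) buffer as in the driver's private struct:

	#include <linux/usb.h>

	static int example_xfer(struct usb_device *udev, u8 *buf,
				unsigned int wlen, unsigned int rlen)
	{
		int ret, actual;

		ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x04), buf, wlen,
				   &actual, 1000);
		if (ret || !rlen)
			return ret;

		ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x83), buf, rlen,
				   &actual, 1000);
		if (!ret && actual != rlen)	/* short stub: ask again */
			ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x83), buf,
					   rlen, &actual, 1000);
		return ret;
	}
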
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 9862d3e6b8e8..544bdf18fb2f 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
diff --git a/drivers/media/usb/dvb-usb/af9005-remote.c b/drivers/media/usb/dvb-usb/af9005-remote.c
index 7e3961d0db6b..9b29ffa93075 100644
--- a/drivers/media/usb/dvb-usb/af9005-remote.c
+++ b/drivers/media/usb/dvb-usb/af9005-remote.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index f5f476841aea..986763b1b2b3 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "af9005.h"
diff --git a/drivers/media/usb/dvb-usb/af9005.h b/drivers/media/usb/dvb-usb/af9005.h
index 6a2bf3de8456..a1eae0fa02ed 100644
--- a/drivers/media/usb/dvb-usb/af9005.h
+++ b/drivers/media/usb/dvb-usb/af9005.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* see Documentation/dvb/README.dvb-usb for more information
*/
#ifndef _DVB_USB_AF9005_H_
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 6404205560eb..6131aa7914a9 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "cinergyT2.h"
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index bbb10fab65bc..f9772ad0a2a5 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "cinergyT2.h"
diff --git a/drivers/media/usb/dvb-usb/cinergyT2.h b/drivers/media/usb/dvb-usb/cinergyT2.h
index 84efe03771eb..c04b819be160 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2.h
+++ b/drivers/media/usb/dvb-usb/cinergyT2.h
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _DVB_USB_CINERGYT2_H_
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 9b8771eb31d4..51620e02292f 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -59,23 +59,24 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct cxusb_state *st = d->priv;
- int ret, wo;
+ int ret;
if (1 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- wo = (rbuf == NULL || rlen == 0); /* write-only */
+ if (rlen > MAX_XFER_SIZE) {
+ warn("i2c rd: len=%d is too big!\n", rlen);
+ return -EOPNOTSUPP;
+ }
mutex_lock(&d->data_mutex);
st->data[0] = cmd;
memcpy(&st->data[1], wbuf, wlen);
- if (wo)
- ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
- else
- ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
- rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0);
+ if (!ret && rbuf && rlen)
+ memcpy(rbuf, st->data, rlen);
mutex_unlock(&d->data_mutex);
return ret;
@@ -450,209 +451,46 @@ static int cxusb_d680_dmb_streaming_ctrl(
}
}
-static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int cxusb_rc_query(struct dvb_usb_device *d)
{
- struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
u8 ircode[4];
- int i;
cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
-
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) {
- if (rc5_custom(&keymap[i]) == ircode[2] &&
- rc5_data(&keymap[i]) == ircode[3]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
-
- return 0;
- }
- }
-
+ if (ircode[2] || ircode[3])
+ rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
+ RC_SCANCODE_RC5(ircode[2], ircode[3]), 0);
return 0;
}
-static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d, u32 *event,
- int *state)
+static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d)
{
- struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
u8 ircode[4];
- int i;
struct i2c_msg msg = { .addr = 0x6b, .flags = I2C_M_RD,
.buf = ircode, .len = 4 };
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
-
if (cxusb_i2c_xfer(&d->i2c_adap, &msg, 1) != 1)
return 0;
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) {
- if (rc5_custom(&keymap[i]) == ircode[1] &&
- rc5_data(&keymap[i]) == ircode[2]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
-
- return 0;
- }
- }
-
+ if (ircode[1] || ircode[2])
+ rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
+ RC_SCANCODE_RC5(ircode[1], ircode[2]), 0);
return 0;
}
-static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
- int *state)
+static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d)
{
- struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
u8 ircode[2];
- int i;
-
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
if (cxusb_ctrl_msg(d, 0x10, NULL, 0, ircode, 2) < 0)
return 0;
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) {
- if (rc5_custom(&keymap[i]) == ircode[0] &&
- rc5_data(&keymap[i]) == ircode[1]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
-
- return 0;
- }
- }
-
+ if (ircode[0] || ircode[1])
+ rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
+ RC_SCANCODE_RC5(ircode[0], ircode[1]), 0);
return 0;
}
-static struct rc_map_table rc_map_dvico_mce_table[] = {
- { 0xfe02, KEY_TV },
- { 0xfe0e, KEY_MP3 },
- { 0xfe1a, KEY_DVD },
- { 0xfe1e, KEY_FAVORITES },
- { 0xfe16, KEY_SETUP },
- { 0xfe46, KEY_POWER2 },
- { 0xfe0a, KEY_EPG },
- { 0xfe49, KEY_BACK },
- { 0xfe4d, KEY_MENU },
- { 0xfe51, KEY_UP },
- { 0xfe5b, KEY_LEFT },
- { 0xfe5f, KEY_RIGHT },
- { 0xfe53, KEY_DOWN },
- { 0xfe5e, KEY_OK },
- { 0xfe59, KEY_INFO },
- { 0xfe55, KEY_TAB },
- { 0xfe0f, KEY_PREVIOUSSONG },/* Replay */
- { 0xfe12, KEY_NEXTSONG }, /* Skip */
- { 0xfe42, KEY_ENTER }, /* Windows/Start */
- { 0xfe15, KEY_VOLUMEUP },
- { 0xfe05, KEY_VOLUMEDOWN },
- { 0xfe11, KEY_CHANNELUP },
- { 0xfe09, KEY_CHANNELDOWN },
- { 0xfe52, KEY_CAMERA },
- { 0xfe5a, KEY_TUNER }, /* Live */
- { 0xfe19, KEY_OPEN },
- { 0xfe0b, KEY_1 },
- { 0xfe17, KEY_2 },
- { 0xfe1b, KEY_3 },
- { 0xfe07, KEY_4 },
- { 0xfe50, KEY_5 },
- { 0xfe54, KEY_6 },
- { 0xfe48, KEY_7 },
- { 0xfe4c, KEY_8 },
- { 0xfe58, KEY_9 },
- { 0xfe13, KEY_ANGLE }, /* Aspect */
- { 0xfe03, KEY_0 },
- { 0xfe1f, KEY_ZOOM },
- { 0xfe43, KEY_REWIND },
- { 0xfe47, KEY_PLAYPAUSE },
- { 0xfe4f, KEY_FASTFORWARD },
- { 0xfe57, KEY_MUTE },
- { 0xfe0d, KEY_STOP },
- { 0xfe01, KEY_RECORD },
- { 0xfe4e, KEY_POWER },
-};
-
-static struct rc_map_table rc_map_dvico_portable_table[] = {
- { 0xfc02, KEY_SETUP }, /* Profile */
- { 0xfc43, KEY_POWER2 },
- { 0xfc06, KEY_EPG },
- { 0xfc5a, KEY_BACK },
- { 0xfc05, KEY_MENU },
- { 0xfc47, KEY_INFO },
- { 0xfc01, KEY_TAB },
- { 0xfc42, KEY_PREVIOUSSONG },/* Replay */
- { 0xfc49, KEY_VOLUMEUP },
- { 0xfc09, KEY_VOLUMEDOWN },
- { 0xfc54, KEY_CHANNELUP },
- { 0xfc0b, KEY_CHANNELDOWN },
- { 0xfc16, KEY_CAMERA },
- { 0xfc40, KEY_TUNER }, /* ATV/DTV */
- { 0xfc45, KEY_OPEN },
- { 0xfc19, KEY_1 },
- { 0xfc18, KEY_2 },
- { 0xfc1b, KEY_3 },
- { 0xfc1a, KEY_4 },
- { 0xfc58, KEY_5 },
- { 0xfc59, KEY_6 },
- { 0xfc15, KEY_7 },
- { 0xfc14, KEY_8 },
- { 0xfc17, KEY_9 },
- { 0xfc44, KEY_ANGLE }, /* Aspect */
- { 0xfc55, KEY_0 },
- { 0xfc07, KEY_ZOOM },
- { 0xfc0a, KEY_REWIND },
- { 0xfc08, KEY_PLAYPAUSE },
- { 0xfc4b, KEY_FASTFORWARD },
- { 0xfc5b, KEY_MUTE },
- { 0xfc04, KEY_STOP },
- { 0xfc56, KEY_RECORD },
- { 0xfc57, KEY_POWER },
- { 0xfc41, KEY_UNKNOWN }, /* INPUT */
- { 0xfc00, KEY_UNKNOWN }, /* HD */
-};
-
-static struct rc_map_table rc_map_d680_dmb_table[] = {
- { 0x0038, KEY_UNKNOWN }, /* TV/AV */
- { 0x080c, KEY_ZOOM },
- { 0x0800, KEY_0 },
- { 0x0001, KEY_1 },
- { 0x0802, KEY_2 },
- { 0x0003, KEY_3 },
- { 0x0804, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0806, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0808, KEY_8 },
- { 0x0009, KEY_9 },
- { 0x000a, KEY_MUTE },
- { 0x0829, KEY_BACK },
- { 0x0012, KEY_CHANNELUP },
- { 0x0813, KEY_CHANNELDOWN },
- { 0x002b, KEY_VOLUMEUP },
- { 0x082c, KEY_VOLUMEDOWN },
- { 0x0020, KEY_UP },
- { 0x0821, KEY_DOWN },
- { 0x0011, KEY_LEFT },
- { 0x0810, KEY_RIGHT },
- { 0x000d, KEY_OK },
- { 0x081f, KEY_RECORD },
- { 0x0017, KEY_PLAYPAUSE },
- { 0x0816, KEY_PLAYPAUSE },
- { 0x000b, KEY_STOP },
- { 0x0827, KEY_FASTFORWARD },
- { 0x0026, KEY_REWIND },
- { 0x081e, KEY_UNKNOWN }, /* Time Shift */
- { 0x000e, KEY_UNKNOWN }, /* Snapshot */
- { 0x082d, KEY_UNKNOWN }, /* Mouse Cursor */
- { 0x000f, KEY_UNKNOWN }, /* Minimize/Maximize */
- { 0x0814, KEY_UNKNOWN }, /* Shuffle */
- { 0x0025, KEY_POWER },
-};
-
static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
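
The cxusb remote hunks drop the in-driver keymaps and simply report raw RC-5 scancodes to rc-core, which then does the keymap lookup and key-release handling. A condensed sketch of one polled query, mirroring cxusb_rc_query() above (the function name is illustrative; it assumes cxusb.c's helpers):

	static int example_rc_query(struct dvb_usb_device *d)
	{
		u8 ir[4];

		if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ir, 4) < 0)
			return 0;		/* polling errors are not fatal */

		if (ir[2] || ir[3])		/* 0x00/0x00 means "no key pressed" */
			rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
				   RC_SCANCODE_RC5(ir[2], ir[3]), 0);
		return 0;
	}
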
@@ -1000,7 +838,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
return -EIO;
/* try to determine if there is no IR decoder on the I2C bus */
- for (i = 0; adap->dev->props.rc.legacy.rc_map_table != NULL && i < 5; i++) {
+ for (i = 0; adap->dev->props.rc.core.rc_codes && i < 5; i++) {
msleep(20);
if (cxusb_i2c_xfer(&adap->dev->i2c_adap, &msg, 1) != 1)
goto no_IR;
@@ -1008,7 +846,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
continue;
if (ircode[2] + ircode[3] != 0xff) {
no_IR:
- adap->dev->props.rc.legacy.rc_map_table = NULL;
+ adap->dev->props.rc.core.rc_codes = NULL;
info("No IR receiver detected on this device.");
break;
}
@@ -1720,11 +1558,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.i2c_algo = &cxusb_i2c_algo,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_portable_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_PORTABLE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1776,11 +1615,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.i2c_algo = &cxusb_i2c_algo,
- .rc.legacy = {
- .rc_interval = 150,
- .rc_map_table = rc_map_dvico_mce_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_mce_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_MCE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1840,11 +1680,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.i2c_algo = &cxusb_i2c_algo,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_portable_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_PORTABLE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1895,11 +1736,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.i2c_algo = &cxusb_i2c_algo,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_portable_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_PORTABLE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1949,11 +1791,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_mce_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_mce_table),
- .rc_query = cxusb_bluebird2_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_MCE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_bluebird2_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2002,11 +1845,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_portable_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_query = cxusb_bluebird2_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_PORTABLE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_bluebird2_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2057,11 +1901,12 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_portable_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_PORTABLE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2155,11 +2000,12 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_dvico_mce_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dvico_mce_table),
- .rc_query = cxusb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_DVICO_MCE,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2208,11 +2054,12 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_d680_dmb_table,
- .rc_map_size = ARRAY_SIZE(rc_map_d680_dmb_table),
- .rc_query = cxusb_d680_dmb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_D680_DMB,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_d680_dmb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2262,11 +2109,12 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_d680_dmb_table,
- .rc_map_size = ARRAY_SIZE(rc_map_d680_dmb_table),
- .rc_query = cxusb_d680_dmb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_D680_DMB,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_d680_dmb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
@@ -2315,11 +2163,12 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .rc.legacy = {
- .rc_interval = 100,
- .rc_map_table = rc_map_d680_dmb_table,
- .rc_map_size = ARRAY_SIZE(rc_map_d680_dmb_table),
- .rc_query = cxusb_d680_dmb_rc_query,
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_D680_DMB,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_d680_dmb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_device_descs = 1,
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index b29d4894c2f1..81d7fd4f7776 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3815,6 +3815,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E_SE) },
{ USB_DEVICE(USB_VID_PCTV, USB_PID_DIBCOM_STK8096PVR) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096PVR) },
+ { USB_DEVICE(USB_VID_HAMA, USB_PID_HAMA_DVBT_HYBRID) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -4379,7 +4380,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
- .num_device_descs = 9,
+ .num_device_descs = 10,
.devices = {
{ "Terratec Cinergy HT USB XE",
{ &dib0700_usb_id_table[27], NULL },
@@ -4417,6 +4418,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[54], NULL },
{ NULL },
},
+ { "Hama DVB-T Hybrid USB Stick",
+ { &dib0700_usb_id_table[85], NULL },
+ { NULL },
+ },
},
.rc.core = {
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index c60fb54f445f..2fa2abd3e726 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "dtv5100.h"
diff --git a/drivers/media/usb/dvb-usb/dtv5100.h b/drivers/media/usb/dvb-usb/dtv5100.h
index 93e96e04a82a..1ab1eafd3187 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.h
+++ b/drivers/media/usb/dvb-usb/dtv5100.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _DVB_USB_DTV5100_H_
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index f0023dbb7276..ab9866024ec7 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -35,28 +35,33 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
+ struct hexline *hx;
u8 reset;
int ret,pos=0;
+ hx = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!hx)
+ return -ENOMEM;
+
/* stop the CPU */
reset = 1;
if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
err("could not stop the USB controller CPU.");
- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware (transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(hx);
return ret;
}
@@ -70,6 +75,8 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
} else
ret = -EIO;
+ kfree(hx);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
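The usb_cypress_load_firmware() change above moves struct hexline from the stack to kmalloc()'d memory. Buffers handed to the USB core for transfers must be DMA-capable, which stack memory is not guaranteed to be, so the usual fix is a per-call heap allocation that is freed on every exit path. A simplified sketch of that pattern (do_transfer() is a placeholder helper, not a real kernel API):

	/* needs <linux/slab.h> for kmalloc/kfree and <linux/usb.h> for struct usb_device */
	static int load_blocks(struct usb_device *udev)
	{
		struct hexline *hx;
		int ret;

		hx = kmalloc(sizeof(*hx), GFP_KERNEL);  /* heap: safe to hand to the USB core */
		if (!hx)
			return -ENOMEM;

		ret = do_transfer(udev, hx);            /* placeholder for the firmware writes */

		kfree(hx);                              /* freed on success and on error alike */
		return ret;
	}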
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index c259f9e43542..059ded59208e 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -265,7 +265,7 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
int err, rc_interval;
struct rc_dev *dev;
- dev = rc_allocate_device();
+ dev = rc_allocate_device(d->props.rc.core.driver_type);
if (!dev)
return -ENOMEM;
@@ -273,7 +273,6 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->map_name = d->props.rc.core.rc_codes;
dev->change_protocol = d->props.rc.core.change_protocol;
dev->allowed_protocols = d->props.rc.core.allowed_protos;
- dev->driver_type = d->props.rc.core.driver_type;
usb_to_input_id(d->udev, &dev->input_id);
dev->input_name = "IR-receiver inside an USB DVB receiver";
dev->input_phys = d->rc_phys;
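In the hunk above, rc_allocate_device() now takes the driver type as an argument, so it is known at allocation time instead of being assigned to dev->driver_type afterwards; the remaining rc.core properties are copied over as before. Condensed:

	/* sketch of the updated flow in rc_core_dvb_usb_remote_init() */
	dev = rc_allocate_device(d->props.rc.core.driver_type);
	if (!dev)
		return -ENOMEM;

	dev->map_name          = d->props.rc.core.rc_codes;      /* e.g. RC_MAP_DVICO_MCE */
	dev->change_protocol   = d->props.rc.core.change_protocol;
	dev->allowed_protocols = d->props.rc.core.allowed_protos;
	/* dev->driver_type is no longer set here */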
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 2360e7e32b06..37f062225ed2 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -161,7 +161,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
goto out_free;
}
if (buflen > 64) {
- err("firmare chunk size bigger than 64 bytes.");
+ err("firmware chunk size bigger than 64 bytes.");
goto out_free;
}
@@ -278,7 +278,7 @@ static int gp8psk_fe_reload(void *priv)
return gp8psk_bcm4500_reload(d);
}
-const struct gp8psk_fe_ops gp8psk_fe_ops = {
+static const struct gp8psk_fe_ops gp8psk_fe_ops = {
.in = gp8psk_fe_in,
.out = gp8psk_fe_out,
.reload = gp8psk_fe_reload,
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08be9e99..d54ebe7e0215 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
-
- unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
unsigned int rlen;
int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
return -EIO;
}
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = cmd;
- state->data[3] = write_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = cmd;
+ buf[3] = write_len;
- memcpy(state->data + 4, data, write_len);
+ memcpy(buf + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
- state->data, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+ buf, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
- memcpy(data, state->data + 4, read_len);
+ memcpy(data, buf + 4, read_len);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
int ret;
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_I2C;
- state->data[3] = snd_len + 3;
- state->data[4] = addr << 1;
- state->data[5] = snd_len;
- state->data[6] = rcv_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = PCTV_CMD_I2C;
+ buf[3] = snd_len + 3;
+ buf[4] = addr << 1;
+ buf[5] = snd_len;
+ buf[6] = rcv_len;
- memcpy(state->data + 7, snd_buf, snd_len);
+ memcpy(buf + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
- state->data, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+ buf, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (state->data[5] < snd_len || state->data[6] < rcv_len)
+ if (buf[5] < snd_len || buf[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, state->data + 7, rcv_len);
- mutex_unlock(&state->ca_mutex);
+ memcpy(rcv_buf, buf + 7, rcv_len);
+ kfree(buf);
return rcv_len;
failed:
err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- 7, state->data);
+ 7, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 *rx;
+ u8 *b0, *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
- rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
- if (!rx)
+ b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b0)
return -ENOMEM;
- mutex_lock(&state->ca_mutex);
+ rx = b0 + 5;
+
/* hmm where shoud this should go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, dont know where to put */
- state->data[0] = 0xaa;
- state->data[1] = state->c++;
- state->data[2] = PCTV_CMD_RESET;
- state->data[3] = 1;
- state->data[4] = 0;
+ b0[0] = 0xaa;
+ b0[1] = state->c++;
+ b0[2] = PCTV_CMD_RESET;
+ b0[3] = 1;
+ b0[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
- state->data[1] = state->c++;
- state->data[4] = 1;
+ b0[1] = state->c++;
+ b0[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
state->initialized = 1;
ret:
- mutex_unlock(&state->ca_mutex);
- kfree(rx);
+ kfree(b0);
return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *b, *rx;
int ret, i;
u8 id;
- mutex_lock(&state->ca_mutex);
+ b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ rx = b + CMD_BUFFER_SIZE;
+
id = state->c++;
/* prepare command header */
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_IR;
- state->data[3] = 0;
+ b[0] = SYNC_BYTE_OUT;
+ b[1] = id;
+ b[2] = PCTV_CMD_IR;
+ b[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, state->data, 4,
- state->data, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
if (ret != 0)
goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
- for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", state->data[i + 3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+ for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", rx[i+3]);
info("\n");
}
- if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
+ if ((rx[3] == 9) && (rx[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, state->data[6], state->data[7]);
+ __func__, rx[6], rx[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
state->last_rc_key = 0;
}
ret:
- mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
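The pctv452e hunks above drop the 80-byte buffer embedded in the device state, together with the ca_mutex locking that serialised access to it, and give every caller its own kmalloc()'d scratch buffer sized for its needs (64 bytes for the CI and I2C paths, a combined command-plus-answer area where both are needed), freed on both the success and the error path. A minimal sketch of the split-allocation variant used in pctv452e_rc_query(), with the response handling elided:

	/* one allocation, split into a command area and an answer area */
	u8 *b, *rx;
	int ret;

	b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	rx = b + CMD_BUFFER_SIZE;       /* answer area follows the command area */

	/* ... fill b[0..3] with the IR request header ... */
	ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);

	kfree(b);                       /* a single free covers both areas */
	return ret;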
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 02c3bee6f83b..9f7dd1afcb15 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -14,10 +14,6 @@
* License, or (at your option) any later version.
*
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND
* TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO
* THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR
@@ -753,7 +749,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.rc_codes = RC_MAP_TECHNISAT_USB2,
.module_name = "technisat-usb2",
.rc_query = technisat_usb2_rc_query,
- .allowed_protos = RC_BIT_ALL,
+ .allowed_protos = RC_BIT_ALL_IR_DECODER,
.driver_type = RC_DRIVER_IR_RAW,
}
};
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 7969ddb9e2dd..ffad7f1af166 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "em28xx.h"
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 23c67494762d..5f90d0899a45 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -509,6 +509,7 @@ static struct em28xx_reg_seq plex_px_bcud[] = {
/*
* 2040:0265 Hauppauge WinTV-dualHD DVB
+ * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM
* reg 0x80/0x84:
* GPIO_0: Yellow LED tuner 1, 0=on, 1=off
* GPIO_1: Green LED tuner 1, 0=on, 1=off
@@ -2389,6 +2390,21 @@ struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
},
+ /*
+ * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM).
+ * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157
+ */
+ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = {
+ .name = "Hauppauge WinTV-dualHD 01595 ATSC/QAM",
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = hauppauge_dualhd_dvb,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_HAUPPAUGE,
+ .leds = hauppauge_dualhd_leds,
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2514,6 +2530,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
{ USB_DEVICE(0x2040, 0x0265),
.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
+ { USB_DEVICE(0x2040, 0x026d),
+ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
{ USB_DEVICE(0x0438, 0xb002),
.driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
{ USB_DEVICE(0x2001, 0xf112),
@@ -2945,6 +2963,7 @@ static void em28xx_card_setup(struct em28xx *dev)
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB:
+ case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595:
{
struct tveeprom tv;
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 75a75dab2e8e..82edd37f0d73 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -37,6 +37,7 @@
#include "lgdt330x.h"
#include "lgdt3305.h"
+#include "lgdt3306a.h"
#include "zl10353.h"
#include "s5h1409.h"
#include "mt2060.h"
@@ -920,6 +921,17 @@ static struct tda18271_config pinnacle_80e_dvb_config = {
.role = TDA18271_MASTER,
};
+static struct lgdt3306a_config hauppauge_01595_lgdt3306a_config = {
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+ .spectral_inversion = 0,
+ .deny_i2c_rptr = 0,
+ .mpeg_mode = LGDT3306A_MPEG_SERIAL,
+ .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE,
+ .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH,
+ .xtalMHz = 25,
+};
+
/* ------------------------------------------------------------------ */
static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
@@ -1950,6 +1962,68 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
break;
+ case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595:
+ {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info = {};
+ struct lgdt3306a_config lgdt3306a_config;
+ struct si2157_config si2157_config = {};
+
+ /* attach demod */
+ lgdt3306a_config = hauppauge_01595_lgdt3306a_config;
+ lgdt3306a_config.fe = &dvb->fe[0];
+ lgdt3306a_config.i2c_adapter = &adapter;
+ strlcpy(info.type, "lgdt3306a", sizeof(info.type));
+ info.addr = 0x59;
+ info.platform_data = &lgdt3306a_config;
+ request_module(info.type);
+ client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus],
+ &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ /* attach tuner */
+ si2157_config.fe = dvb->fe[0];
+ si2157_config.if_port = 1;
+ si2157_config.inversion = 1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", sizeof(info.type));
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module(info.type);
+
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_tuner = client;
+ }
+ break;
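The new WinTV-dualHD 01595 case above binds the LGDT3306A demodulator and the Si2157 tuner as I2C client devices: fill an i2c_board_info, request_module(), create the client with i2c_new_device() on the right bus, check that a driver actually bound (client->dev.driver), and pin that driver with try_module_get(); the demodulator driver exports its tuner-side I2C adapter through lgdt3306a_config.i2c_adapter so the tuner can then be instantiated behind it at address 0x60. A trimmed sketch of the demodulator half, with the error unwinding reduced to the essentials (identifiers as in the patch above):

	struct i2c_board_info info = {};
	struct i2c_adapter *adapter;            /* filled in by the lgdt3306a driver */
	struct lgdt3306a_config cfg = hauppauge_01595_lgdt3306a_config;
	struct i2c_client *client;

	cfg.fe = &dvb->fe[0];
	cfg.i2c_adapter = &adapter;

	strlcpy(info.type, "lgdt3306a", sizeof(info.type));
	info.addr = 0x59;
	info.platform_data = &cfg;

	request_module(info.type);              /* make sure the demod driver is loaded */
	client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
	if (!client || !client->dev.driver)
		return -ENODEV;                 /* no driver bound to the client */
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return -ENODEV;
	}
	dvb->i2c_client_demod = client;
	/* the si2157 tuner is then created the same way on 'adapter' */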
default:
dev_err(&dev->intf->dev,
"The frontend of your DVB/ATSC card isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 782ce095c8c5..eba75736e654 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -259,18 +259,21 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
break;
case RC_BIT_NEC:
- poll_result->protocol = RC_TYPE_RC5;
poll_result->scancode = msg[1] << 8 | msg[2];
- if ((msg[3] ^ msg[4]) != 0xff) /* 32 bits NEC */
+ if ((msg[3] ^ msg[4]) != 0xff) { /* 32 bits NEC */
+ poll_result->protocol = RC_TYPE_NEC32;
poll_result->scancode = RC_SCANCODE_NEC32((msg[1] << 24) |
(msg[2] << 16) |
(msg[3] << 8) |
(msg[4]));
- else if ((msg[1] ^ msg[2]) != 0xff) /* 24 bits NEC */
+ } else if ((msg[1] ^ msg[2]) != 0xff) { /* 24 bits NEC */
+ poll_result->protocol = RC_TYPE_NECX;
poll_result->scancode = RC_SCANCODE_NECX(msg[1] << 8 |
msg[2], msg[3]);
- else /* Normal NEC */
+ } else { /* Normal NEC */
+ poll_result->protocol = RC_TYPE_NEC;
poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[3]);
+ }
break;
case RC_BIT_RC6_0:
@@ -719,7 +722,7 @@ static int em28xx_ir_init(struct em28xx *dev)
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc)
goto error;
@@ -777,7 +780,7 @@ static int em28xx_ir_init(struct em28xx *dev)
case CHIP_ID_EM28178:
ir->get_key = em2874_polling_getkey;
rc->allowed_protocols = RC_BIT_RC5 | RC_BIT_NEC |
- RC_BIT_RC6_0;
+ RC_BIT_NECX | RC_BIT_NEC32 | RC_BIT_RC6_0;
break;
default:
err = -ENODEV;
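em2874_polling_getkey() used to tag every NEC keypress as RC_TYPE_RC5; the change above derives the actual NEC flavour from which bytes fail their complement check (msg[1]/msg[2] carry the address bytes, msg[3]/msg[4] the command bytes) and reports a matching protocol and scancode, and the allowed-protocol mask gains RC_BIT_NECX and RC_BIT_NEC32 so the corresponding keymaps can bind. The decision, restated:

	/* NEC variant selection as implemented above */
	if ((msg[3] ^ msg[4]) != 0xff) {                /* command not complemented: 32-bit NEC */
		poll_result->protocol = RC_TYPE_NEC32;
		poll_result->scancode = RC_SCANCODE_NEC32((msg[1] << 24) | (msg[2] << 16) |
							  (msg[3] << 8) | msg[4]);
	} else if ((msg[1] ^ msg[2]) != 0xff) {         /* address not complemented: extended NEC */
		poll_result->protocol = RC_TYPE_NECX;
		poll_result->scancode = RC_SCANCODE_NECX(msg[1] << 8 | msg[2], msg[3]);
	} else {                                        /* plain NEC */
		poll_result->protocol = RC_TYPE_NEC;
		poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[3]);
	}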
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index ca59e2d4fccf..e9f379959fa5 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -147,6 +147,7 @@
#define EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 97
#define EM28178_BOARD_PLEX_PX_BCUD 98
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99
+#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/autogain_functions.c b/drivers/media/usb/gspca/autogain_functions.c
index 0e9ee8b50bb7..427db745e027 100644
--- a/drivers/media/usb/gspca/autogain_functions.c
+++ b/drivers/media/usb/gspca/autogain_functions.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "gspca.h"
diff --git a/drivers/media/usb/gspca/benq.c b/drivers/media/usb/gspca/benq.c
index 5fa67b78ad49..60a728203b3b 100644
--- a/drivers/media/usb/gspca/benq.c
+++ b/drivers/media/usb/gspca/benq.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/conex.c b/drivers/media/usb/gspca/conex.c
index 2e15c80d6e3d..bdcdf7999c56 100644
--- a/drivers/media/usb/gspca/conex.c
+++ b/drivers/media/usb/gspca/conex.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 52b88e9e656b..23d3285f182a 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/etoms.c b/drivers/media/usb/gspca/etoms.c
index 26c9ee1f1045..8f84292936e9 100644
--- a/drivers/media/usb/gspca/etoms.c
+++ b/drivers/media/usb/gspca/etoms.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/finepix.c b/drivers/media/usb/gspca/finepix.c
index ae9a55d7bbbb..7bb469aa61a7 100644
--- a/drivers/media/usb/gspca/finepix.c
+++ b/drivers/media/usb/gspca/finepix.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index fa2cbb981905..16bc1dde2c8c 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -15,10 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index 19736e237b37..34e043b7d1bc 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index b12ecb72df4c..17c7a953564c 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define MODULE_NAME "jl2005bcd"
diff --git a/drivers/media/usb/gspca/jpeg.h b/drivers/media/usb/gspca/jpeg.h
index 0aa2b671faa4..d5ad7c96d039 100644
--- a/drivers/media/usb/gspca/jpeg.h
+++ b/drivers/media/usb/gspca/jpeg.h
@@ -18,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 3cb30a37d6ac..2f28b38c5479 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 40aaaa9c5f30..71f273377f83 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -22,10 +22,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/mars.c b/drivers/media/usb/gspca/mars.c
index 779a8785f421..25df55e840c7 100644
--- a/drivers/media/usb/gspca/mars.c
+++ b/drivers/media/usb/gspca/mars.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index 6dfb364094ec..8b0e32a649ac 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -34,10 +34,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 599f755e75b8..5d2d0bcb038d 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 4dbca54cf2a8..f4c41f043cda 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -31,10 +31,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 9266a5c9abc5..32849ff86b09 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -24,10 +24,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index 47085cf2d723..b2a92e518118 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 51e11248bbb8..01c185d367e5 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index be07a24c4518..595535e143e6 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/drivers/media/usb/gspca/pac7311.c b/drivers/media/usb/gspca/pac7311.c
index 25f86b1e74a8..8bac2d9326bf 100644
--- a/drivers/media/usb/gspca/pac7311.c
+++ b/drivers/media/usb/gspca/pac7311.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Some documentation about various registers as determined by trial and error.
diff --git a/drivers/media/usb/gspca/pac_common.h b/drivers/media/usb/gspca/pac_common.h
index fbc5e226c3e4..4047bcb6c2b5 100644
--- a/drivers/media/usb/gspca/pac_common.h
+++ b/drivers/media/usb/gspca/pac_common.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/* We calculate the autogain at the end of the transfer of a frame, at this
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 5102cea50471..477da0664b7d 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/se401.h b/drivers/media/usb/gspca/se401.h
index 96d8ebf3cf59..7cc0728c1410 100644
--- a/drivers/media/usb/gspca/se401.h
+++ b/drivers/media/usb/gspca/se401.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define SE401_REQ_GET_CAMERA_DESCRIPTOR 0x06
diff --git a/drivers/media/usb/gspca/sn9c2028.c b/drivers/media/usb/gspca/sn9c2028.c
index 4f2050a5ec94..5d32dd359d84 100644
--- a/drivers/media/usb/gspca/sn9c2028.c
+++ b/drivers/media/usb/gspca/sn9c2028.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/sn9c2028.h b/drivers/media/usb/gspca/sn9c2028.h
index f85bc106bc52..85761aa7c8b2 100644
--- a/drivers/media/usb/gspca/sn9c2028.h
+++ b/drivers/media/usb/gspca/sn9c2028.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
static const unsigned char sn9c2028_sof_marker[] = {
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index e7430b06526a..c605f78d6186 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 6696b2ec34e9..5f3f2979540a 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Some documentation on known sonixb registers:
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index d49d76ec1421..5eeaf16ac5e8 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index f38fd8949609..327ec901abe1 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca500.c b/drivers/media/usb/gspca/spca500.c
index f011a309dd65..da2d9027914c 100644
--- a/drivers/media/usb/gspca/spca500.c
+++ b/drivers/media/usb/gspca/spca500.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca501.c b/drivers/media/usb/gspca/spca501.c
index d92fd17d6701..ae5a80987553 100644
--- a/drivers/media/usb/gspca/spca501.c
+++ b/drivers/media/usb/gspca/spca501.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca505.c b/drivers/media/usb/gspca/spca505.c
index 232b330d2dd3..1553cc766c04 100644
--- a/drivers/media/usb/gspca/spca505.c
+++ b/drivers/media/usb/gspca/spca505.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index ee84863d27d4..843c93f5acf3 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define MODULE_NAME "spca506"
diff --git a/drivers/media/usb/gspca/spca508.c b/drivers/media/usb/gspca/spca508.c
index 75f2beb2ea5a..1e0ba6b24e21 100644
--- a/drivers/media/usb/gspca/spca508.c
+++ b/drivers/media/usb/gspca/spca508.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/spca561.c b/drivers/media/usb/gspca/spca561.c
index 403d71cd65d9..4ff704cf9ed6 100644
--- a/drivers/media/usb/gspca/spca561.c
+++ b/drivers/media/usb/gspca/spca561.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 9424c33f0ddb..f1da34a10ce8 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index 6c45dcc44eb0..8b4e4948a0cb 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index e274cf19a3ea..aa9a9411b801 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/stk014.c b/drivers/media/usb/gspca/stk014.c
index d324d001e114..daf45db6c404 100644
--- a/drivers/media/usb/gspca/stk014.c
+++ b/drivers/media/usb/gspca/stk014.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 48234c9a8b6c..3ab5ec2ca4bd 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/stk1135.h b/drivers/media/usb/gspca/stk1135.h
index e1dd92ab49bb..bd144012f73a 100644
--- a/drivers/media/usb/gspca/stk1135.h
+++ b/drivers/media/usb/gspca/stk1135.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define STK1135_REG_GCTRL 0x000 /* GPIO control */
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index 7f94ec74282e..29a65d05cbb2 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index fef7a784b879..e72c3e1ab9ff 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.h b/drivers/media/usb/gspca/stv06xx/stv06xx.h
index 34957a4ec150..f9d74e4d7cf9 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
index 2220b70d47e6..28252f6c4afd 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h
index 1ba9158d0102..d2da0de05236 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
index 8d785edcccf2..e1ce96e9405f 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h
index 5071e5353fd3..33572d8bb368 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h
index 3a498c2495c6..747d07c877fe 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_sensor.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
index 515a9e121653..4b76070515b5 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h
index 8f20fbf30f33..87324a69a0be 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.h
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef STV06XX_ST6422_H_
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
index f86cec091bf4..d265e6b00994 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
@@ -120,9 +116,6 @@ static int vv6410_init(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++)
stv06xx_write_bridge(sd, stv_bridge_init[i].addr, stv_bridge_init[i].data);
- if (err < 0)
- return err;
-
err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
ARRAY_SIZE(vv6410_sensor_init));
return (err < 0) ? err : 0;
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h
index 53e67b40ca05..e8598893791e 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_vv6410.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 38dc9e7aa313..8c2785aea3cd 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
index bb52fc1fe598..42667710af92 100644
--- a/drivers/media/usb/gspca/t613.c
+++ b/drivers/media/usb/gspca/t613.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*Notes: * t613 + tas5130A
* * Focus to light do not balance well as in win.
* Quality in win is not good, but its kinda better.
diff --git a/drivers/media/usb/gspca/tv8532.c b/drivers/media/usb/gspca/tv8532.c
index d497ba38af0d..bc2720e9cc4f 100644
--- a/drivers/media/usb/gspca/tv8532.c
+++ b/drivers/media/usb/gspca/tv8532.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define MODULE_NAME "tv8532"
diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
index b4efb2fb36fa..b935febf7146 100644
--- a/drivers/media/usb/gspca/vc032x.c
+++ b/drivers/media/usb/gspca/vc032x.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index 8860510c2f9c..554b90ef2200 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -20,10 +20,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index 896f1b2b9179..728d2322c433 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -18,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/* Note this is not a stand alone driver, it gets included in ov519.c, this
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index d5ed9d36ce25..b600ea6460d3 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index d5d8c7e81762..e2d486bd8c28 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
index 3bac50a248d4..356afa250cd6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-audio.h"
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.h b/drivers/media/usb/pvrusb2/pvrusb2-audio.h
index 27cefb5cb170..4f3898473165 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_AUDIO_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index c45f30715dcd..d9e8481e9e28 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-context.h"
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.h b/drivers/media/usb/pvrusb2/pvrusb2-context.h
index 1c1d442d9ea3..13e00c529611 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.h
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_CONTEXT_H
#define __PVRUSB2_CONTEXT_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
index 7f29a0464f36..679f3ff3b0a5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
index 86c17bee56f9..90dfb8b3f3e5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_CS53L32A_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
index 958db170a048..5f4ba84e5557 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-ctrl.h"
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
index c175571868a3..4b9152e36fe4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_CTRL_H
#define __PVRUSB2_CTRL_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index 30eef97ef2ef..242b213b7599 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
index 2eed7b7ee25e..dfddc88750d9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_CX2584X_V4L_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debug.h b/drivers/media/usb/pvrusb2/pvrusb2-debug.h
index 4ef2ebcd97a5..5cd16292e2fa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debug.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debug.h
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_DEBUG_H
#define __PVRUSB2_DEBUG_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
index 58ec706ebdb3..d3f3bd96885f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/string.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
index a8dfc55f136f..fcaaa8dd68b8 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_DEBUGIFC_H
#define __PVRUSB2_DEBUGIFC_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
index 06c4c3dabcde..51b3312eaea1 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
index 5aeefb6a991f..c1e7d4822cd1 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_DEVATTR_H
#define __PVRUSB2_DEVATTR_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index 8c95793433e7..56c750535ee7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kthread.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index 276b17fb9aad..4af2fb5c85d5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/slab.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
index f1e33c807f46..1d81cac30f3d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_EEPROM_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index f0483621d2a3..ca637074fa1f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/device.h> // for linux/firmware.h
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.h b/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
index a2bfb48f1ecd..10d7f0b48264 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_ENCODER_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
index 06a15a68bcfd..0a01de4e54db 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _PVRUSB2_FX2_CMD_H_
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
index 23473a21319c..7a824196d5fa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_HDW_INTERNAL_H
#define __PVRUSB2_HDW_INTERNAL_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index e3ed8ffee9f7..ad5b25b89699 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/errno.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index a82a00dd7329..25648add77e5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_HDW_H
#define __PVRUSB2_HDW_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index cc63e5f4c26c..f727b54a53c6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/i2c.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
index a10a3e8e9345..1c44dee7fd69 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_I2C_CORE_H
#define __PVRUSB2_I2C_CORE_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index e3103ecd4828..6d153fc23ec2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-io.h"
@@ -37,13 +33,13 @@ static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);
if ((bp)->signature != BUFFER_SIG) { \
pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
"Buffer %p is bad at %s:%d", \
- (bp),__FILE__,__LINE__); \
- pvr2_buffer_describe(bp,"BadSig"); \
+ (bp), __FILE__, __LINE__); \
+ pvr2_buffer_describe(bp, "BadSig"); \
BUG(); \
} \
} while (0)
#else
-#define BUFFER_CHECK(bp) do {} while(0)
+#define BUFFER_CHECK(bp) do {} while (0)
#endif
struct pvr2_stream {
@@ -110,7 +106,7 @@ static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
}
#ifdef SANITY_CHECK_BUFFERS
-static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
+static void pvr2_buffer_describe(struct pvr2_buffer *bp, const char *msg)
{
pvr2_trace(PVR2_TRACE_INFO,
"buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
@@ -156,7 +152,7 @@ static void pvr2_buffer_remove(struct pvr2_buffer *bp)
(*bcnt) -= ccnt;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
"/*---TRACE_FLOW---*/ bufferPool %8s dec cap=%07d cnt=%02d",
- pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
+ pvr2_buffer_state_decode(bp->state), *bcnt, *cnt);
bp->state = pvr2_buffer_state_none;
}
@@ -171,9 +167,9 @@ static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
bp,
pvr2_buffer_state_decode(bp->state),
pvr2_buffer_state_decode(pvr2_buffer_state_none));
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
pvr2_buffer_remove(bp);
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}
static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
@@ -188,18 +184,18 @@ static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
bp,
pvr2_buffer_state_decode(bp->state),
pvr2_buffer_state_decode(pvr2_buffer_state_ready));
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
fl = (sp->r_count == 0);
pvr2_buffer_remove(bp);
- list_add_tail(&bp->list_overhead,&sp->ready_list);
+ list_add_tail(&bp->list_overhead, &sp->ready_list);
bp->state = pvr2_buffer_state_ready;
(sp->r_count)++;
sp->r_bcount += bp->used_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
"/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
- sp->r_bcount,sp->r_count);
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ sp->r_bcount, sp->r_count);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
return fl;
}
@@ -214,17 +210,17 @@ static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
bp,
pvr2_buffer_state_decode(bp->state),
pvr2_buffer_state_decode(pvr2_buffer_state_idle));
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
pvr2_buffer_remove(bp);
- list_add_tail(&bp->list_overhead,&sp->idle_list);
+ list_add_tail(&bp->list_overhead, &sp->idle_list);
bp->state = pvr2_buffer_state_idle;
(sp->i_count)++;
sp->i_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
"/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
- sp->i_bcount,sp->i_count);
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ sp->i_bcount, sp->i_count);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}
static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
@@ -238,17 +234,17 @@ static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
bp,
pvr2_buffer_state_decode(bp->state),
pvr2_buffer_state_decode(pvr2_buffer_state_queued));
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
pvr2_buffer_remove(bp);
- list_add_tail(&bp->list_overhead,&sp->queued_list);
+ list_add_tail(&bp->list_overhead, &sp->queued_list);
bp->state = pvr2_buffer_state_queued;
(sp->q_count)++;
sp->q_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
"/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
- sp->q_bcount,sp->q_count);
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ sp->q_bcount, sp->q_count);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}
static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
@@ -262,18 +258,18 @@ static int pvr2_buffer_init(struct pvr2_buffer *bp,
struct pvr2_stream *sp,
unsigned int id)
{
- memset(bp,0,sizeof(*bp));
+ memset(bp, 0, sizeof(*bp));
bp->signature = BUFFER_SIG;
bp->id = id;
pvr2_trace(PVR2_TRACE_BUF_POOL,
- "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
+ "/*---TRACE_FLOW---*/ bufferInit %p stream=%p", bp, sp);
bp->stream = sp;
bp->state = pvr2_buffer_state_none;
INIT_LIST_HEAD(&bp->list_overhead);
- bp->purb = usb_alloc_urb(0,GFP_KERNEL);
+ bp->purb = usb_alloc_urb(0, GFP_KERNEL);
if (! bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
- pvr2_buffer_describe(bp,"create");
+ pvr2_buffer_describe(bp, "create");
#endif
return 0;
}
@@ -281,7 +277,7 @@ static int pvr2_buffer_init(struct pvr2_buffer *bp,
static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
- pvr2_buffer_describe(bp,"delete");
+ pvr2_buffer_describe(bp, "delete");
#endif
pvr2_buffer_wipe(bp);
pvr2_buffer_set_none(bp);
@@ -292,7 +288,7 @@ static void pvr2_buffer_done(struct pvr2_buffer *bp)
bp);
}
-static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+static int pvr2_stream_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
int ret;
unsigned int scnt;
@@ -312,10 +308,11 @@ static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
if (cnt > sp->buffer_total_count) {
if (scnt > sp->buffer_slot_count) {
struct pvr2_buffer **nb;
- nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
+
+ nb = kmalloc_array(scnt, sizeof(*nb), GFP_KERNEL);
if (!nb) return -ENOMEM;
if (sp->buffer_slot_count) {
- memcpy(nb,sp->buffers,
+ memcpy(nb, sp->buffers,
sp->buffer_slot_count * sizeof(*nb));
kfree(sp->buffers);
}
@@ -324,9 +321,9 @@ static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
}
while (sp->buffer_total_count < cnt) {
struct pvr2_buffer *bp;
- bp = kmalloc(sizeof(*bp),GFP_KERNEL);
+ bp = kmalloc(sizeof(*bp), GFP_KERNEL);
if (!bp) return -ENOMEM;
- ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
+ ret = pvr2_buffer_init(bp, sp, sp->buffer_total_count);
if (ret) {
kfree(bp);
return -ENOMEM;
@@ -369,10 +366,10 @@ static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
pvr2_trace(PVR2_TRACE_BUF_POOL,
"/*---TRACE_FLOW---*/ poolCheck stream=%p cur=%d tgt=%d",
- sp,sp->buffer_total_count,sp->buffer_target_count);
+ sp, sp->buffer_total_count, sp->buffer_target_count);
if (sp->buffer_total_count < sp->buffer_target_count) {
- return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
+ return pvr2_stream_buffer_count(sp, sp->buffer_target_count);
}
cnt = 0;
@@ -382,7 +379,7 @@ static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
cnt++;
}
if (cnt) {
- pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
+ pvr2_stream_buffer_count(sp, sp->buffer_total_count - cnt);
}
return 0;
@@ -393,7 +390,7 @@ static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
struct list_head *lp;
struct pvr2_buffer *bp1;
while ((lp = sp->queued_list.next) != &sp->queued_list) {
- bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
+ bp1 = list_entry(lp, struct pvr2_buffer, list_overhead);
pvr2_buffer_wipe(bp1);
/* At this point, we should be guaranteed that no
completion callback may happen on this buffer. But it's
@@ -421,7 +418,7 @@ static void pvr2_stream_done(struct pvr2_stream *sp)
{
mutex_lock(&sp->mutex); do {
pvr2_stream_internal_flush(sp);
- pvr2_stream_buffer_count(sp,0);
+ pvr2_stream_buffer_count(sp, 0);
} while (0); mutex_unlock(&sp->mutex);
}
@@ -436,8 +433,8 @@ static void buffer_complete(struct urb *urb)
bp->status = 0;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
"/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
- bp,urb->status,urb->actual_length);
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ bp, urb->status, urb->actual_length);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
if ((!(urb->status)) ||
(urb->status == -ENOENT) ||
(urb->status == -ECONNRESET) ||
@@ -458,12 +455,12 @@ static void buffer_complete(struct urb *urb)
(sp->buffers_failed)++;
pvr2_trace(PVR2_TRACE_TOLERANCE,
"stream %p ignoring error %d - fail count increased to %u",
- sp,urb->status,sp->fail_count);
+ sp, urb->status, sp->fail_count);
} else {
(sp->buffers_failed)++;
bp->status = urb->status;
}
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
pvr2_buffer_set_ready(bp);
if (sp->callback_func) {
sp->callback_func(sp->callback_data);
@@ -473,9 +470,9 @@ static void buffer_complete(struct urb *urb)
struct pvr2_stream *pvr2_stream_create(void)
{
struct pvr2_stream *sp;
- sp = kzalloc(sizeof(*sp),GFP_KERNEL);
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
if (!sp) return sp;
- pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
+ pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_create: sp=%p", sp);
pvr2_stream_init(sp);
return sp;
}
@@ -483,7 +480,7 @@ struct pvr2_stream *pvr2_stream_create(void)
void pvr2_stream_destroy(struct pvr2_stream *sp)
{
if (!sp) return;
- pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
+ pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_destroy: sp=%p", sp);
pvr2_stream_done(sp);
kfree(sp);
}
@@ -498,7 +495,7 @@ void pvr2_stream_setup(struct pvr2_stream *sp,
sp->dev = dev;
sp->endpoint = endpoint;
sp->fail_tolerance = tolerance;
- } while(0); mutex_unlock(&sp->mutex);
+ } while (0); mutex_unlock(&sp->mutex);
}
void pvr2_stream_set_callback(struct pvr2_stream *sp,
@@ -508,11 +505,11 @@ void pvr2_stream_set_callback(struct pvr2_stream *sp,
unsigned long irq_flags;
mutex_lock(&sp->mutex);
do {
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
sp->callback_data = data;
sp->callback_func = func;
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
- } while(0);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
+ } while (0);
mutex_unlock(&sp->mutex);
}
@@ -521,7 +518,7 @@ void pvr2_stream_get_stats(struct pvr2_stream *sp,
int zero_counts)
{
unsigned long irq_flags;
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
if (stats) {
stats->buffers_in_queue = sp->q_count;
stats->buffers_in_idle = sp->i_count;
@@ -535,7 +532,7 @@ void pvr2_stream_get_stats(struct pvr2_stream *sp,
sp->buffers_failed = 0;
sp->bytes_processed = 0;
}
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}
/* Query / set the nominal buffer count */
@@ -544,7 +541,7 @@ int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
return sp->buffer_target_count;
}
-int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+int pvr2_stream_set_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
int ret;
if (sp->buffer_target_count == cnt) return 0;
@@ -552,7 +549,7 @@ int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
do {
sp->buffer_target_count = cnt;
ret = pvr2_stream_achieve_buffer_count(sp);
- } while(0);
+ } while (0);
mutex_unlock(&sp->mutex);
return ret;
}
@@ -561,17 +558,17 @@ struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
struct list_head *lp = sp->idle_list.next;
if (lp == &sp->idle_list) return NULL;
- return list_entry(lp,struct pvr2_buffer,list_overhead);
+ return list_entry(lp, struct pvr2_buffer, list_overhead);
}
struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
struct list_head *lp = sp->ready_list.next;
if (lp == &sp->ready_list) return NULL;
- return list_entry(lp,struct pvr2_buffer,list_overhead);
+ return list_entry(lp, struct pvr2_buffer, list_overhead);
}
-struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
+struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp, int id)
{
if (id < 0) return NULL;
if (id >= sp->buffer_total_count) return NULL;
@@ -595,7 +592,7 @@ void pvr2_stream_kill(struct pvr2_stream *sp)
if (sp->buffer_total_count != sp->buffer_target_count) {
pvr2_stream_achieve_buffer_count(sp);
}
- } while(0);
+ } while (0);
mutex_unlock(&sp->mutex);
}
@@ -629,18 +626,18 @@ int pvr2_buffer_queue(struct pvr2_buffer *bp)
usb_fill_bulk_urb(bp->purb, // struct urb *urb
sp->dev, // struct usb_device *dev
// endpoint (below)
- usb_rcvbulkpipe(sp->dev,sp->endpoint),
+ usb_rcvbulkpipe(sp->dev, sp->endpoint),
bp->ptr, // void *transfer_buffer
bp->max_count, // int buffer_length
buffer_complete,
bp);
- usb_submit_urb(bp->purb,GFP_KERNEL);
- } while(0);
+ usb_submit_urb(bp->purb, GFP_KERNEL);
+ } while (0);
mutex_unlock(&sp->mutex);
return ret;
}
-int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
+int pvr2_buffer_set_buffer(struct pvr2_buffer *bp, void *ptr, unsigned int cnt)
{
int ret = 0;
unsigned long irq_flags;
@@ -649,7 +646,7 @@ int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
sp = bp->stream;
mutex_lock(&sp->mutex);
do {
- spin_lock_irqsave(&sp->list_lock,irq_flags);
+ spin_lock_irqsave(&sp->list_lock, irq_flags);
if (bp->state != pvr2_buffer_state_idle) {
ret = -EPERM;
} else {
@@ -661,10 +658,10 @@ int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
"/*---TRACE_FLOW---*/ bufferPool %8s cap cap=%07d cnt=%02d",
pvr2_buffer_state_decode(
pvr2_buffer_state_idle),
- bp->stream->i_bcount,bp->stream->i_count);
+ bp->stream->i_bcount, bp->stream->i_count);
}
- spin_unlock_irqrestore(&sp->list_lock,irq_flags);
- } while(0);
+ spin_unlock_irqrestore(&sp->list_lock, irq_flags);
+ } while (0);
mutex_unlock(&sp->mutex);
return ret;
}
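Besides the comma-spacing cleanups, the buffer-table growth path in pvrusb2-io.c above switches from an open-coded kmalloc(scnt * sizeof(*nb), ...) to kmalloc_array(). A small stand-alone illustration of why (not taken from the driver):

#include <linux/slab.h>

/*
 * kmalloc_array() checks that n * size does not overflow before
 * allocating; the open-coded multiplication it replaces could wrap on a
 * huge count and silently under-allocate.
 */
static void *alloc_ptr_table(size_t n)
{
	return kmalloc_array(n, sizeof(void *), GFP_KERNEL);
}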
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.h b/drivers/media/usb/pvrusb2/pvrusb2-io.h
index 0c47c6a95ab2..e769aeb9d529 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_IO_H
#define __PVRUSB2_IO_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
index 3c7ca2c2c108..602097bdcf14 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-ioread.h"
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.h b/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
index 0b1f0fbc3438..5827ea09c5e3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_IOREAD_H
#define __PVRUSB2_IOREAD_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-main.c b/drivers/media/usb/pvrusb2/pvrusb2-main.c
index 86be902a0049..cbe2c3a22458 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-main.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-main.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index cd7bc18a1ba2..21bb20dba82c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "pvrusb2-std.h"
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.h b/drivers/media/usb/pvrusb2/pvrusb2-std.h
index ed4ec0474429..b48304f41472 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_STD_H
#define __PVRUSB2_STD_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index d977976b8d91..7bc6d090358e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/string.h>
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
index 6f0579e1e07b..431f4fd19015 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_SYSFS_H
#define __PVRUSB2_SYSFS_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-util.h b/drivers/media/usb/pvrusb2/pvrusb2-util.h
index 5465bf9cd73e..b03ca3ef1ba0 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-util.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-util.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_UTIL_H
#define __PVRUSB2_UTIL_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index bbbe18d5275a..8f13c60198ed 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
@@ -1054,7 +1050,7 @@ static int pvr2_v4l2_open(struct file *file)
pvr2_trace(PVR2_TRACE_STRUCT,
"Destroying pvr_v4l2_fh id=%p (input mask error)",
fhp);
-
+ v4l2_fh_exit(&fhp->fh);
kfree(fhp);
return ret;
}
@@ -1071,6 +1067,7 @@ static int pvr2_v4l2_open(struct file *file)
pvr2_trace(PVR2_TRACE_STRUCT,
"Destroying pvr_v4l2_fh id=%p (input map failure)",
fhp);
+ v4l2_fh_exit(&fhp->fh);
kfree(fhp);
return -ENOMEM;
}
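The two added v4l2_fh_exit() calls pair the earlier v4l2_fh_init() with its teardown on the error paths of pvr2_v4l2_open(), so the V4L2 file-handle state is unwound before the structure is freed. A schematic of that pairing, with placeholder struct and setup names (sketch_fh and prepare_handle() are not pvrusb2 symbols):

#include <linux/slab.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-dev.h>

struct sketch_fh {
	struct v4l2_fh fh;
};

static int open_sketch(struct video_device *vdev, struct sketch_fh *fhp)
{
	int ret;

	v4l2_fh_init(&fhp->fh, vdev);

	ret = prepare_handle(fhp);	/* placeholder for the setup steps */
	if (ret) {
		v4l2_fh_exit(&fhp->fh);	/* undo v4l2_fh_init() before kfree */
		kfree(fhp);
		return ret;
	}

	v4l2_fh_add(&fhp->fh);
	return 0;
}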
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
index e455c9515841..ec755ee8f86a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_V4L2_H
#define __PVRUSB2_V4L2_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
index 6fee367139aa..b68aec2124b2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
index dacf3ec7f9e1..fa33f20655f4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_VIDEO_V4L_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
index 7993983de5a6..8f357f771ba7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
index a4ee12e28d5c..c4ac7c2701d0 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_WM8775_H
diff --git a/drivers/media/usb/pvrusb2/pvrusb2.h b/drivers/media/usb/pvrusb2/pvrusb2.h
index 95f98a87abb3..955290ba2d54 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __PVRUSB2_H
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index f7bb78c1873c..a9d4484f7626 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -30,10 +30,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index a4dcaec31d02..8c1f926567ec 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -218,22 +218,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
static int smsusb_sendrequest(void *context, void *buffer, size_t size)
{
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
- struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
- int dummy;
+ struct sms_msg_hdr *phdr;
+ int dummy, ret;
if (dev->state != SMSUSB_ACTIVE) {
pr_debug("Device not active yet\n");
return -ENOENT;
}
+ phdr = kmalloc(size, GFP_KERNEL);
+ if (!phdr)
+ return -ENOMEM;
+ memcpy(phdr, buffer, size);
+
pr_debug("sending %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
phdr->msg_length);
smsendian_handle_tx_message((struct sms_msg_data *) phdr);
- smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
- buffer, size, &dummy, 1000);
+ smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ phdr, size, &dummy, 1000);
+
+ kfree(phdr);
+ return ret;
}
static char *smsusb1_fw_lkup[] = {
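The smsusb_sendrequest() change above stages the caller's message in a freshly allocated buffer before handing it to usb_bulk_msg(): USB transfer buffers must be DMA-capable kernel heap memory, and the endian helpers rewrite the header in place, so writing through the caller's pointer is not safe. The generic shape of the fix, with kmemdup() standing in for the patch's kmalloc() + memcpy():

#include <linux/slab.h>
#include <linux/usb.h>

/* Illustrative only, not the driver's exact code. */
static int send_bulk_copy(struct usb_device *udev, unsigned int ep,
			  const void *msg, int len)
{
	int actual, ret;
	void *buf = kmemdup(msg, len, GFP_KERNEL);	/* DMA-safe copy */

	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, ep),
			   buf, len, &actual, 1000);
	kfree(buf);
	return ret;
}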
diff --git a/drivers/media/usb/stk1160/Kconfig b/drivers/media/usb/stk1160/Kconfig
index 95584c15dc5a..22dff4f3b921 100644
--- a/drivers/media/usb/stk1160/Kconfig
+++ b/drivers/media/usb/stk1160/Kconfig
@@ -8,17 +8,9 @@ config VIDEO_STK1160_COMMON
To compile this driver as a module, choose M here: the
module will be called stk1160
-config VIDEO_STK1160_AC97
- bool "STK1160 AC97 codec support"
- depends on VIDEO_STK1160_COMMON && SND
-
- ---help---
- Enables AC97 codec support for stk1160 driver.
-
config VIDEO_STK1160
tristate
- depends on (!VIDEO_STK1160_AC97 || (SND='n') || SND) && VIDEO_STK1160_COMMON
+ depends on VIDEO_STK1160_COMMON
default y
select VIDEOBUF2_VMALLOC
select VIDEO_SAA711X
- select SND_AC97_CODEC if SND
diff --git a/drivers/media/usb/stk1160/Makefile b/drivers/media/usb/stk1160/Makefile
index dfe3e90ff392..42d05463b353 100644
--- a/drivers/media/usb/stk1160/Makefile
+++ b/drivers/media/usb/stk1160/Makefile
@@ -1,10 +1,8 @@
-obj-stk1160-ac97-$(CONFIG_VIDEO_STK1160_AC97) := stk1160-ac97.o
-
stk1160-y := stk1160-core.o \
stk1160-v4l.o \
stk1160-video.o \
stk1160-i2c.o \
- $(obj-stk1160-ac97-y)
+ stk1160-ac97.o
obj-$(CONFIG_VIDEO_STK1160) += stk1160.o
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index 2dd308f9541f..2169be8a71dd 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -4,6 +4,9 @@
* Copyright (C) 2012 Ezequiel Garcia
* <elezegarcia--a.t--gmail.com>
*
+ * Copyright (C) 2016 Marcel Hasler
+ * <mahasler--a.t--gmail.com>
+ *
* Based on Easycap driver by R.M. Thomas
* Copyright (C) 2010 R.M. Thomas
* <rmthomas--a.t--sciolus.org>
@@ -20,20 +23,32 @@
*
*/
-#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/ac97_codec.h>
+#include <linux/delay.h>
#include "stk1160.h"
#include "stk1160-reg.h"
-static struct snd_ac97 *stk1160_ac97;
-
-static void stk1160_write_ac97(struct snd_ac97 *ac97, u16 reg, u16 value)
+static int stk1160_ac97_wait_transfer_complete(struct stk1160 *dev)
{
- struct stk1160 *dev = ac97->private_data;
+ unsigned long timeout = jiffies + msecs_to_jiffies(STK1160_AC97_TIMEOUT);
+ u8 value;
+
+ /* Wait for AC97 transfer to complete */
+ while (time_is_after_jiffies(timeout)) {
+ stk1160_read_reg(dev, STK1160_AC97CTL_0, &value);
+
+ if (!(value & (STK1160_AC97CTL_0_CR | STK1160_AC97CTL_0_CW)))
+ return 0;
+ usleep_range(50, 100);
+ }
+
+ stk1160_err("AC97 transfer took too long, this should never happen!");
+ return -EBUSY;
+}
+
+static void stk1160_write_ac97(struct stk1160 *dev, u16 reg, u16 value)
+{
/* Set codec register address */
stk1160_write_reg(dev, STK1160_AC97_ADDR, reg);
@@ -41,28 +56,30 @@ static void stk1160_write_ac97(struct snd_ac97 *ac97, u16 reg, u16 value)
stk1160_write_reg(dev, STK1160_AC97_CMD, value & 0xff);
stk1160_write_reg(dev, STK1160_AC97_CMD + 1, (value & 0xff00) >> 8);
- /*
- * Set command write bit to initiate write operation.
- * The bit will be cleared when transfer is done.
- */
+ /* Set command write bit to initiate write operation */
stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8c);
+
+ /* Wait for command write bit to be cleared */
+ stk1160_ac97_wait_transfer_complete(dev);
}
-static u16 stk1160_read_ac97(struct snd_ac97 *ac97, u16 reg)
+#ifdef DEBUG
+static u16 stk1160_read_ac97(struct stk1160 *dev, u16 reg)
{
- struct stk1160 *dev = ac97->private_data;
u8 vall = 0;
u8 valh = 0;
/* Set codec register address */
stk1160_write_reg(dev, STK1160_AC97_ADDR, reg);
- /*
- * Set command read bit to initiate read operation.
- * The bit will be cleared when transfer is done.
- */
+ /* Set command read bit to initiate read operation */
stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8b);
+ /* Wait for command read bit to be cleared */
+ if (stk1160_ac97_wait_transfer_complete(dev) < 0)
+ return 0;
+
+
/* Retrieve register value */
stk1160_read_reg(dev, STK1160_AC97_CMD, &vall);
stk1160_read_reg(dev, STK1160_AC97_CMD + 1, &valh);
@@ -70,81 +87,79 @@ static u16 stk1160_read_ac97(struct snd_ac97 *ac97, u16 reg)
return (valh << 8) | vall;
}
-static void stk1160_reset_ac97(struct snd_ac97 *ac97)
+void stk1160_ac97_dump_regs(struct stk1160 *dev)
{
- struct stk1160 *dev = ac97->private_data;
- /* Two-step reset AC97 interface and hardware codec */
- stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x94);
- stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x88);
+ u16 value;
- /* Set 16-bit audio data and choose L&R channel*/
- stk1160_write_reg(dev, STK1160_AC97CTL_1 + 2, 0x01);
+ value = stk1160_read_ac97(dev, 0x12); /* CD volume */
+ stk1160_dbg("0x12 == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x10); /* Line-in volume */
+ stk1160_dbg("0x10 == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x0e); /* MIC volume (mono) */
+ stk1160_dbg("0x0e == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x16); /* Aux volume */
+ stk1160_dbg("0x16 == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x1a); /* Record select */
+ stk1160_dbg("0x1a == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x02); /* Master volume */
+ stk1160_dbg("0x02 == 0x%04x", value);
+
+ value = stk1160_read_ac97(dev, 0x1c); /* Record gain */
+ stk1160_dbg("0x1c == 0x%04x", value);
}
+#endif
+
+static int stk1160_has_audio(struct stk1160 *dev)
+{
+ u8 value;
-static struct snd_ac97_bus_ops stk1160_ac97_ops = {
- .read = stk1160_read_ac97,
- .write = stk1160_write_ac97,
- .reset = stk1160_reset_ac97,
-};
+ stk1160_read_reg(dev, STK1160_POSV_L, &value);
+ return !(value & STK1160_POSV_L_ACDOUT);
+}
-int stk1160_ac97_register(struct stk1160 *dev)
+static int stk1160_has_ac97(struct stk1160 *dev)
{
- struct snd_card *card = NULL;
- struct snd_ac97_bus *ac97_bus;
- struct snd_ac97_template ac97_template;
- int rc;
-
- /*
- * Just want a card to access ac96 controls,
- * the actual capture interface will be handled by snd-usb-audio
- */
- rc = snd_card_new(dev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
- if (rc < 0)
- return rc;
-
- /* TODO: I'm not sure where should I get these names :-( */
- snprintf(card->shortname, sizeof(card->shortname),
- "stk1160-mixer");
- snprintf(card->longname, sizeof(card->longname),
- "stk1160 ac97 codec mixer control");
- strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
-
- rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
- if (rc)
- goto err;
-
- /* We must set private_data before calling snd_ac97_mixer */
- memset(&ac97_template, 0, sizeof(ac97_template));
- ac97_template.private_data = dev;
- ac97_template.scaps = AC97_SCAP_SKIP_MODEM;
- rc = snd_ac97_mixer(ac97_bus, &ac97_template, &stk1160_ac97);
- if (rc)
- goto err;
-
- dev->snd_card = card;
- rc = snd_card_register(card);
- if (rc)
- goto err;
-
- return 0;
-
-err:
- dev->snd_card = NULL;
- snd_card_free(card);
- return rc;
+ u8 value;
+
+ stk1160_read_reg(dev, STK1160_POSV_L, &value);
+ return !(value & STK1160_POSV_L_ACSYNC);
}
-int stk1160_ac97_unregister(struct stk1160 *dev)
+void stk1160_ac97_setup(struct stk1160 *dev)
{
- struct snd_card *card = dev->snd_card;
+ if (!stk1160_has_audio(dev)) {
+ stk1160_info("Device doesn't support audio, skipping AC97 setup.");
+ return;
+ }
- /*
- * We need to check usb_device,
- * because ac97 release attempts to communicate with codec
- */
- if (card && dev->udev)
- snd_card_free(card);
+ if (!stk1160_has_ac97(dev)) {
+ stk1160_info("Device uses internal 8-bit ADC, skipping AC97 setup.");
+ return;
+ }
- return 0;
+ /* Two-step reset AC97 interface and hardware codec */
+ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x94);
+ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8c);
+
+ /* Set 16-bit audio data and choose L&R channel*/
+ stk1160_write_reg(dev, STK1160_AC97CTL_1 + 2, 0x01);
+ stk1160_write_reg(dev, STK1160_AC97CTL_1 + 3, 0x00);
+
+ /* Setup channels */
+ stk1160_write_ac97(dev, 0x12, 0x8808); /* CD volume */
+ stk1160_write_ac97(dev, 0x10, 0x0808); /* Line-in volume */
+ stk1160_write_ac97(dev, 0x0e, 0x0008); /* MIC volume (mono) */
+ stk1160_write_ac97(dev, 0x16, 0x0808); /* Aux volume */
+ stk1160_write_ac97(dev, 0x1a, 0x0404); /* Record select */
+ stk1160_write_ac97(dev, 0x02, 0x0000); /* Master volume */
+ stk1160_write_ac97(dev, 0x1c, 0x0808); /* Record gain */
+
+#ifdef DEBUG
+ stk1160_ac97_dump_regs(dev);
+#endif
}
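The reworked stk1160-ac97.c no longer registers a dummy ALSA card just to reach the codec mixer; it programs the AC97 registers directly through the bridge and, instead of assuming a command completes, polls the CR/CW busy bits in STK1160_AC97CTL_0 until they clear or STK1160_AC97_TIMEOUT expires. The polling skeleton, with read_ctl_reg() as a placeholder for the driver's register accessor:

#include <linux/jiffies.h>
#include <linux/delay.h>

/*
 * Poll-until-clear sketch matching the new wait helper; 'busy_mask'
 * corresponds to STK1160_AC97CTL_0_CR | STK1160_AC97CTL_0_CW.
 */
static int wait_bits_clear(struct stk1160 *dev, u8 busy_mask,
			   unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u8 val;

	while (time_is_after_jiffies(timeout)) {
		val = read_ctl_reg(dev);	/* placeholder register read */
		if (!(val & busy_mask))
			return 0;		/* controller finished the cycle */
		usleep_range(50, 100);		/* back off between polls */
	}
	return -EBUSY;				/* bits never cleared in time */
}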
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index bc029478065a..c86eb6164713 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -20,8 +20,7 @@
*
* TODO:
*
- * 1. (Try to) detect if we must register ac97 mixer
- * 2. Support stream at lower speed: lower frame rate or lower frame size.
+ * 1. Support stream at lower speed: lower frame rate or lower frame size.
*
*/
@@ -373,7 +372,7 @@ static int stk1160_probe(struct usb_interface *interface,
/* select default input */
stk1160_select_input(dev);
- stk1160_ac97_register(dev);
+ stk1160_ac97_setup(dev);
rc = stk1160_video_register(dev);
if (rc < 0)
@@ -411,9 +410,6 @@ static void stk1160_disconnect(struct usb_interface *interface)
/* Here is the only place where isoc get released */
stk1160_uninit_isoc(dev);
- /* ac97 unregister needs to be done before usb_device is cleared */
- stk1160_ac97_unregister(dev);
-
stk1160_clear_queue(dev);
video_unregister_device(&dev->vdev);
diff --git a/drivers/media/usb/stk1160/stk1160-reg.h b/drivers/media/usb/stk1160/stk1160-reg.h
index 81ff3a15d96e..7b08a3cc4504 100644
--- a/drivers/media/usb/stk1160/stk1160-reg.h
+++ b/drivers/media/usb/stk1160/stk1160-reg.h
@@ -26,6 +26,14 @@
/* Remote Wakup Control */
#define STK1160_RMCTL 0x00c
+/* Power-on Strapping Data */
+#define STK1160_POSVA 0x010
+#define STK1160_POSV_L 0x010
+#define STK1160_POSV_M 0x011
+#define STK1160_POSV_H 0x012
+#define STK1160_POSV_L_ACDOUT BIT(3)
+#define STK1160_POSV_L_ACSYNC BIT(2)
+
/*
* Decoder Control Register:
* This byte controls capture start/stop
@@ -114,6 +122,8 @@
/* AC97 Audio Control */
#define STK1160_AC97CTL_0 0x500
#define STK1160_AC97CTL_1 0x504
+#define STK1160_AC97CTL_0_CR BIT(1)
+#define STK1160_AC97CTL_0_CW BIT(2)
/* Use [0:6] bits of register 0x504 to set codec command address */
#define STK1160_AC97_ADDR 0x504
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 1ed1cc43cdb2..acd1c811db08 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -50,6 +50,8 @@
#define STK1160_MAX_INPUT 4
#define STK1160_SVIDEO_INPUT 4
+#define STK1160_AC97_TIMEOUT 50
+
#define STK1160_I2C_TIMEOUT 100
/* TODO: Print helpers
@@ -197,11 +199,4 @@ int stk1160_read_reg_req_len(struct stk1160 *dev, u8 req, u16 reg,
void stk1160_select_input(struct stk1160 *dev);
/* Provided by stk1160-ac97.c */
-#ifdef CONFIG_VIDEO_STK1160_AC97
-int stk1160_ac97_register(struct stk1160 *dev);
-int stk1160_ac97_unregister(struct stk1160 *dev);
-#else
-static inline int stk1160_ac97_register(struct stk1160 *dev) { return 0; }
-static inline int stk1160_ac97_unregister(struct stk1160 *dev) { return 0; }
-#endif
-
+void stk1160_ac97_setup(struct stk1160 *dev);
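
For context, a minimal sketch of how the probe-time decision between the AC97 codec and the internal ADC could look. The helper name stk1160_has_ac97() does appear in the setup path above, but its body here, the choice of STK1160_POSV_L as the register to sample, and the polarity of the strap bits are assumptions for illustration, not the patch's actual implementation.

/* Hypothetical sketch only: detect a wired-up AC97 codec from the
 * power-on strapping value latched by the chip. */
static bool stk1160_has_ac97(struct stk1160 *dev)
{
	u8 value;

	/* POSV_L holds the pin strapping sampled at power-on */
	stk1160_read_reg(dev, STK1160_POSV_L, &value);

	/* Assumed polarity: both AC97 lines strapped means a codec is wired */
	return (value & (STK1160_POSV_L_ACDOUT | STK1160_POSV_L_ACSYNC)) ==
	       (STK1160_POSV_L_ACDOUT | STK1160_POSV_L_ACSYNC);
}
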
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index fbccbb2eed9f..985af9933c7e 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Controlling the sensor via the STK1125 vendor specific control interface:
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index a212248bc2a3..6e7fc36b658f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 92bb48e3c74e..0284120ce246 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef STKWEBCAM_H
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 8902ee36bc94..b293dea6554f 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 8d104e5c4be3..8c265bd80faa 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 70dbaec1219e..097ac321b7e1 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index b01d3ee56e77..cbcc1472f1c7 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 26b2ebb62547..4afd4655d562 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -39,7 +35,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable)");
static unsigned int ir_clock_mhz = 12;
module_param(ir_clock_mhz, int, 0644);
-MODULE_PARM_DESC(enable_ir, "ir clock, in MHz");
+MODULE_PARM_DESC(ir_clock_mhz, "ir clock, in MHz");
#define URB_SUBMIT_DELAY 100 /* ms - Delay to submit an URB request on retrial and init */
#define URB_INT_LED_DELAY 100 /* ms - Delay to turn led on again on int mode */
@@ -429,7 +425,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
return 0;
ir = kzalloc(sizeof(*ir), GFP_ATOMIC);
- rc = rc_allocate_device();
+ rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!ir || !rc)
goto out;
@@ -456,7 +452,6 @@ int tm6000_ir_init(struct tm6000_core *dev)
ir->polling = 50;
INIT_DELAYED_WORK(&ir->work, tm6000_ir_handle_key);
}
- rc->driver_type = RC_DRIVER_SCANCODE;
snprintf(ir->name, sizeof(ir->name), "tm5600/60x0 IR (%s)",
dev->name);
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index a38c251ed57b..ab3fb74c476c 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index 4064a5e8fae1..aa43810d17f9 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index 99d15a55aa03..6a13a27c55d7 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/videodev2.h>
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index d9f3fa5db8dd..c4fdc1fa32ef 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -1375,8 +1371,11 @@ static int __tm6000_open(struct file *file)
/* initialize hardware on analog mode */
rc = tm6000_init_analog_mode(dev);
- if (rc < 0)
+ if (rc < 0) {
+ v4l2_fh_exit(&fh->fh);
+ kfree(fh);
return rc;
+ }
dev->mode = TM6000_MODE_ANALOG;
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index f2127944776f..7ec478d75f55 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/videodev2.h>
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index fc0219f1b7df..01c7e6d4481c 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/list.h>
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 2d9444905fdb..09693caa15e2 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include "dvb_frontend.h"
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.h b/drivers/media/usb/ttusb-dec/ttusbdecfe.h
index 15ccc3d1a20e..5aff58c1b075 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.h
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef TTUSBDECFE_H
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index d3b6d3dfaa09..8135614f395a 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -757,6 +757,12 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
data[1] = -ctrl->val & 0xff;
}
break;
+ case V4L2_CID_SHARPNESS:
+ index = USBTV_BASE + 0x0239;
+ data[0] = 0;
+ data[1] = ctrl->val;
+ size = 2;
+ break;
default:
kfree(data);
return -EINVAL;
@@ -825,6 +831,8 @@ int usbtv_video_init(struct usbtv *usbtv)
V4L2_CID_SATURATION, 0, 0x3ff, 1, 0x200);
v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
V4L2_CID_HUE, -0xdff, 0xdff, 1, 0x000);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0x0, 0xff, 1, 0x60);
ret = usbtv->ctrl.error;
if (ret < 0) {
dev_warn(usbtv->dev, "Could not initialize controls\n");
diff --git a/drivers/media/usb/usbvision/usbvision-cards.c b/drivers/media/usb/usbvision/usbvision-cards.c
index 3103d0d020e8..fc2418b9f37c 100644
--- a/drivers/media/usb/usbvision/usbvision-cards.c
+++ b/drivers/media/usb/usbvision/usbvision-cards.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index bf041a9e69db..3f87fbc80be2 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -1417,8 +1413,6 @@ static void usbvision_ctrl_urb_complete(struct urb *urb)
PDEBUG(DBG_IRQ, "");
usbvision->ctrl_urb_busy = 0;
- if (waitqueue_active(&usbvision->ctrl_urb_wq))
- wake_up_interruptible(&usbvision->ctrl_urb_wq);
}
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 120de2e020e1..5a3f788ad033 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index a7529196c327..f5c635a67d74 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Let's call the version 0.... until compression decoding is completely
* implemented.
*
@@ -1340,7 +1336,6 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
if (usbvision->ctrl_urb == NULL)
goto err_unreg;
- init_waitqueue_head(&usbvision->ctrl_urb_wq);
return usbvision;
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 4f2e4fde38f2..6ecdcd58248f 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -370,7 +366,6 @@ struct usb_usbvision {
unsigned char ctrl_urb_buffer[8];
int ctrl_urb_busy;
struct usb_ctrlrequest ctrl_urb_setup;
- wait_queue_head_t ctrl_urb_wq; /* Processes waiting */
/* configuration part */
int have_tuner;
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index 14561a5abb79..368f8f8dfcb5 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -75,14 +75,14 @@ static const struct file_operations uvc_debugfs_stats_fops = {
static struct dentry *uvc_debugfs_root_dir;
-int uvc_debugfs_init_stream(struct uvc_streaming *stream)
+void uvc_debugfs_init_stream(struct uvc_streaming *stream)
{
struct usb_device *udev = stream->dev->udev;
struct dentry *dent;
char dir_name[32];
if (uvc_debugfs_root_dir == NULL)
- return -ENODEV;
+ return;
sprintf(dir_name, "%u-%u", udev->bus->busnum, udev->devnum);
@@ -90,7 +90,7 @@ int uvc_debugfs_init_stream(struct uvc_streaming *stream)
if (IS_ERR_OR_NULL(dent)) {
uvc_printk(KERN_INFO, "Unable to create debugfs %s "
"directory.\n", dir_name);
- return -ENODEV;
+ return;
}
stream->debugfs_dir = dent;
@@ -100,10 +100,8 @@ int uvc_debugfs_init_stream(struct uvc_streaming *stream)
if (IS_ERR_OR_NULL(dent)) {
uvc_printk(KERN_INFO, "Unable to create debugfs stats file.\n");
uvc_debugfs_cleanup_stream(stream);
- return -ENODEV;
+ return;
}
-
- return 0;
}
void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
@@ -115,18 +113,17 @@ void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
stream->debugfs_dir = NULL;
}
-int uvc_debugfs_init(void)
+void uvc_debugfs_init(void)
{
struct dentry *dir;
dir = debugfs_create_dir("uvcvideo", usb_debug_root);
if (IS_ERR_OR_NULL(dir)) {
uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
- return -ENODATA;
+ return;
}
uvc_debugfs_root_dir = dir;
- return 0;
}
void uvc_debugfs_cleanup(void)
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 77edd206d345..aa2199775cb8 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -43,6 +43,11 @@ uvc_queue_to_stream(struct uvc_video_queue *queue)
return container_of(queue, struct uvc_streaming, queue);
}
+static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
+{
+ return container_of(buf, struct uvc_buffer, buf);
+}
+
/*
* Return all queued buffers to videobuf2 in the requested state.
*
@@ -89,7 +94,7 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
@@ -116,7 +121,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
@@ -138,7 +143,7 @@ static void uvc_buffer_finish(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
- struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
+ struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
if (vb->state == VB2_BUF_STATE_DONE)
uvc_video_clock_update(stream, vbuf, buf);
@@ -412,7 +417,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
nextbuf = NULL;
spin_unlock_irqrestore(&queue->irqlock, flags);
- buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index f3c1c852e401..07a6c833ef7b 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1262,8 +1262,7 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, stream->bulk.header,
stream->bulk.payload_size);
if (buf->state == UVC_BUF_STATE_READY)
- buf = uvc_queue_next_buffer(&stream->queue,
- buf);
+ uvc_queue_next_buffer(&stream->queue, buf);
}
stream->bulk.header_size = 0;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 3d6cc62f3cd2..4205e7a423f0 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -757,9 +757,9 @@ void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf);
/* debugfs and statistics */
-int uvc_debugfs_init(void);
+void uvc_debugfs_init(void);
void uvc_debugfs_cleanup(void);
-int uvc_debugfs_init_stream(struct uvc_streaming *stream);
+void uvc_debugfs_init_stream(struct uvc_streaming *stream);
void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream);
size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 3950708cbb32..f2d6fc03dda0 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 5bada202b2d3..96cc733f35ef 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -42,7 +42,8 @@ static bool match_devname(struct v4l2_subdev *sd,
static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
- return sd->of_node == asd->match.of.node;
+ return !of_node_cmp(of_node_full_name(sd->of_node),
+ of_node_full_name(asd->match.of.node));
}
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
@@ -99,18 +100,11 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
{
int ret;
- /* Remove from the waiting list */
- list_del(&asd->list);
- sd->asd = asd;
- sd->notifier = notifier;
-
if (notifier->bound) {
ret = notifier->bound(notifier, sd, asd);
if (ret < 0)
return ret;
}
- /* Move from the global subdevice list to notifier's done */
- list_move(&sd->async_list, &notifier->done);
ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
if (ret < 0) {
@@ -119,6 +113,14 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
return ret;
}
+ /* Remove from the waiting list */
+ list_del(&asd->list);
+ sd->asd = asd;
+ sd->notifier = notifier;
+
+ /* Move from the global subdevice list to notifier's done */
+ list_move(&sd->async_list, &notifier->done);
+
if (list_empty(&notifier->waiting) && notifier->complete)
return notifier->complete(notifier);
@@ -168,9 +170,6 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
mutex_lock(&list_lock);
- /* Keep also completed notifiers on the list */
- list_add(&notifier->list, &notifier_list);
-
list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
int ret;
@@ -185,6 +184,9 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
}
}
+ /* Keep also completed notifiers on the list */
+ list_add(&notifier->list, &notifier_list);
+
mutex_unlock(&list_lock);
return 0;
@@ -202,7 +204,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
if (!notifier->v4l2_dev)
return;
- dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
+ dev = kmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_err(notifier->v4l2_dev->dev,
"Failed to allocate device cache!\n");
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 47001e25fd9e..b9e08e3d6e0e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3367,6 +3367,9 @@ static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+ if (ctrl == NULL)
+ return;
+
v4l2_ctrl_lock(ctrl);
list_del(&sev->node);
v4l2_ctrl_unlock(ctrl);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 62bbed76dbbc..f364cc1b521d 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -253,6 +253,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
kfree(vdev);
goto clean_up;
}
+ sd->devnode = vdev;
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.info.dev.major = VIDEO_MAJOR;
sd->entity.info.dev.minor = vdev->minor;
@@ -270,7 +271,6 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
}
}
#endif
- sd->devnode = vdev;
}
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8d3171c6bee8..a75df6cb141f 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -15,11 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <media/v4l2-dev.h>
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c183f0996fa1..3895999bf880 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -15,11 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/bitops.h>
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 8bef4331bd51..303980b71aae 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -198,14 +198,20 @@ EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
int v4l_enable_media_source(struct video_device *vdev)
{
struct media_device *mdev = vdev->entity.graph_obj.mdev;
- int ret;
+ int ret = 0, err;
- if (!mdev || !mdev->enable_source)
+ if (!mdev)
return 0;
- ret = mdev->enable_source(&vdev->entity, &vdev->pipe);
- if (ret)
- return -EBUSY;
- return 0;
+
+ mutex_lock(&mdev->graph_mutex);
+ if (!mdev->enable_source)
+ goto end;
+ err = mdev->enable_source(&vdev->entity, &vdev->pipe);
+ if (err)
+ ret = -EBUSY;
+end:
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l_enable_media_source);
@@ -213,8 +219,12 @@ void v4l_disable_media_source(struct video_device *vdev)
{
struct media_device *mdev = vdev->entity.graph_obj.mdev;
- if (mdev && mdev->disable_source)
- mdev->disable_source(&vdev->entity);
+ if (mdev) {
+ mutex_lock(&mdev->graph_mutex);
+ if (mdev->disable_source)
+ mdev->disable_source(&vdev->entity);
+ mutex_unlock(&mdev->graph_mutex);
+ }
}
EXPORT_SYMBOL_GPL(v4l_disable_media_source);
@@ -256,13 +266,13 @@ EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
* Return the total number of users of all video device nodes in the pipeline.
*/
static int pipeline_pm_use_count(struct media_entity *entity,
- struct media_entity_graph *graph)
+ struct media_graph *graph)
{
int use = 0;
- media_entity_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, entity);
- while ((entity = media_entity_graph_walk_next(graph))) {
+ while ((entity = media_graph_walk_next(graph))) {
if (is_media_entity_v4l2_video_device(entity))
use += entity->use_count;
}
@@ -315,7 +325,7 @@ static int pipeline_pm_power_one(struct media_entity *entity, int change)
* Return 0 on success or a negative error code on failure.
*/
static int pipeline_pm_power(struct media_entity *entity, int change,
- struct media_entity_graph *graph)
+ struct media_graph *graph)
{
struct media_entity *first = entity;
int ret = 0;
@@ -323,18 +333,18 @@ static int pipeline_pm_power(struct media_entity *entity, int change,
if (!change)
return 0;
- media_entity_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, entity);
- while (!ret && (entity = media_entity_graph_walk_next(graph)))
+ while (!ret && (entity = media_graph_walk_next(graph)))
if (is_media_entity_v4l2_subdev(entity))
ret = pipeline_pm_power_one(entity, change);
if (!ret)
return ret;
- media_entity_graph_walk_start(graph, first);
+ media_graph_walk_start(graph, first);
- while ((first = media_entity_graph_walk_next(graph))
+ while ((first = media_graph_walk_next(graph))
&& first != entity)
if (is_media_entity_v4l2_subdev(first))
pipeline_pm_power_one(first, -change);
@@ -368,7 +378,7 @@ EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_use);
int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
{
- struct media_entity_graph *graph = &link->graph_obj.mdev->pm_count_walk;
+ struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
struct media_entity *source = link->source->entity;
struct media_entity *sink = link->sink->entity;
int source_use;
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index 93b33681776c..4f59f442dd0a 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -26,7 +26,7 @@ static int v4l2_of_parse_csi_bus(const struct device_node *node,
struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
struct property *prop;
bool have_clk_lane = false;
- unsigned int flags = 0;
+ unsigned int flags = 0, lanes_used = 0;
u32 v;
prop = of_find_property(node, "data-lanes", NULL);
@@ -38,6 +38,12 @@ static int v4l2_of_parse_csi_bus(const struct device_node *node,
lane = of_prop_next_u32(prop, lane, &v);
if (!lane)
break;
+
+ if (lanes_used & BIT(v))
+ pr_warn("%s: duplicated lane %u in data-lanes\n",
+ node->full_name, v);
+ lanes_used |= BIT(v);
+
bus->data_lanes[i] = v;
}
bus->num_data_lanes = i;
@@ -63,6 +69,11 @@ static int v4l2_of_parse_csi_bus(const struct device_node *node,
}
if (!of_property_read_u32(node, "clock-lanes", &v)) {
+ if (lanes_used & BIT(v))
+ pr_warn("%s: duplicated lane %u in clock-lanes\n",
+ node->full_name, v);
+ lanes_used |= BIT(v);
+
bus->clock_lane = v;
have_clk_lane = true;
}
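
A small illustration of the duplicate-lane detection these hunks add (sketch only; the helper below is hypothetical and not part of v4l2-of.c): each physical lane index sets one bit in a mask, so a lane listed twice in data-lanes, or reused as the clock lane, is caught before it is recorded.

#include <linux/bitops.h>
#include <linux/printk.h>

/* Hypothetical helper: record a lane index, warning on reuse */
static void mark_lane_used(unsigned int *lanes_used, u32 lane,
			   const char *prop_name)
{
	if (*lanes_used & BIT(lane))
		pr_warn("duplicated lane %u in %s\n", lane, prop_name);
	*lanes_used |= BIT(lane);
}
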
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 34a1e7c8b306..da78497ae5ed 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/ioctl.h>
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf9806..76382c858c35 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
struct ms_id_register id_reg;
if (!(*mrq)) {
- memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
sizeof(struct ms_id_register));
*mrq = &card->current_mrq;
return 0;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index f3512404bc52..99e651c27fb7 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0;
}
-static int msb_prepare_req(struct request_queue *q, struct request *req)
-{
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_dump_rq_flags(req, "MS unsupported request");
- return BLKPREP_KILL;
- }
- req->rq_flags |= RQF_DONTPREP;
- return BLKPREP_OK;
-}
-
static void msb_submit_req(struct request_queue *q)
{
struct memstick_dev *card = q->queuedata;
@@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card)
}
msb->queue->queuedata = card;
- blk_queue_prep_rq(msb->queue, msb_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fa0746d182ff..c00d8a266878 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card)
spin_unlock_irqrestore(&msb->q_lock, flags);
}
-static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
-{
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_dump_rq_flags(req, "MSPro unsupported request");
- return BLKPREP_KILL;
- }
-
- req->rq_flags |= RQF_DONTPREP;
-
- return BLKPREP_OK;
-}
-
static void mspro_block_submit_req(struct request_queue *q)
{
struct memstick_dev *card = q->queuedata;
@@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
}
msb->queue->queuedata = card;
- blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index add6a3a6ef0d..98eafae78576 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -119,6 +119,7 @@ static struct scsi_host_template mptfc_driver_template = {
.target_destroy = mptfc_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = mptfc_abort,
.eh_device_reset_handler = mptfc_dev_reset,
.eh_bus_reset_handler = mptfc_bus_reset,
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 8946e19dbfc8..8a24494f8c4d 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -65,7 +65,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667acde4..f6308ad35b19 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1983,6 +1983,7 @@ static struct scsi_host_template mptsas_driver_template = {
.target_destroy = mptsas_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
+ .eh_timed_out = mptsas_eh_timed_out,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
.eh_host_reset_handler = mptscsih_host_reset,
@@ -2320,10 +2321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
- memcpy(req->sense, smprep, sizeof(*smprep));
- req->sense_len = sizeof(*smprep);
- req->resid_len = 0;
- rsp->resid_len -= smprep->ResponseDataLength;
+ memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
+ scsi_req(req)->sense_len = sizeof(*smprep);
+ scsi_req(req)->resid_len = 0;
+ scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
@@ -5398,7 +5399,6 @@ mptsas_init(void)
sas_attach_transport(&mptsas_transport_functions);
if (!mptsas_transport_template)
return -ENODEV;
- mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
"mptscsih_io_done");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 1ef7575547e6..be42957a78e1 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -56,6 +56,7 @@
* document number TBD : Wildcat Point-LP
* document number TBD : 9 Series
* document number TBD : Lewisburg
+ * document number TBD : Apollo Lake SoC
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -83,6 +84,17 @@
#define ACPIBASE_GCS_OFF 0x3410
#define ACPIBASE_GCS_END 0x3414
+#define SPIBASE_BYT 0x54
+#define SPIBASE_BYT_SZ 512
+#define SPIBASE_BYT_EN BIT(1)
+
+#define SPIBASE_LPT 0x3800
+#define SPIBASE_LPT_SZ 512
+#define BCR 0xdc
+#define BCR_WPD BIT(0)
+
+#define SPIBASE_APL_SZ 4096
+
#define GPIOBASE_ICH0 0x58
#define GPIOCTRL_ICH0 0x5C
#define GPIOBASE_ICH6 0x48
@@ -133,6 +145,12 @@ static struct resource gpio_ich_res[] = {
},
};
+static struct resource intel_spi_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct mfd_cell lpc_ich_wdt_cell = {
.name = "iTCO_wdt",
.num_resources = ARRAY_SIZE(wdt_ich_res),
@@ -147,6 +165,14 @@ static struct mfd_cell lpc_ich_gpio_cell = {
.ignore_resource_conflicts = true,
};
+
+static struct mfd_cell lpc_ich_spi_cell = {
+ .name = "intel-spi",
+ .num_resources = ARRAY_SIZE(intel_spi_res),
+ .resources = intel_spi_res,
+ .ignore_resource_conflicts = true,
+};
+
/* chipset related info */
enum lpc_chipsets {
LPC_ICH = 0, /* ICH */
@@ -216,6 +242,7 @@ enum lpc_chipsets {
LPC_BRASWELL, /* Braswell SoC */
LPC_LEWISBURG, /* Lewisburg */
LPC_9S, /* 9 Series */
+ LPC_APL, /* Apollo Lake SoC */
};
static struct lpc_ich_info lpc_chipset_info[] = {
@@ -494,10 +521,12 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Lynx Point",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
+ .spi_type = INTEL_SPI_LPT,
},
[LPC_LPT_LP] = {
.name = "Lynx Point_LP",
.iTCO_version = 2,
+ .spi_type = INTEL_SPI_LPT,
},
[LPC_WBG] = {
.name = "Wellsburg",
@@ -511,6 +540,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_BAYTRAIL] = {
.name = "Bay Trail SoC",
.iTCO_version = 3,
+ .spi_type = INTEL_SPI_BYT,
},
[LPC_COLETO] = {
.name = "Coleto Creek",
@@ -519,10 +549,12 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_WPT_LP] = {
.name = "Wildcat Point_LP",
.iTCO_version = 2,
+ .spi_type = INTEL_SPI_LPT,
},
[LPC_BRASWELL] = {
.name = "Braswell SoC",
.iTCO_version = 3,
+ .spi_type = INTEL_SPI_BYT,
},
[LPC_LEWISBURG] = {
.name = "Lewisburg",
@@ -533,6 +565,10 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
+ [LPC_APL] = {
+ .name = "Apollo Lake SoC",
+ .spi_type = INTEL_SPI_BXT,
+ },
};
/*
@@ -681,6 +717,7 @@ static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
{ PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
{ PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+ { PCI_VDEVICE(INTEL, 0x5ae8), LPC_APL},
{ PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
@@ -1056,6 +1093,94 @@ wdt_done:
return ret;
}
+static int lpc_ich_init_spi(struct pci_dev *dev)
+{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+ struct resource *res = &intel_spi_res[0];
+ struct intel_spi_boardinfo *info;
+ u32 spi_base, rcba, bcr;
+
+ info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->type = lpc_chipset_info[priv->chipset].spi_type;
+
+ switch (info->type) {
+ case INTEL_SPI_BYT:
+ pci_read_config_dword(dev, SPIBASE_BYT, &spi_base);
+ if (spi_base & SPIBASE_BYT_EN) {
+ res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
+ res->end = res->start + SPIBASE_BYT_SZ - 1;
+ }
+ break;
+
+ case INTEL_SPI_LPT:
+ pci_read_config_dword(dev, RCBABASE, &rcba);
+ if (rcba & 1) {
+ spi_base = round_down(rcba, SPIBASE_LPT_SZ);
+ res->start = spi_base + SPIBASE_LPT;
+ res->end = res->start + SPIBASE_LPT_SZ - 1;
+
+ /*
+ * Try to make the flash chip writeable now by
+ * setting BCR_WPD. If it fails we tell the driver
+ * that it can only read the chip.
+ */
+ pci_read_config_dword(dev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(dev, BCR, bcr);
+ pci_read_config_dword(dev, BCR, &bcr);
+ }
+ info->writeable = !!(bcr & BCR_WPD);
+ }
+ break;
+
+ case INTEL_SPI_BXT: {
+ unsigned int p2sb = PCI_DEVFN(13, 0);
+ unsigned int spi = PCI_DEVFN(13, 2);
+ struct pci_bus *bus = dev->bus;
+
+ /*
+ * The P2SB is hidden by the BIOS and we need to unhide it in
+ * order to read the BAR of the SPI flash device. Once that is
+ * done we hide it again.
+ */
+ pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x0);
+ pci_bus_read_config_dword(bus, spi, PCI_BASE_ADDRESS_0,
+ &spi_base);
+ if (spi_base != ~0) {
+ res->start = spi_base & 0xfffffff0;
+ res->end = res->start + SPIBASE_APL_SZ - 1;
+
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_bus_write_config_dword(bus, spi, BCR, bcr);
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ }
+ info->writeable = !!(bcr & BCR_WPD);
+ }
+
+ pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!res->start)
+ return -ENODEV;
+
+ lpc_ich_spi_cell.platform_data = info;
+ lpc_ich_spi_cell.pdata_size = sizeof(*info);
+
+ return mfd_add_devices(&dev->dev, PLATFORM_DEVID_NONE,
+ &lpc_ich_spi_cell, 1, NULL, 0, NULL);
+}
+
static int lpc_ich_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
@@ -1099,6 +1224,12 @@ static int lpc_ich_probe(struct pci_dev *dev,
cell_added = true;
}
+ if (lpc_chipset_info[priv->chipset].spi_type) {
+ ret = lpc_ich_init_spi(dev);
+ if (!ret)
+ cell_added = true;
+ }
+
/*
* We only care if at least one or none of the cells registered
* successfully.
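
To summarize the BCR handling shared by the LPT and BXT branches above (a hypothetical refactoring for illustration, not code from the patch): the write-protect-disable bit is set once and read back, and whether it sticks decides the writeable flag handed to the intel-spi child device.

/* Sketch: attempt to lift SPI flash write protection via BCR_WPD and
 * report whether writes are actually possible afterwards. */
static bool lpc_ich_try_enable_writes(struct pci_dev *dev)
{
	u32 bcr;

	pci_read_config_dword(dev, BCR, &bcr);
	if (!(bcr & BCR_WPD)) {
		bcr |= BCR_WPD;
		pci_write_config_dword(dev, BCR, bcr);
		/* Read back: firmware may have locked the bit down */
		pci_read_config_dword(dev, BCR, &bcr);
	}
	return !!(bcr & BCR_WPD);
}
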
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 7f1b282d7d96..cb290b8ca0c8 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1396,7 +1396,7 @@ int genwqe_device_remove(struct genwqe_dev *cd)
* application which will decrease this reference from
* 1/unused to 0/illegal and not from 2/used 1/empty.
*/
- rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
+ rc = kref_read(&cd->cdev_genwqe.kobj.kref);
if (rc != 1) {
dev_err(&pci_dev->dev,
"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index cfa1039c62e7..67d27be60405 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -19,8 +19,12 @@ void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
-void lkdtm_ATOMIC_UNDERFLOW(void);
-void lkdtm_ATOMIC_OVERFLOW(void);
+void lkdtm_REFCOUNT_SATURATE_INC(void);
+void lkdtm_REFCOUNT_SATURATE_ADD(void);
+void lkdtm_REFCOUNT_ZERO_DEC(void);
+void lkdtm_REFCOUNT_ZERO_INC(void);
+void lkdtm_REFCOUNT_ZERO_SUB(void);
+void lkdtm_REFCOUNT_ZERO_ADD(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index 91edd0b55e5c..cba0837aee2e 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -6,6 +6,7 @@
*/
#include "lkdtm.h"
#include <linux/list.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
struct lkdtm_list {
@@ -129,28 +130,86 @@ void lkdtm_HUNG_TASK(void)
schedule();
}
-void lkdtm_ATOMIC_UNDERFLOW(void)
+void lkdtm_REFCOUNT_SATURATE_INC(void)
{
- atomic_t under = ATOMIC_INIT(INT_MIN);
+ refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
- pr_info("attempting good atomic increment\n");
- atomic_inc(&under);
- atomic_dec(&under);
+ pr_info("attempting good refcount decrement\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
- pr_info("attempting bad atomic underflow\n");
- atomic_dec(&under);
+ pr_info("attempting bad refcount inc overflow\n");
+ refcount_inc(&over);
+ refcount_inc(&over);
+ if (refcount_read(&over) == UINT_MAX)
+ pr_err("Correctly stayed saturated, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount wrapped\n");
+}
+
+void lkdtm_REFCOUNT_SATURATE_ADD(void)
+{
+ refcount_t over = REFCOUNT_INIT(UINT_MAX - 1);
+
+ pr_info("attempting good refcount decrement\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
+
+ pr_info("attempting bad refcount add overflow\n");
+ refcount_add(2, &over);
+ if (refcount_read(&over) == UINT_MAX)
+ pr_err("Correctly stayed saturated, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount wrapped\n");
+}
+
+void lkdtm_REFCOUNT_ZERO_DEC(void)
+{
+ refcount_t zero = REFCOUNT_INIT(1);
+
+ pr_info("attempting bad refcount decrement to zero\n");
+ refcount_dec(&zero);
+ if (refcount_read(&zero) == 0)
+ pr_err("Stayed at zero, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount went crazy\n");
}
-void lkdtm_ATOMIC_OVERFLOW(void)
+void lkdtm_REFCOUNT_ZERO_SUB(void)
{
- atomic_t over = ATOMIC_INIT(INT_MAX);
+ refcount_t zero = REFCOUNT_INIT(1);
+
+ pr_info("attempting bad refcount subtract past zero\n");
+ if (!refcount_sub_and_test(2, &zero))
+ pr_info("wrap attempt was noticed\n");
+ if (refcount_read(&zero) == 1)
+ pr_err("Correctly stayed above 0, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount wrapped\n");
+}
- pr_info("attempting good atomic decrement\n");
- atomic_dec(&over);
- atomic_inc(&over);
+void lkdtm_REFCOUNT_ZERO_INC(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
- pr_info("attempting bad atomic overflow\n");
- atomic_inc(&over);
+ pr_info("attempting bad refcount increment from zero\n");
+ refcount_inc(&zero);
+ if (refcount_read(&zero) == 0)
+ pr_err("Stayed at zero, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
+}
+
+void lkdtm_REFCOUNT_ZERO_ADD(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount addition from zero\n");
+ refcount_add(2, &zero);
+ if (refcount_read(&zero) == 0)
+ pr_err("Stayed at zero, but no BUG?!\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
}
void lkdtm_CORRUPT_LIST_ADD(void)
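
A compact sketch of the refcount_t semantics the new LKDTM tests above exercise (illustration only, assuming the saturation behaviour these tests check for): the counter pins at its ceiling instead of wrapping, and refuses to move again once it has reached zero.

#include <linux/kernel.h>
#include <linux/refcount.h>

/* Sketch: the over/underflow behaviour probed by the tests above */
static void refcount_semantics_demo(void)
{
	refcount_t r = REFCOUNT_INIT(UINT_MAX - 1);

	refcount_inc(&r);	/* reaches the ceiling */
	refcount_inc(&r);	/* saturates with a warning, does not wrap */

	refcount_set(&r, 1);
	refcount_dec(&r);	/* 1 -> 0 warns; refcount_dec_and_test() is
				 * the API meant for the final put */
	refcount_inc(&r);	/* refused: stays at zero, warns */
}
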
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 7eeb71a75549..16e4cf110930 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -220,8 +220,12 @@ struct crashtype crashtypes[] = {
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
- CRASHTYPE(ATOMIC_UNDERFLOW),
- CRASHTYPE(ATOMIC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_SATURATE_INC),
+ CRASHTYPE(REFCOUNT_SATURATE_ADD),
+ CRASHTYPE(REFCOUNT_ZERO_DEC),
+ CRASHTYPE(REFCOUNT_ZERO_INC),
+ CRASHTYPE(REFCOUNT_ZERO_SUB),
+ CRASHTYPE(REFCOUNT_ZERO_ADD),
CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c6217a4993ad..a617aa5a3ad8 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -67,7 +67,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
me_cl->props.max_number_of_connections,
me_cl->props.max_msg_length,
me_cl->props.single_recv_buf,
- atomic_read(&me_cl->refcnt.refcount));
+ kref_read(&me_cl->refcnt));
mei_me_cl_put(me_cl);
}
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index cdfa8520a4b1..fc1ecdaaa9ca 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -12,6 +12,16 @@ config PWRSEQ_EMMC
This driver can also be built as a module. If so, the module
will be called pwrseq_emmc.
+config PWRSEQ_SD8787
+ tristate "HW reset support for SD8787 BT + Wifi module"
+ depends on OF && (MWIFIEX || BT_MRVL_SDIO)
+ help
+ This selects hardware reset support for the SD8787 BT + Wifi
+ module. By default this option is set to n.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_sd8787.
+
config PWRSEQ_SIMPLE
tristate "Simple HW reset support for MMC"
default y
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index b2a257dc644f..7e3ed1aeada2 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,9 +7,10 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o slot-gpio.o
+ slot-gpio.o
mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index cb1698f268f1..1621fa08e206 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -47,6 +47,13 @@
#include "queue.h"
#include "block.h"
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "quirks.h"
+#include "sd_ops.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -54,12 +61,6 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define INAND_CMD38_ARG_EXT_CSD 113
-#define INAND_CMD38_ARG_ERASE 0x00
-#define INAND_CMD38_ARG_TRIM 0x01
-#define INAND_CMD38_ARG_SECERASE 0x80
-#define INAND_CMD38_ARG_SECTRIM1 0x81
-#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -84,7 +85,6 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
-static DEFINE_SPINLOCK(mmc_blk_lock);
/*
* There is one mmc_blk_data per slot.
@@ -157,11 +157,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
-
- spin_lock(&mmc_blk_lock);
- ida_remove(&mmc_blk_ida, devidx);
- spin_unlock(&mmc_blk_lock);
-
+ ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
}
@@ -442,9 +438,9 @@ out:
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
- struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct mmc_request mrq = {};
struct scatterlist sg;
int err;
int is_rpmb = false;
@@ -762,15 +758,15 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
return 0;
}
-static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
int err;
u32 result;
__be32 *blocks;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -780,9 +776,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
- return (u32)-1;
+ return err;
if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
- return (u32)-1;
+ return -EIO;
memset(&cmd, 0, sizeof(struct mmc_command));
@@ -802,7 +798,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
blocks = kmalloc(4, GFP_KERNEL);
if (!blocks)
- return (u32)-1;
+ return -ENOMEM;
sg_init_one(&sg, blocks, 4);
@@ -812,14 +808,16 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
kfree(blocks);
if (cmd.error || data.error)
- result = (u32)-1;
+ return -EIO;
+
+ *written_blocks = result;
- return result;
+ return 0;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SEND_STATUS;
@@ -884,7 +882,7 @@ static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
struct request *req, bool *gen_err, u32 *stop_status)
{
struct mmc_host *host = card->host;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
bool use_r1b_resp = rq_data_dir(req) == WRITE;
@@ -1143,7 +1141,7 @@ int mmc_access_rpmb(struct mmc_queue *mq)
return false;
}
-static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
@@ -1152,7 +1150,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
- goto out;
+ goto fail;
}
from = blk_rq_pos(req);
@@ -1164,29 +1162,26 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-retry:
- if (card->quirks & MMC_QUIRK_INAND_CMD38) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- INAND_CMD38_ARG_EXT_CSD,
- arg == MMC_TRIM_ARG ?
- INAND_CMD38_ARG_TRIM :
- INAND_CMD38_ARG_ERASE,
- 0);
- if (err)
- goto out;
- }
- err = mmc_erase(card, from, nr, arg);
-out:
- if (err == -EIO && !mmc_blk_reset(md, card->host, type))
- goto retry;
+ do {
+ err = 0;
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ }
+ if (!err)
+ err = mmc_erase(card, from, nr, arg);
+ } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (!err)
mmc_blk_reset_success(md, type);
+fail:
blk_end_request(req, err, blk_rq_bytes(req));
-
- return err ? 0 : 1;
}
-static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1249,11 +1244,9 @@ out_retry:
mmc_blk_reset_success(md, type);
out:
blk_end_request(req, err, blk_rq_bytes(req));
-
- return err ? 0 : 1;
}
-static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
@@ -1264,8 +1257,6 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
ret = -EIO;
blk_end_request_all(req, ret);
-
- return ret ? 0 : 1;
}
/*
@@ -1303,7 +1294,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- mmc_active);
+ areq);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
int need_retune = card->host->need_retune;
@@ -1559,17 +1550,19 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_err_check;
+ mqrq->areq.mrq = &brq->mrq;
+ mqrq->areq.err_check = mmc_blk_err_check;
mmc_queue_bounce_pre(mqrq);
}
-static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- int ret)
+static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ bool old_req_pending)
{
struct mmc_queue_req *mq_rq;
+ bool req_pending;
+
mq_rq = container_of(brq, struct mmc_queue_req, brq);
/*
@@ -1582,62 +1575,104 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
*/
if (mmc_card_sd(card)) {
u32 blocks;
+ int err;
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- ret = blk_end_request(req, 0, blocks << 9);
- }
+ err = mmc_sd_num_wr_blocks(card, &blocks);
+ if (err)
+ req_pending = old_req_pending;
+ else
+ req_pending = blk_end_request(req, 0, blocks << 9);
} else {
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+ req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
}
- return ret;
+ return req_pending;
+}
+
+static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
+{
+ if (mmc_card_removed(card))
+ req->rq_flags |= RQF_QUIET;
+ while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+}
+
+/**
+ * mmc_blk_rw_try_restart() - tries to restart the current async request
+ * @mq: the queue with the card and host to restart
+ * @req: a new request that wants to be started after the current one
+ */
+static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
+{
+ if (!req)
+ return;
+
+ /*
+ * If the card was removed, just cancel everything and return.
+ */
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ blk_end_request_all(req, -EIO);
+ return;
+ }
+ /* Else proceed and try to restart the current async request */
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
+ mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq;
- int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
+ int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
- struct mmc_async_req *areq;
+ struct request *old_req;
+ struct mmc_async_req *new_areq;
+ struct mmc_async_req *old_areq;
+ bool req_pending = true;
- if (!rqc && !mq->mqrq_prev->req)
- return 0;
+ if (!new_req && !mq->mqrq_prev->req)
+ return;
do {
- if (rqc) {
+ if (new_req) {
/*
* When 4KB native sector is enabled, reads and writes
* are only allowed in multiples of 8 blocks
*/
if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
+ !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- rqc->rq_disk->disk_name);
- mq_rq = mq->mqrq_cur;
- req = rqc;
- rqc = NULL;
- goto cmd_abort;
+ new_req->rq_disk->disk_name);
+ mmc_blk_rw_cmd_abort(card, new_req);
+ return;
}
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- areq = &mq->mqrq_cur->mmc_active;
+ new_areq = &mq->mqrq_cur->areq;
} else
- areq = NULL;
- areq = mmc_start_req(card->host, areq, &status);
- if (!areq) {
+ new_areq = NULL;
+
+ old_areq = mmc_start_areq(card->host, new_areq, &status);
+ if (!old_areq) {
+ /*
+ * We have just put the first request into the pipeline
+ * and there is nothing more to do until it is
+ * complete.
+ */
if (status == MMC_BLK_NEW_REQUEST)
- mq->flags |= MMC_QUEUE_NEW_REQUEST;
- return 0;
+ mq->new_request = true;
+ return;
}
- mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ /*
+ * An asynchronous request has been completed and we proceed
+ * to handle its result.
+ */
+ mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
brq = &mq_rq->brq;
- req = mq_rq->req;
- type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ old_req = mq_rq->req;
+ type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) {
@@ -1648,28 +1683,32 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
*/
mmc_blk_reset_success(md, type);
- ret = blk_end_request(req, 0,
- brq->data.bytes_xfered);
-
+ req_pending = blk_end_request(old_req, 0,
+ brq->data.bytes_xfered);
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
* were returned by the host controller, it's a bug.
*/
- if (status == MMC_BLK_SUCCESS && ret) {
+ if (status == MMC_BLK_SUCCESS && req_pending) {
pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(req),
+ __func__, blk_rq_bytes(old_req),
brq->data.bytes_xfered);
- rqc = NULL;
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ return;
}
break;
case MMC_BLK_CMD_ERR:
- ret = mmc_blk_cmd_err(md, card, brq, req, ret);
- if (mmc_blk_reset(md, card->host, type))
- goto cmd_abort;
- if (!ret)
- goto start_new_req;
+ req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
+ if (mmc_blk_reset(md, card->host, type)) {
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
+ if (!req_pending) {
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
@@ -1679,22 +1718,27 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
case MMC_BLK_ABORT:
if (!mmc_blk_reset(md, card->host, type))
break;
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
case MMC_BLK_DATA_ERR: {
int err;
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV)
- goto cmd_abort;
+ if (err == -ENODEV) {
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
/* Fall through */
}
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
pr_warn("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
+ old_req->rq_disk->disk_name);
disable_multi = 1;
break;
}
@@ -1703,57 +1747,40 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* time, so we only reach here after trying to
* read a single sector.
*/
- ret = blk_end_request(req, -EIO,
- brq->data.blksz);
- if (!ret)
- goto start_new_req;
+ req_pending = blk_end_request(old_req, -EIO,
+ brq->data.blksz);
+ if (!req_pending) {
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
break;
case MMC_BLK_NOMEDIUM:
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
default:
pr_err("%s: Unhandled return value (%d)",
- req->rq_disk->disk_name, status);
- goto cmd_abort;
+ old_req->rq_disk->disk_name, status);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
}
- if (ret) {
+ if (req_pending) {
/*
* In case of an incomplete request
* prepare it again and resend.
*/
mmc_blk_rw_rq_prep(mq_rq, card,
disable_multi, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
+ mmc_start_areq(card->host,
+ &mq_rq->areq, NULL);
mq_rq->brq.retune_retry_done = retune_retry_done;
}
- } while (ret);
-
- return 1;
-
- cmd_abort:
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
-
- start_new_req:
- if (rqc) {
- if (mmc_card_removed(card)) {
- rqc->rq_flags |= RQF_QUIET;
- blk_end_request_all(rqc, -EIO);
- } else {
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- mmc_start_req(card->host,
- &mq->mqrq_cur->mmc_active, NULL);
- }
- }
-
- return 0;
+ } while (req_pending);
}
-int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->blkdata;
@@ -1769,32 +1796,31 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req) {
blk_end_request_all(req, -EIO);
}
- ret = 0;
goto out;
}
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ mq->new_request = false;
if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_discard_rq(mq, req);
+ mmc_blk_issue_discard_rq(mq, req);
} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
/* complete ongoing async transfer before issuing secure erase */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ mmc_blk_issue_secdiscard_rq(mq, req);
} else if (req && req_op(req) == REQ_OP_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_flush(mq, req);
+ mmc_blk_issue_flush(mq, req);
} else {
- ret = mmc_blk_issue_rw_rq(mq, req);
+ mmc_blk_issue_rw_rq(mq, req);
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
+ if ((!req && !mq->new_request) || req_is_special)
/*
* Release host when there are no more requests
* and after special request (discard, flush) is done.
@@ -1802,7 +1828,6 @@ out:
* the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
*/
mmc_put_card(card);
- return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -1821,23 +1846,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
-again:
- if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&mmc_blk_lock);
- ret = ida_get_new(&mmc_blk_ida, &devidx);
- spin_unlock(&mmc_blk_lock);
-
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ERR_PTR(ret);
-
- if (devidx >= max_devices) {
- ret = -ENOSPC;
- goto out;
- }
+ devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return ERR_PTR(devidx);
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
@@ -1926,9 +1937,7 @@ again:
err_kfree:
kfree(md);
out:
- spin_lock(&mmc_blk_lock);
- ida_remove(&mmc_blk_ida, devidx);
- spin_unlock(&mmc_blk_lock);
+ ida_simple_remove(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
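For reference, the hunk above replaces the open-coded ida_pre_get()/ida_get_new() retry loop with the simpler ida_simple_get()/ida_simple_remove() pair. A minimal sketch of that allocation pattern, assuming the same "index in [0, max_devices)" semantics as mmc_blk_alloc_req() above (the helper names are illustrative only):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_index(unsigned int max_devices)
	{
		/* Returns an id in [0, max_devices) or a negative errno. */
		return ida_simple_get(&example_ida, 0, max_devices, GFP_KERNEL);
	}

	static void example_put_index(int devidx)
	{
		/* Frees the id so it can be reused later. */
		ida_simple_remove(&example_ida, devidx);
	}

The same conversion is applied to the host index allocation in host.c further down in this patch.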
@@ -2093,80 +2102,6 @@ force_ro_fail:
return ret;
}
-static const struct mmc_fixup blk_fixups[] =
-{
- MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
-
- /*
- * Some MMC cards experience performance degradation with CMD23
- * instead of CMD12-bounded multiblock transfers. For now we'll
- * black list what's bad...
- * - Certain Toshiba cards.
- *
- * N.B. This doesn't affect SD cards.
- */
- MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
-
- /*
- * Some MMC cards need longer data read timeout than indicated in CSD.
- */
- MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
- MMC_QUIRK_LONG_READ_TIME),
- MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_LONG_READ_TIME),
-
- /*
- * On these Samsung MoviNAND parts, performing secure erase or
- * secure trim can result in unrecoverable corruption due to a
- * firmware bug.
- */
- MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-
- /*
- * On some Kingston eMMCs, performing trim can result in
- * unrecoverable data corruption occasionally due to a firmware bug.
- */
- MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_TRIM_BROKEN),
- MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_TRIM_BROKEN),
-
- END_FIXUP
-};
-
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
@@ -2178,7 +2113,7 @@ static int mmc_blk_probe(struct mmc_card *card)
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
- mmc_fixup_device(card, blk_fixups);
+ mmc_fixup_device(card, mmc_blk_fixups);
md = mmc_blk_alloc(card);
if (IS_ERR(md))
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index cdabb2ee74be..860ca7c8df86 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -1 +1,9 @@
-int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+#ifndef _MMC_CORE_BLOCK_H
+#define _MMC_CORE_BLOCK_H
+
+struct mmc_queue;
+struct request;
+
+void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+
+#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c64266f5a399..301246513a37 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -23,6 +23,8 @@
#include <linux/mmc/host.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "sdio_cis.h"
#include "bus.h"
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 00a19710b6b4..72b0ef03f10a 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -11,6 +11,11 @@
#ifndef _MMC_CORE_BUS_H
#define _MMC_CORE_BUS_H
+#include <linux/device.h>
+
+struct mmc_host;
+struct mmc_card;
+
#define MMC_DEV_ATTR(name, fmt, args...) \
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
@@ -27,5 +32,14 @@ void mmc_remove_card(struct mmc_card *card);
int mmc_register_bus(void);
void mmc_unregister_bus(void);
-#endif
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *card);
+ void (*remove)(struct mmc_card *card);
+ void (*shutdown)(struct mmc_card *card);
+};
+int mmc_register_driver(struct mmc_driver *drv);
+void mmc_unregister_driver(struct mmc_driver *drv);
+
+#endif
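With struct mmc_driver now declared in this private header, a core client such as the block driver hooks into the bus roughly as sketched below. Only the mmc_driver fields and the mmc_register_driver()/mmc_unregister_driver() calls come from the header above; the callback names are placeholders:

	static int example_probe(struct mmc_card *card)
	{
		/* Allocate per-card data, create the disk, etc. */
		return 0;
	}

	static void example_remove(struct mmc_card *card)
	{
		/* Tear down whatever probe created. */
	}

	static struct mmc_driver example_driver = {
		.drv = {
			.name = "example_mmc_driver",
		},
		.probe = example_probe,
		.remove = example_remove,
	};

	/* mmc_register_driver(&example_driver) from module init,
	 * mmc_unregister_driver(&example_driver) on module exit. */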
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
new file mode 100644
index 000000000000..f06cd91964ce
--- /dev/null
+++ b/drivers/mmc/core/card.h
@@ -0,0 +1,221 @@
+/*
+ * Private header for the mmc subsystem
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _MMC_CORE_CARD_H
+#define _MMC_CORE_CARD_H
+
+#include <linux/mmc/card.h>
+
+#define mmc_card_name(c) ((c)->cid.prod_name)
+#define mmc_card_id(c) (dev_name(&(c)->dev))
+#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+
+/* Card states */
+#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
+#define MMC_STATE_READONLY (1<<1) /* card is read-only */
+#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
+#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
+#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
+#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
+#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
+
+#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
+#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
+#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+
+#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
+#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
+#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+struct mmc_fixup {
+ /* CID-specific fields. */
+ const char *name;
+
+ /* Valid revision range */
+ u64 rev_start, rev_end;
+
+ unsigned int manfid;
+ unsigned short oemid;
+
+ /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ u16 cis_vendor, cis_device;
+
+ /* for MMC cards */
+ unsigned int ext_csd_rev;
+
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define EXT_CSD_REV_ANY (-1u)
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_HYNIX 0x90
+
+#define END_FIXUP { NULL }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data, _ext_csd_rev) \
+ { \
+ .name = (_name), \
+ .manfid = (_manfid), \
+ .oemid = (_oemid), \
+ .rev_start = (_rev_start), \
+ .rev_end = (_rev_end), \
+ .cis_vendor = (_cis_vendor), \
+ .cis_device = (_cis_device), \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ .ext_csd_rev = (_ext_csd_rev), \
+ }
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _fixup, _data, _ext_csd_rev) \
+ _FIXUP_EXT(_name, _manfid, \
+ _oemid, _rev_start, _rev_end, \
+ SDIO_ANY_ID, SDIO_ANY_ID, \
+ _fixup, _data, _ext_csd_rev) \
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
+ _ext_csd_rev) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ _ext_csd_rev)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+ CID_OEMID_ANY, 0, -1ull, \
+ _vendor, _device, \
+ _fixup, _data, EXT_CSD_REV_ANY) \
+
+#define cid_rev(hwrev, fwrev, year, month) \
+ (((u64) hwrev) << 40 | \
+ ((u64) fwrev) << 32 | \
+ ((u64) year) << 16 | \
+ ((u64) month))
+
+#define cid_rev_card(card) \
+ cid_rev(card->cid.hwrev, \
+ card->cid.fwrev, \
+ card->cid.year, \
+ card->cid.month)
+
+/*
+ * Unconditionally quirk add/remove.
+ */
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks &= ~data;
+}
+
+static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+}
+
+static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+}
+
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
+static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
+}
+
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
+static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
+}
+
+static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+}
+
+#endif
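The MMC_FIXUP() machinery above is consumed by building a table of entries terminated with END_FIXUP and letting the core match it against the card's CID; mmc_blk_probe() earlier in this patch does exactly that via mmc_fixup_device(card, mmc_blk_fixups). A hedged sketch of such a table (the product name and the chosen quirk are illustrative, not taken from any real card list):

	static const struct mmc_fixup example_fixups[] = {
		/* Apply MMC_QUIRK_BLK_NO_CMD23 to a hypothetical eMMC part. */
		MMC_FIXUP("EXMPL1", CID_MANFID_TOSHIBA, CID_OEMID_ANY,
			  add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23),

		END_FIXUP
	};

	/* From a probe path: mmc_fixup_device(card, example_fixups); */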
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1076b9d89df3..926e0fde07d7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -40,6 +40,7 @@
#include <trace/events/mmc.h>
#include "core.h"
+#include "card.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
@@ -630,10 +631,41 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
}
/**
- * mmc_start_req - start a non-blocking request
+ * mmc_finalize_areq() - finalize an asynchronous request
+ * @host: MMC host to finalize any ongoing request on
+ *
+ * Returns the status of the ongoing asynchronous request, or
+ * MMC_BLK_SUCCESS if no request was in flight.
+ */
+static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
+{
+ enum mmc_blk_status status;
+
+ if (!host->areq)
+ return MMC_BLK_SUCCESS;
+
+ status = mmc_wait_for_data_req_done(host, host->areq->mrq);
+ if (status == MMC_BLK_NEW_REQUEST)
+ return status;
+
+ /*
+ * Check BKOPS urgency for each R1 response
+ */
+ if (host->card && mmc_card_mmc(host->card) &&
+ ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
+ (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
+ (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
+ mmc_start_bkops(host->card, true);
+ }
+
+ return status;
+}
+
+/**
+ * mmc_start_areq - start an asynchronous request
* @host: MMC host to start command
- * @areq: async request to start
- * @error: out parameter returns 0 for success, otherwise non zero
+ * @areq: asynchronous request to start
+ * @ret_stat: out parameter for status
*
* Start a new MMC custom command request for a host.
* If there is an ongoing async request, wait for completion
@@ -645,11 +677,11 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
* return the completed request. If there is no ongoing request, NULL
* is returned without waiting. NULL is not an error condition.
*/
-struct mmc_async_req *mmc_start_req(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
+struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
+ struct mmc_async_req *areq,
+ enum mmc_blk_status *ret_stat)
{
- enum mmc_blk_status status = MMC_BLK_SUCCESS;
+ enum mmc_blk_status status;
int start_err = 0;
struct mmc_async_req *data = host->areq;
@@ -657,44 +689,25 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
if (areq)
mmc_pre_req(host, areq->mrq);
- if (host->areq) {
- status = mmc_wait_for_data_req_done(host, host->areq->mrq);
- if (status == MMC_BLK_NEW_REQUEST) {
- if (ret_stat)
- *ret_stat = status;
- /*
- * The previous request was not completed,
- * nothing to return
- */
- return NULL;
- }
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
-
- /* Cancel the prepared request */
- if (areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- mmc_start_bkops(host->card, true);
+ /* Finalize previous request */
+ status = mmc_finalize_areq(host);
- /* prepare the request again */
- if (areq)
- mmc_pre_req(host, areq->mrq);
- }
+ /* The previous request is still going on... */
+ if (status == MMC_BLK_NEW_REQUEST) {
+ if (ret_stat)
+ *ret_stat = status;
+ return NULL;
}
+ /* Fine so far, start the new request! */
if (status == MMC_BLK_SUCCESS && areq)
start_err = __mmc_start_data_req(host, areq->mrq);
+ /* Postprocess the old request at this point */
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- /* Cancel a prepared request if it was not started. */
+ /* Cancel a prepared request if it was not started. */
if ((status != MMC_BLK_SUCCESS || start_err) && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
@@ -707,7 +720,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
*ret_stat = status;
return data;
}
-EXPORT_SYMBOL(mmc_start_req);
+EXPORT_SYMBOL(mmc_start_areq);
/**
* mmc_wait_for_req - start a request and wait for completion
@@ -807,7 +820,7 @@ EXPORT_SYMBOL(mmc_interrupt_hpi);
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {NULL};
+ struct mmc_request mrq = {};
WARN_ON(!host->claimed);
@@ -1630,7 +1643,7 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
return ocr;
}
-int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
int err = 0;
int old_signal_voltage = host->ios.signal_voltage;
@@ -1646,20 +1659,13 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err = 0;
u32 clock;
/*
- * Send CMD11 only if the request is to switch the card to
- * 1.8V signalling.
- */
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- return __mmc_set_signal_voltage(host, signal_voltage);
-
- /*
* If we cannot switch voltages, return failure so the caller
* can continue without UHS mode
*/
@@ -1697,7 +1703,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
host->ios.clock = 0;
mmc_set_ios(host);
- if (__mmc_set_signal_voltage(host, signal_voltage)) {
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1806,11 +1812,11 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
mmc_set_initial_state(host);
/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
- if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
+ if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
- else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
- else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
/*
@@ -2129,7 +2135,7 @@ static unsigned int mmc_erase_timeout(struct mmc_card *card,
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
bool use_r1b_resp = false;
unsigned long timeout;
@@ -2551,7 +2557,7 @@ EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
mmc_card_hs400(card) || mmc_card_hs400es(card))
@@ -2567,7 +2573,7 @@ EXPORT_SYMBOL(mmc_set_blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_BLOCK_COUNT;
cmd.arg = blockcount & 0x0000FFFF;
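The renamed mmc_start_areq() keeps the same pipeline contract as the old mmc_start_req(): the caller hands in a prepared asynchronous request and gets back the previously issued one (or NULL) together with its status. In outline, mirroring the block-driver usage earlier in this patch (variable names are illustrative):

	struct mmc_async_req *old_areq;
	enum mmc_blk_status status;

	/* new_areq->mrq and new_areq->err_check were set up beforehand,
	 * as mmc_blk_rw_rq_prep() does above. */
	old_areq = mmc_start_areq(host, new_areq, &status);
	if (!old_areq) {
		/* Either this was the first request in the pipeline or the
		 * previous one is still running (MMC_BLK_NEW_REQUEST):
		 * nothing has completed, so there is nothing to post-process. */
	} else {
		/* old_areq completed with 'status'; finish its blk request
		 * and, on error, retry or abort as mmc_blk_issue_rw_rq()
		 * does above. */
	}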
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 0fa86a2afc26..55f543fd37c4 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -12,6 +12,11 @@
#define _MMC_CORE_CORE_H
#include <linux/delay.h>
+#include <linux/sched.h>
+
+struct mmc_host;
+struct mmc_card;
+struct mmc_request;
#define MMC_CMD_RETRIES 3
@@ -43,8 +48,8 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
-int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -69,6 +74,7 @@ void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
int _mmc_detect_card_removed(struct mmc_host *host);
+int mmc_detect_card_removed(struct mmc_host *host);
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
@@ -98,5 +104,38 @@ static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
#endif
-#endif
+void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
+bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg);
+int mmc_can_erase(struct mmc_card *card);
+int mmc_can_trim(struct mmc_card *card);
+int mmc_can_discard(struct mmc_card *card);
+int mmc_can_sanitize(struct mmc_card *card);
+int mmc_can_secure_erase_trim(struct mmc_card *card);
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr);
+unsigned int mmc_calc_max_discard(struct mmc_card *card);
+
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write);
+
+int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+void mmc_release_host(struct mmc_host *host);
+void mmc_get_card(struct mmc_card *card);
+void mmc_put_card(struct mmc_card *card);
+
+/**
+ * mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ *
+ * Claim a host for a set of operations.
+ */
+static inline void mmc_claim_host(struct mmc_host *host)
+{
+ __mmc_claim_host(host, NULL);
+}
+#endif
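The claim/release helpers gathered into this header are always used as a bracket around card access, with mmc_get_card()/mmc_put_card() adding runtime-PM handling on top of the same host claim. A minimal sketch, assuming the command issued is only an example:

	u32 status;
	int err;

	mmc_get_card(card);			/* runtime-resume + claim host */
	err = mmc_send_status(card, &status);
	mmc_put_card(card);			/* release host + runtime-suspend */

or, when only mutual exclusion on the host is needed:

	mmc_claim_host(host);
	/* ... talk to the card ... */
	mmc_release_host(host);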
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 30623b8b86a4..a1fba5732d66 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -20,6 +20,8 @@
#include <linux/mmc/host.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "mmc_ops.h"
#ifdef CONFIG_FAIL_MMC_REQUEST
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 98f25ffb4258..3f8c85d5aa09 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -34,14 +34,11 @@
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
static DEFINE_IDA(mmc_host_ida);
-static DEFINE_SPINLOCK(mmc_host_lock);
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
- spin_lock(&mmc_host_lock);
- ida_remove(&mmc_host_ida, host->index);
- spin_unlock(&mmc_host_lock);
+ ida_simple_remove(&mmc_host_ida, host->index);
kfree(host);
}
@@ -301,6 +298,8 @@ int mmc_of_parse(struct mmc_host *host)
if (of_property_read_bool(np, "wakeup-source") ||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_property_read_bool(np, "mmc-ddr-3_3v"))
+ host->caps |= MMC_CAP_3_3V_DDR;
if (of_property_read_bool(np, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
if (of_property_read_bool(np, "mmc-ddr-1_2v"))
@@ -354,22 +353,13 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
-again:
- if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
+ err = ida_simple_get(&mmc_host_ida, 0, 0, GFP_KERNEL);
+ if (err < 0) {
kfree(host);
return NULL;
}
- spin_lock(&mmc_host_lock);
- err = ida_get_new(&mmc_host_ida, &host->index);
- spin_unlock(&mmc_host_lock);
-
- if (err == -EAGAIN) {
- goto again;
- } else if (err) {
- kfree(host);
- return NULL;
- }
+ host->index = err;
dev_set_name(&host->class_dev, "mmc%d", host->index);
@@ -381,6 +371,8 @@ again:
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
+ ida_simple_remove(&mmc_host_ida, host->index);
+ kfree(host);
return NULL;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 992bf5397633..fb6a76a03833 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,6 +10,7 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+
#include <linux/mmc/host.h>
int mmc_register_host_class(void);
@@ -20,6 +21,53 @@ void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_CMD23;
+}
+
+static inline int mmc_boot_partition_access(struct mmc_host *host)
+{
+ return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
+}
+
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50);
+}
+
+static inline bool mmc_card_hs200(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS200;
+}
+
+static inline bool mmc_card_ddr52(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
+}
+
+static inline bool mmc_card_hs400(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS400;
+}
+
+static inline bool mmc_card_hs400es(struct mmc_card *card)
+{
+ return card->host->ios.enhanced_strobe;
+}
+
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b61b52f9da3d..7fd722868875 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -21,9 +21,11 @@
#include <linux/mmc/mmc.h>
#include "core.h"
+#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "quirks.h"
#include "sd_ops.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
@@ -47,17 +49,6 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
-static const struct mmc_fixup mmc_ext_csd_fixups[] = {
- /*
- * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
- * is used so disable the HPI feature for such buggy cards.
- */
- MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
- 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
-
- END_FIXUP
-};
-
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -212,7 +203,7 @@ static void mmc_select_card_type(struct mmc_card *card)
avail_type |= EXT_CSD_CARD_TYPE_HS_52;
}
- if (caps & MMC_CAP_1_8V_DDR &&
+ if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
@@ -307,6 +298,18 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
+static void mmc_part_add(struct mmc_card *card, unsigned int size,
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
+{
+ card->part[card->nr_parts].size = size;
+ card->part[card->nr_parts].part_cfg = part_cfg;
+ sprintf(card->part[card->nr_parts].name, name, idx);
+ card->part[card->nr_parts].force_ro = ro;
+ card->part[card->nr_parts].area_type = area_type;
+ card->nr_parts++;
+}
+
static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
@@ -530,8 +533,14 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
EXT_CSD_MANUAL_BKOPS_MASK);
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
- if (!card->ext_csd.man_bkops_en)
- pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
+ if (card->ext_csd.man_bkops_en)
+ pr_debug("%s: MAN_BKOPS_EN bit is set\n",
+ mmc_hostname(card->host));
+ card->ext_csd.auto_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_AUTO_BKOPS_MASK);
+ if (card->ext_csd.auto_bkops_en)
+ pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
mmc_hostname(card->host));
}
@@ -617,6 +626,12 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+ card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+ card->ext_csd.device_life_time_est_typ_a =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+ card->ext_csd.device_life_time_est_typ_b =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
}
/* eMMC v5.1 or later */
@@ -764,6 +779,10 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+ card->ext_csd.device_life_time_est_typ_a,
+ card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
@@ -817,6 +836,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_pre_eol_info.attr,
+ &dev_attr_life_time.attr,
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
@@ -1095,16 +1116,19 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- err = -EINVAL;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ if (!err)
+ return 0;
+ }
- if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
+ host->caps & MMC_CAP_1_8V_DDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* make sure vccq is 3.3v after switching disaster */
if (err)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
return err;
}
@@ -1271,10 +1295,10 @@ static int mmc_select_hs400es(struct mmc_card *card)
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If it fails, try again during the next card power cycle */
if (err)
@@ -1380,10 +1404,10 @@ static int mmc_select_hs200(struct mmc_card *card)
old_signal_voltage = host->ios.signal_voltage;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If it fails, try again during the next card power cycle */
if (err)
@@ -1425,7 +1449,7 @@ static int mmc_select_hs200(struct mmc_card *card)
err:
if (err) {
/* fall back to the old signal voltage; if that fails, report an error */
- if (__mmc_set_signal_voltage(host, old_signal_voltage))
+ if (mmc_set_signal_voltage(host, old_signal_voltage))
err = -EIO;
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
@@ -1706,10 +1730,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_hs400(card);
if (err)
goto free_card;
- } else if (mmc_card_hs(card)) {
+ } else {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (err > 0) {
+ if (err > 0 && mmc_card_hs(card)) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
@@ -1805,7 +1829,7 @@ static int mmc_can_sleep(struct mmc_card *card)
static int mmc_sleep(struct mmc_host *host)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
int err;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c3455b040..fe80f26d6971 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -57,7 +57,7 @@ static const u8 tuning_blk_pattern_8bit[] = {
int mmc_send_status(struct mmc_card *card, u32 *status)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
@@ -79,7 +79,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SELECT_CARD;
@@ -115,7 +115,7 @@ int mmc_deselect_cards(struct mmc_host *host)
*/
int mmc_set_dsr(struct mmc_host *host)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_DSR;
@@ -128,7 +128,7 @@ int mmc_set_dsr(struct mmc_host *host)
int mmc_go_idle(struct mmc_host *host)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
/*
* Non-SPI hosts need to prevent chipselect going active during
@@ -164,7 +164,7 @@ int mmc_go_idle(struct mmc_host *host)
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = MMC_SEND_OP_COND;
@@ -203,7 +203,7 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_ALL_SEND_CID;
cmd.arg = 0;
@@ -220,7 +220,7 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
int mmc_set_relative_addr(struct mmc_card *card)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_RELATIVE_ADDR;
cmd.arg = card->rca << 16;
@@ -233,7 +233,7 @@ static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = opcode;
cmd.arg = arg;
@@ -256,9 +256,9 @@ static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
u32 opcode, void *buf, unsigned len)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
mrq.cmd = &cmd;
@@ -387,7 +387,7 @@ EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_READ_OCR;
@@ -402,7 +402,7 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_CRC_ON_OFF;
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
} while (busy);
- if (host->ops->card_busy && send_status)
- return mmc_switch_status(card);
-
return 0;
}
@@ -533,7 +530,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
{
struct mmc_host *host = card->host;
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
bool use_r1b_resp = use_busy_signal;
unsigned char old_timing = host->ios.timing;
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /* Switch to new timing before poll and check switch status. */
- if (timing)
- mmc_set_timing(host, timing);
-
/* If SPI, or if HW busy detection was used above, then we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
- mmc_host_is_spi(host)) {
- if (send_status)
- err = mmc_switch_status(card);
+ mmc_host_is_spi(host))
goto out_tim;
- }
/* Let's try to poll to find out when the command is completed. */
err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ if (err)
+ goto out;
out_tim:
- if (err && timing)
- mmc_set_timing(host, old_timing);
+ /* Switch to the new timing before checking the switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
+
+ if (send_status) {
+ err = mmc_switch_status(card);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
out:
mmc_retune_release(host);
@@ -611,9 +610,9 @@ EXPORT_SYMBOL_GPL(mmc_switch);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
struct mmc_ios *ios = &host->ios;
const u8 *tuning_block_pattern;
@@ -680,7 +679,7 @@ EXPORT_SYMBOL_GPL(mmc_send_tuning);
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
/*
* The eMMC specification states that CMD12 can be used to stop a tuning
@@ -707,9 +706,9 @@ static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
u8 *data_buf;
u8 *test_buf;
@@ -803,7 +802,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
unsigned int opcode;
int err;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index abd525ed74be..74beea8a9c7e 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -12,6 +12,11 @@
#ifndef _MMC_MMC_OPS_H
#define _MMC_MMC_OPS_H
+#include <linux/types.h>
+
+struct mmc_host;
+struct mmc_card;
+
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
int mmc_set_dsr(struct mmc_host *host);
@@ -26,12 +31,21 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
+int mmc_interrupt_hpi(struct mmc_card *card);
int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card);
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool use_busy_signal, bool send_status, bool retry_crc_err);
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms);
+int mmc_stop_bkops(struct mmc_card *card);
+int mmc_read_bkops_status(struct mmc_card *card);
+void mmc_start_bkops(struct mmc_card *card, bool from_exception);
+int mmc_can_reset(struct mmc_card *card);
+int mmc_flush_cache(struct mmc_card *card);
#endif
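mmc_switch(), now declared here for core-internal users, is the usual way to program a single EXT_CSD byte; the CMD38 quirk handling near the top of this patch uses exactly this form. A sketch, with the index/value pair chosen only for illustration:

	/* Set EXT_CSD byte EXT_CSD_BKOPS_EN to 1, waiting up to the
	 * card's generic CMD6 timeout. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_EN, 1,
			 card->ext_csd.generic_cmd6_time);
	if (err)
		pr_warn("%s: switch failed: %d\n",
			mmc_hostname(card->host), err);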
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 3ab6e52d106c..f99ac3123fd2 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -22,6 +22,11 @@
#include <linux/seq_file.h>
#include <linux/module.h>
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+
#define RESULT_OK 0
#define RESULT_FAIL 1
#define RESULT_UNSUP_HOST 2
@@ -260,7 +265,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
int ret, busy;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
busy = 0;
do {
@@ -277,8 +282,7 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- pr_info("%s: Warning: Host did not "
- "wait for busy state to end.\n",
+ pr_info("%s: Warning: Host did not wait for busy state to end.\n",
mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -292,10 +296,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
u8 *buffer, unsigned addr, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -357,12 +361,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
if (max_segs > max_page_cnt)
max_segs = max_page_cnt;
- mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return NULL;
- mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
- GFP_KERNEL);
+ mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
if (!mem->arr)
goto out_free;
@@ -546,7 +549,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
if (!test->gr)
return;
- tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+ tr = kmalloc(sizeof(*tr), GFP_KERNEL);
if (!tr)
return;
@@ -641,11 +644,11 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
if (write)
memset(test->buffer, 0xDF, 512);
else {
- for (i = 0;i < 512;i++)
+ for (i = 0; i < 512; i++)
test->buffer[i] = i;
}
- for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
@@ -674,7 +677,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
memset(test->buffer, 0, 512);
- for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
@@ -850,7 +853,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
for (i = 0; i < count; i++) {
mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
blocks, blksz, write);
- done_areq = mmc_start_req(test->card->host, cur_areq, &status);
+ done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
ret = RESULT_FAIL;
@@ -869,7 +872,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
dev_addr += blocks;
}
- done_areq = mmc_start_req(test->card->host, NULL, &status);
+ done_areq = mmc_start_areq(test->card->host, NULL, &status);
if (status != MMC_BLK_SUCCESS)
ret = RESULT_FAIL;
@@ -885,10 +888,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
mrq.cmd = &cmd;
mrq.data = &data;
@@ -910,10 +913,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
static int mmc_test_broken_transfer(struct mmc_test_card *test,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -946,7 +949,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
unsigned long flags;
if (write) {
- for (i = 0;i < blocks * blksz;i++)
+ for (i = 0; i < blocks * blksz; i++)
test->scratch[i] = i;
} else {
memset(test->scratch, 0, BUFFER_SIZE);
@@ -980,7 +983,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
memset(test->buffer, 0, sectors * 512);
- for (i = 0;i < sectors;i++) {
+ for (i = 0; i < sectors; i++) {
ret = mmc_test_buffer_transfer(test,
test->buffer + i * 512,
dev_addr + i, 512, 0);
@@ -988,12 +991,12 @@ static int mmc_test_transfer(struct mmc_test_card *test,
return ret;
}
- for (i = 0;i < blocks * blksz;i++) {
+ for (i = 0; i < blocks * blksz; i++) {
if (test->buffer[i] != (u8)i)
return RESULT_FAIL;
}
- for (;i < sectors * 512;i++) {
+ for (; i < sectors * 512; i++) {
if (test->buffer[i] != 0xDF)
return RESULT_FAIL;
}
@@ -1001,7 +1004,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
local_irq_save(flags);
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
local_irq_restore(flags);
- for (i = 0;i < blocks * blksz;i++) {
+ for (i = 0; i < blocks * blksz; i++) {
if (test->scratch[i] != (u8)i)
return RESULT_FAIL;
}
@@ -1086,7 +1089,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read(struct mmc_test_card *test)
@@ -1107,7 +1110,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
@@ -1118,7 +1121,7 @@ static int mmc_test_pow2_write(struct mmc_test_card *test)
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
- for (i = 1; i < 512;i <<= 1) {
+ for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
@@ -1136,7 +1139,7 @@ static int mmc_test_pow2_read(struct mmc_test_card *test)
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
- for (i = 1; i < 512;i <<= 1) {
+ for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
@@ -1154,7 +1157,7 @@ static int mmc_test_weird_write(struct mmc_test_card *test)
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
- for (i = 3; i < 512;i += 7) {
+ for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
@@ -1172,7 +1175,7 @@ static int mmc_test_weird_read(struct mmc_test_card *test)
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
- for (i = 3; i < 512;i += 7) {
+ for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
@@ -1231,7 +1234,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
if (ret)
return ret;
}
@@ -1258,7 +1261,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
if (ret)
return ret;
}
@@ -1357,7 +1360,7 @@ static int mmc_test_multi_write_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read_high(struct mmc_test_card *test)
@@ -1379,7 +1382,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
#else
@@ -1533,7 +1536,7 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
/*
* Initialize an area for testing large transfers. The test area is set to the
- * middle of the card because cards may have different charateristics at the
+ * middle of the card because cards may have different characteristics at the
* front (for FAT file system optimization). Optionally, the area is erased
* (if the card supports it) which may improve write performance. Optionally,
* the area is filled with data for subsequent read tests.
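The placement policy described in this comment simply centres the test window on the device. A minimal sketch of that idea, with hypothetical names (mmc_test_area_init() itself works in terms of its own size and address fields):

	/* Illustrative only: centre a test window so the FAT-optimised
	 * front of the card is avoided.
	 */
	static unsigned int pick_test_area_start(unsigned int card_sectors,
						 unsigned int area_sectors)
	{
		return card_sectors / 2 - area_sectors / 2;
	}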
@@ -1579,7 +1582,7 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
if (!t->mem)
return -ENOMEM;
- t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+ t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
if (!t->sg) {
ret = -ENOMEM;
goto out_free;
@@ -2147,7 +2150,7 @@ static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
int i;
for (i = 0 ; i < rw->len && ret == 0; i++) {
- ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
rw->sg_len[i]);
if (ret)
break;
@@ -2399,7 +2402,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &test_areq.areq, &blkstat);
+ mmc_start_areq(host, &test_areq.areq, &blkstat);
if (blkstat != MMC_BLK_SUCCESS) {
ret = RESULT_FAIL;
goto out_free;
@@ -2437,7 +2440,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_req(host, NULL, &blkstat);
+ mmc_start_areq(host, NULL, &blkstat);
if (blkstat != MMC_BLK_SUCCESS)
ret = RESULT_FAIL;
} else {
@@ -2954,7 +2957,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_claim_host(test->card->host);
- for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
struct mmc_test_general_result *gr;
if (testcase && ((i + 1) != testcase))
@@ -2967,16 +2970,14 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].prepare) {
ret = mmc_test_cases[i].prepare(test);
if (ret) {
- pr_info("%s: Result: Prepare "
- "stage failed! (%d)\n",
+ pr_info("%s: Result: Prepare stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
continue;
}
}
- gr = kzalloc(sizeof(struct mmc_test_general_result),
- GFP_KERNEL);
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
if (gr) {
INIT_LIST_HEAD(&gr->tr_lst);
@@ -3005,13 +3006,11 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_HOST:
- pr_info("%s: Result: UNSUPPORTED "
- "(by host)\n",
+ pr_info("%s: Result: UNSUPPORTED (by host)\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_CARD:
- pr_info("%s: Result: UNSUPPORTED "
- "(by card)\n",
+ pr_info("%s: Result: UNSUPPORTED (by card)\n",
mmc_hostname(test->card->host));
break;
default:
@@ -3026,8 +3025,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
- pr_info("%s: Warning: Cleanup "
- "stage failed! (%d)\n",
+ pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
}
@@ -3113,7 +3111,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
if (!test)
return -ENOMEM;
@@ -3163,9 +3161,9 @@ static int mtf_testlist_show(struct seq_file *sf, void *data)
mutex_lock(&mmc_test_lock);
- seq_printf(sf, "0:\tRun all tests\n");
+ seq_puts(sf, "0:\tRun all tests\n");
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
- seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+ seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
mutex_unlock(&mmc_test_lock);
@@ -3218,7 +3216,7 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
return -ENODEV;
}
- df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+ df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
debugfs_remove(file);
dev_err(&card->dev,
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index d69e751f148b..39c911aa6ebb 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,7 +8,11 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
-#include <linux/mmc/host.h>
+#include <linux/types.h>
+
+struct mmc_host;
+struct device;
+struct module;
struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
new file mode 100644
index 000000000000..1a21e14458d3
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -0,0 +1,117 @@
+/*
+ * pwrseq_sd8787.c - power sequence support for Marvell SD8787 BT + Wifi chip
+ *
+ * Copyright (C) 2016 Matt Ranostay <matt@ranostay.consulting>
+ *
+ * Based on the original work pwrseq_simple.c
+ * Copyright (C) 2014 Linaro Ltd
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_sd8787 {
+ struct mmc_pwrseq pwrseq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *pwrdn_gpio;
+};
+
+#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
+
+static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+
+ msleep(300);
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+}
+
+static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+}
+
+static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
+ .pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
+ .power_off = mmc_pwrseq_sd8787_power_off,
+};
+
+static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
+ { .compatible = "mmc-pwrseq-sd8787",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
+
+static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->pwrdn_gpio))
+ return PTR_ERR(pwrseq->pwrdn_gpio);
+
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
+
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_sd8787_driver = {
+ .probe = mmc_pwrseq_sd8787_probe,
+ .remove = mmc_pwrseq_sd8787_remove,
+ .driver = {
+ .name = "pwrseq_sd8787",
+ .of_match_table = mmc_pwrseq_sd8787_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_sd8787_driver);
+MODULE_LICENSE("GPL v2");
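For orientation, the ops registered above are consumed by the mmc core when the host is powered up and down. A simplified sketch of that dispatch (the core's real helpers live in drivers/mmc/core/pwrseq.c; the function name below is illustrative):

	/* Sketch: the core calls into the registered pwrseq around power-up. */
	static void pwrseq_pre_power_on_sketch(struct mmc_host *host)
	{
		struct mmc_pwrseq *pwrseq = host->pwrseq;

		/* For sd8787 this raises reset, waits 300 ms, then raises powerdown. */
		if (pwrseq && pwrseq->ops->pre_power_on)
			pwrseq->ops->pre_power_on(host);
	}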
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index a6496d8027bc..493eb10ce580 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -20,6 +20,8 @@
#include "queue.h"
#include "block.h"
+#include "core.h"
+#include "card.h"
#define MMC_QUEUE_BOUNCESZ 65536
@@ -30,15 +32,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- /*
- * We only like normal block requests and discards.
- */
- if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
- req_op(req) != REQ_OP_SECURE_ERASE) {
- blk_dump_rq_flags(req, "MMC bad request");
- return BLKPREP_KILL;
- }
-
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
@@ -84,8 +77,8 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
mmc_blk_issue_rq(mq, req);
cond_resched();
- if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (mq->new_request) {
+ mq->new_request = false;
continue; /* fetch again */
}
@@ -152,7 +145,7 @@ static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
struct scatterlist *sg;
- sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
if (!sg)
*err = -ENOMEM;
else {
@@ -399,8 +392,8 @@ void mmc_queue_suspend(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
- mq->flags |= MMC_QUEUE_SUSPENDED;
+ if (!mq->suspended) {
+ mq->suspended = true;
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
@@ -419,8 +412,8 @@ void mmc_queue_resume(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- if (mq->flags & MMC_QUEUE_SUSPENDED) {
- mq->flags &= ~MMC_QUEUE_SUSPENDED;
+ if (mq->suspended) {
+ mq->suspended = false;
up(&mq->thread_sem);
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index dac8c3d010dd..e298f100101b 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -1,6 +1,11 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+
static inline bool mmc_req_is_special(struct request *req)
{
return req &&
@@ -9,7 +14,6 @@ static inline bool mmc_req_is_special(struct request *req)
req_op(req) == REQ_OP_SECURE_ERASE);
}
-struct request;
struct task_struct;
struct mmc_blk_data;
@@ -29,16 +33,15 @@ struct mmc_queue_req {
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
- struct mmc_async_req mmc_active;
+ struct mmc_async_req areq;
};
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
- unsigned int flags;
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+ bool new_request;
+ bool suspended;
bool asleep;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
deleted file mode 100644
index ca9cade317c7..000000000000
--- a/drivers/mmc/core/quirks.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * This file contains work-arounds for many known SD/MMC
- * and SDIO hardware bugs.
- *
- * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
- * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
- * Inspired from pci fixup code:
- * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_ids.h>
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-#ifndef SDIO_VENDOR_ID_STE
-#define SDIO_VENDOR_ID_STE 0x0020
-#endif
-
-#ifndef SDIO_DEVICE_ID_STE_CW1200
-#define SDIO_DEVICE_ID_STE_CW1200 0x2280
-#endif
-
-#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
-#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
-#endif
-
-static const struct mmc_fixup mmc_fixup_methods[] = {
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_DISABLE_CD),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
- add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
- add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
-
- END_FIXUP
-};
-
-void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
-{
- const struct mmc_fixup *f;
- u64 rev = cid_rev_card(card);
-
- /* Non-core specific workarounds. */
- if (!table)
- table = mmc_fixup_methods;
-
- for (f = table; f->vendor_fixup; f++) {
- if ((f->manfid == CID_MANFID_ANY ||
- f->manfid == card->cid.manfid) &&
- (f->oemid == CID_OEMID_ANY ||
- f->oemid == card->cid.oemid) &&
- (f->name == CID_NAME_ANY ||
- !strncmp(f->name, card->cid.prod_name,
- sizeof(card->cid.prod_name))) &&
- (f->cis_vendor == card->cis.vendor ||
- f->cis_vendor == (u16) SDIO_ANY_ID) &&
- (f->cis_device == card->cis.device ||
- f->cis_device == (u16) SDIO_ANY_ID) &&
- (f->ext_csd_rev == EXT_CSD_REV_ANY ||
- f->ext_csd_rev == card->ext_csd.rev) &&
- rev >= f->rev_start && rev <= f->rev_end) {
- dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
- f->vendor_fixup(card, f->data);
- }
- }
-}
-EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
new file mode 100644
index 000000000000..fb725934fa21
--- /dev/null
+++ b/drivers/mmc/core/quirks.h
@@ -0,0 +1,148 @@
+/*
+ * This file contains work-arounds for many known SD/MMC
+ * and SDIO hardware bugs.
+ *
+ * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/mmc/sdio_ids.h>
+
+#include "card.h"
+
+static const struct mmc_fixup mmc_blk_fixups[] = {
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+ /* CMD38 argument is passed through EXT_CSD[113] */
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * blacklist what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some MMC cards need longer data read timeout than indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+ /*
+ * On some Kingston eMMCs, performing trim can occasionally result in
+ * unrecoverable data corruption due to a firmware bug.
+ */
+ MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup mmc_ext_csd_fixups[] = {
+ /*
+ * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
+ * is used so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
+ 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+ add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
+ END_FIXUP
+};
+
+static inline void mmc_fixup_device(struct mmc_card *card,
+ const struct mmc_fixup *table)
+{
+ const struct mmc_fixup *f;
+ u64 rev = cid_rev_card(card);
+
+ for (f = table; f->vendor_fixup; f++) {
+ if ((f->manfid == CID_MANFID_ANY ||
+ f->manfid == card->cid.manfid) &&
+ (f->oemid == CID_OEMID_ANY ||
+ f->oemid == card->cid.oemid) &&
+ (f->name == CID_NAME_ANY ||
+ !strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name))) &&
+ (f->cis_vendor == card->cis.vendor ||
+ f->cis_vendor == (u16) SDIO_ANY_ID) &&
+ (f->cis_device == card->cis.device ||
+ f->cis_device == (u16) SDIO_ANY_ID) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
+ rev >= f->rev_start && rev <= f->rev_end) {
+ dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
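To see how the matching loop above is used in practice, here is a hedged example of applying the block-layer table and honouring one of its quirks (mmc_blk_fixups and MMC_QUIRK_BLK_NO_CMD23 are defined above; the wrapper function name is hypothetical):

	/* Example caller: apply the quirk table, then act on the result. */
	static void apply_blk_quirks_example(struct mmc_card *card)
	{
		mmc_fixup_device(card, mmc_blk_fixups);

		if (card->quirks & MMC_QUIRK_BLK_NO_CMD23)
			pr_debug("%s: avoiding CMD23 for this card\n",
				 mmc_hostname(card->host));
	}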
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index a614f37faf27..89531b48ae84 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -22,6 +22,8 @@
#include <linux/mmc/sd.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd.h"
@@ -786,8 +788,7 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- pocr);
+ err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
goto try_again;
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index aab824a9a7f3..1ada9808c329 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -1,10 +1,13 @@
#ifndef _MMC_CORE_SD_H
#define _MMC_CORE_SD_H
-#include <linux/mmc/card.h>
+#include <linux/types.h>
extern struct device_type sd_type;
+struct mmc_host;
+struct mmc_card;
+
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
void mmc_decode_cid(struct mmc_card *card);
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index de125a41aa7a..9d5824a37586 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -25,7 +25,7 @@
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
if (WARN_ON(card && card->host != host))
return -EINVAL;
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(mmc_app_cmd);
int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {NULL};
+ struct mmc_request mrq = {};
int i, err;
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = SD_APP_SET_BUS_WIDTH;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -141,7 +141,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = SD_APP_OP_COND;
@@ -185,7 +185,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
static const u8 test_pattern = 0xAA;
u8 result_pattern;
@@ -217,7 +217,7 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = SD_SEND_RELATIVE_ADDR;
cmd.arg = 0;
@@ -235,9 +235,9 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
int err;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
void *data_buf;
@@ -290,9 +290,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
/* NOTE: caller guarantees resp is heap-allocated */
@@ -332,9 +332,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
int err;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
/* NOTE: caller guarantees ssr is heap-allocated */
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index ffc2305d905f..784f8e6b6baa 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -12,6 +12,12 @@
#ifndef _MMC_SD_OPS_H
#define _MMC_SD_OPS_H
+#include <linux/types.h>
+
+struct mmc_card;
+struct mmc_host;
+struct mmc_command;
+
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
@@ -20,6 +26,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr);
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
+int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd, int retries);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ecbc52981ba5..fae732c870a9 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -20,7 +20,10 @@
#include <linux/mmc/sdio_ids.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "bus.h"
+#include "quirks.h"
#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
@@ -541,6 +544,15 @@ out:
return err;
}
+static void mmc_sdio_resend_if_cond(struct mmc_host *host,
+ struct mmc_card *card)
+{
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+ mmc_remove_card(card);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -624,24 +636,21 @@ try_again:
* to switch to 1.8V signaling level. No 1.8v signalling if
* UHS mode is not enabled to maintain compatibility and some
* systems that claim 1.8v signalling in fact do not support
- * it.
+ * it. Per SDIO spec v3, section 3.1.2, if the voltage is already
+ * 1.8V, the card sets S18A to 0 in the R4 response. The rocr &
+ * R4_18V_PRESENT check will then fail, but we still need to try to
+ * initialize the UHS card. sdio_read_cccr will take over this task
+ * to work out which speed mode should be used.
*/
if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- ocr_card);
+ err = mmc_set_uhs_voltage(host, ocr_card);
if (err == -EAGAIN) {
- sdio_reset(host);
- mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
- mmc_remove_card(card);
+ mmc_sdio_resend_if_cond(host, card);
retries--;
goto try_again;
} else if (err) {
ocr &= ~R4_18V_PRESENT;
}
- err = 0;
- } else {
- ocr &= ~R4_18V_PRESENT;
}
/*
@@ -698,11 +707,20 @@ try_again:
}
/*
- * Read the common registers.
+ * Read the common registers. Note that we should try to
+ * validate whether UHS would work or not.
*/
err = sdio_read_cccr(card, ocr);
- if (err)
- goto remove;
+ if (err) {
+ mmc_sdio_resend_if_cond(host, card);
+ if (ocr & R4_18V_PRESENT) {
+ /* Retry init sequence, but without R4_18V_PRESENT. */
+ retries = 0;
+ goto try_again;
+ } else {
+ goto remove;
+ }
+ }
/*
* Read the common CIS tuples.
@@ -721,7 +739,7 @@ try_again:
card = oldcard;
}
card->ocr = ocr_card;
- mmc_fixup_device(card, NULL);
+ mmc_fixup_device(card, sdio_fixup_methods);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 86f5b3223aae..e992a7f8a16f 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include "core.h"
+#include "card.h"
#include "sdio_cis.h"
#include "sdio_bus.h"
diff --git a/drivers/mmc/core/sdio_bus.h b/drivers/mmc/core/sdio_bus.h
index 567a76821ba7..b69a2540a076 100644
--- a/drivers/mmc/core/sdio_bus.h
+++ b/drivers/mmc/core/sdio_bus.h
@@ -11,6 +11,9 @@
#ifndef _MMC_CORE_SDIO_BUS_H
#define _MMC_CORE_SDIO_BUS_H
+struct mmc_card;
+struct sdio_func;
+
struct sdio_func *sdio_alloc_func(struct mmc_card *card);
int sdio_add_func(struct sdio_func *func);
void sdio_remove_func(struct sdio_func *func);
diff --git a/drivers/mmc/core/sdio_cis.h b/drivers/mmc/core/sdio_cis.h
index 4d903c2e425e..16aa563faa00 100644
--- a/drivers/mmc/core/sdio_cis.h
+++ b/drivers/mmc/core/sdio_cis.h
@@ -14,6 +14,9 @@
#ifndef _MMC_SDIO_CIS_H
#define _MMC_SDIO_CIS_H
+struct mmc_card;
+struct sdio_func;
+
int sdio_read_common_cis(struct mmc_card *card);
void sdio_free_common_cis(struct mmc_card *card);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 406e5f037e32..74195d772f5a 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -16,6 +16,8 @@
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
/**
* sdio_claim_host - exclusively claim a bus for a certain SDIO function
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f1faf9acc007..d29faf2addfe 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -27,6 +27,8 @@
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
static int process_sdio_pending_irqs(struct mmc_host *host)
{
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 90fe5545c677..3c0d3ab4324c 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -21,7 +21,7 @@
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = SD_IO_SEND_OP_COND;
@@ -66,7 +66,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
if (fn > 7)
@@ -118,9 +118,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg, *sg_ptr;
struct sg_table sgtable;
unsigned int nents, left_size, i;
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index 5660c7f459e9..bed8a8377fec 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -12,14 +12,19 @@
#ifndef _MMC_SDIO_OPS_H
#define _MMC_SDIO_OPS_H
+#include <linux/types.h>
#include <linux/mmc/sdio.h>
+struct mmc_host;
+struct mmc_card;
+
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8* out);
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
+unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
static inline bool mmc_is_io_op(u32 opcode)
{
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index babe591aea96..a8450a8701e4 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -235,9 +235,6 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
struct gpio_desc *desc;
int ret;
- if (!con_id)
- con_id = ctx->cd_label;
-
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -289,9 +286,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
struct gpio_desc *desc;
int ret;
- if (!con_id)
- con_id = ctx->ro_label;
-
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
diff --git a/drivers/mmc/core/slot-gpio.h b/drivers/mmc/core/slot-gpio.h
index 8c1854dc5d58..a06fd843f025 100644
--- a/drivers/mmc/core/slot-gpio.h
+++ b/drivers/mmc/core/slot-gpio.h
@@ -8,6 +8,8 @@
#ifndef _MMC_CORE_SLOTGPIO_H
#define _MMC_CORE_SLOTGPIO_H
+struct mmc_host;
+
int mmc_gpio_alloc(struct mmc_host *host);
#endif
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2eb97014dc3f..f08691a58d7e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -683,6 +683,15 @@ config MMC_DW_ROCKCHIP
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on RK3066, RK3188 and RK3288 SoC's.
+config MMC_DW_ZX
+ tristate "ZTE specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW && ARCH_ZX
+ select MMC_DW_PLTFM
+ help
+ This selects support for ZTE SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on the ZX296718 SoC.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ccc9c4cba154..6d548c4ee2fa 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
+obj-$(CONFIG_MMC_DW_ZX) += dw_mmc-zx.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 36b5af8eadb8..1e2600da105f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -36,6 +36,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
+#include <linux/interrupt.h>
#include <linux/platform_data/mmc-davinci.h>
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index e1335289316c..25691cca1881 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 9821e6bd5d5e..e38fb0020bb1 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index ab82796b01e2..ab8713297edb 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
-#include <linux/mmc/dw_mmc.h>
#include "dw_mmc.h"
#define PCI_BAR_NO 2
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 1236d49ba36e..58c13e21bd5a 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/of.h>
#include <linux/clk.h>
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 9a46e4694227..372fb6e948c1 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -11,7 +11,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/of_address.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
new file mode 100644
index 000000000000..d38e94ae2b85
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -0,0 +1,241 @@
+/*
+ * ZX Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2016, Linaro Ltd.
+ * Copyright (C) 2016, ZTE Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+#include "dw_mmc-zx.h"
+
+struct dw_mci_zx_priv_data {
+ struct regmap *sysc_base;
+};
+
+enum delay_type {
+ DELAY_TYPE_READ, /* read dqs delay */
+ DELAY_TYPE_CLK, /* clk sample delay */
+};
+
+static int dw_mci_zx_emmc_set_delay(struct dw_mci *host, unsigned int delay,
+ enum delay_type dflag)
+{
+ struct dw_mci_zx_priv_data *priv = host->priv;
+ struct regmap *sysc_base = priv->sysc_base;
+ unsigned int clksel;
+ unsigned int loop = 1000;
+ int ret;
+
+ if (!sysc_base)
+ return -EINVAL;
+
+ ret = regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
+ PARA_HALF_CLK_MODE | PARA_DLL_BYPASS_MODE |
+ PARA_PHASE_DET_SEL_MASK |
+ PARA_DLL_LOCK_NUM_MASK |
+ DLL_REG_SET | PARA_DLL_START_MASK,
+ PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4));
+ if (ret)
+ return ret;
+
+ ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG1, &clksel);
+ if (ret)
+ return ret;
+
+ if (dflag == DELAY_TYPE_CLK) {
+ clksel &= ~CLK_SAMP_DELAY_MASK;
+ clksel |= CLK_SAMP_DELAY(delay);
+ } else {
+ clksel &= ~READ_DQS_DELAY_MASK;
+ clksel |= READ_DQS_DELAY(delay);
+ }
+
+ regmap_write(sysc_base, LB_AON_EMMC_CFG_REG1, clksel);
+ regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
+ PARA_DLL_START_MASK | PARA_DLL_LOCK_NUM_MASK |
+ DLL_REG_SET,
+ PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4) |
+ DLL_REG_SET);
+
+ do {
+ ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG2, &clksel);
+ if (ret)
+ return ret;
+
+ } while (--loop && !(clksel & ZX_DLL_LOCKED));
+
+ if (!loop) {
+ dev_err(host->dev, "Error: %s dll lock fail\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_mci_zx_emmc_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int ret, len = 0, start = 0, end = 0, delay, best = 0;
+
+ for (delay = 1; delay < 128; delay++) {
+ ret = dw_mci_zx_emmc_set_delay(host, delay, DELAY_TYPE_CLK);
+ if (!ret && mmc_send_tuning(mmc, opcode, NULL)) {
+ if (start >= 0) {
+ end = delay - 1;
+ /* check and update longest good range */
+ if ((end - start) > len) {
+ best = (start + end) >> 1;
+ len = end - start;
+ }
+ }
+ start = -1;
+ end = 0;
+ continue;
+ }
+ if (start < 0)
+ start = delay;
+ }
+
+ if (start >= 0) {
+ end = delay - 1;
+ if ((end - start) > len) {
+ best = (start + end) >> 1;
+ len = end - start;
+ }
+ }
+ if (best < 0)
+ return -EIO;
+
+ dev_info(host->dev, "%s best range: start %d end %d\n", __func__,
+ start, end);
+ return dw_mci_zx_emmc_set_delay(host, best, DELAY_TYPE_CLK);
+}
+
+static int dw_mci_zx_prepare_hs400_tuning(struct dw_mci *host,
+ struct mmc_ios *ios)
+{
+ int ret;
+
+ /* configure the phase shift as 90 degrees */
+ ret = dw_mci_zx_emmc_set_delay(host, 32, DELAY_TYPE_READ);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int dw_mci_zx_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+
+ if (host->verid == 0x290a) /* only for emmc */
+ return dw_mci_zx_emmc_execute_tuning(slot, opcode);
+ /* TODO: Add 0x210a dedicated tuning for sd/sdio */
+
+ return 0;
+}
+
+static int dw_mci_zx_parse_dt(struct dw_mci *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct device_node *node;
+ struct dw_mci_zx_priv_data *priv;
+ struct regmap *sysc_base;
+ int ret;
+
+ /* syscon is needed only by emmc */
+ node = of_parse_phandle(np, "zte,aon-syscon", 0);
+ if (node) {
+ sysc_base = syscon_node_to_regmap(node);
+ of_node_put(node);
+
+ if (IS_ERR(sysc_base)) {
+ ret = PTR_ERR(sysc_base);
+ if (ret != -EPROBE_DEFER)
+ dev_err(host->dev, "Can't get syscon: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ return 0;
+ }
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->sysc_base = sysc_base;
+ host->priv = priv;
+
+ return 0;
+}
+
+static unsigned long zx_dwmmc_caps[3] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+};
+
+static const struct dw_mci_drv_data zx_drv_data = {
+ .caps = zx_dwmmc_caps,
+ .execute_tuning = dw_mci_zx_execute_tuning,
+ .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
+ .parse_dt = dw_mci_zx_parse_dt,
+};
+
+static const struct of_device_id dw_mci_zx_match[] = {
+ { .compatible = "zte,zx296718-dw-mshc", .data = &zx_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_zx_match);
+
+static int dw_mci_zx_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_zx_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static const struct dev_pm_ops dw_mci_zx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver dw_mci_zx_pltfm_driver = {
+ .probe = dw_mci_zx_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_zx",
+ .of_match_table = dw_mci_zx_match,
+ .pm = &dw_mci_zx_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dw_mci_zx_pltfm_driver);
+
+MODULE_DESCRIPTION("ZTE emmc/sd driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h
new file mode 100644
index 000000000000..f369997a39ec
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-zx.h
@@ -0,0 +1,31 @@
+#ifndef _DW_MMC_ZX_H_
+#define _DW_MMC_ZX_H_
+
+/* ZX296718 SoC specific DLL register offset. */
+#define LB_AON_EMMC_CFG_REG0 0x1B0
+#define LB_AON_EMMC_CFG_REG1 0x1B4
+#define LB_AON_EMMC_CFG_REG2 0x1B8
+
+/* LB_AON_EMMC_CFG_REG0 register defines */
+#define PARA_DLL_START(x) ((x) & 0xFF)
+#define PARA_DLL_START_MASK 0xFF
+#define DLL_REG_SET BIT(8)
+#define PARA_DLL_LOCK_NUM(x) (((x) & 7) << 16)
+#define PARA_DLL_LOCK_NUM_MASK (7 << 16)
+#define PARA_PHASE_DET_SEL(x) (((x) & 7) << 20)
+#define PARA_PHASE_DET_SEL_MASK (7 << 20)
+#define PARA_DLL_BYPASS_MODE BIT(23)
+#define PARA_HALF_CLK_MODE BIT(24)
+
+/* LB_AON_EMMC_CFG_REG1 register defines */
+#define READ_DQS_DELAY(x) ((x) & 0x7F)
+#define READ_DQS_DELAY_MASK (0x7F)
+#define READ_DQS_BYPASS_MODE BIT(7)
+#define CLK_SAMP_DELAY(x) (((x) & 0x7F) << 8)
+#define CLK_SAMP_DELAY_MASK (0x7F << 8)
+#define CLK_SAMP_BYPASS_MODE BIT(15)
+
+/* LB_AON_EMMC_CFG_REG2 register defines */
+#define ZX_DLL_LOCKED BIT(2)
+
+#endif /* _DW_MMC_ZX_H_ */
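The field macros above are meant to be OR-ed into the syscon registers, exactly as dw_mci_zx_emmc_set_delay() does. A small illustrative helper (the function name is made up; the values mirror the ones the driver programs):

	/* e.g. zx_cfg0_dll_value(4, 4) is the REG0 value the driver writes. */
	static u32 zx_cfg0_dll_value(unsigned int start, unsigned int lock_num)
	{
		return PARA_DLL_START(start) | PARA_DLL_LOCK_NUM(lock_num) |
		       DLL_REG_SET;
	}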
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b886cb..a9ac0b457313 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -32,7 +32,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
@@ -1113,11 +1112,15 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
mci_writel(host, CTRL, temp);
/*
- * Use the initial fifoth_val for PIO mode.
+ * Use the initial fifoth_val for PIO mode. If wm_aligned
+ * is set, we set the watermark to match the data size.
* If next issued data may be transfered by DMA mode,
* prev_blksz should be invalidated.
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
+ if (host->wm_aligned)
+ dw_mci_adjust_fifoth(host, data);
+ else
+ mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
} else {
/*
@@ -1179,11 +1182,13 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
if ((clock != slot->__clk_old &&
!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
force_clkinit) {
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
- slot->id, host->bus_hz, clock,
- div ? ((host->bus_hz / div) >> 1) :
- host->bus_hz, div);
+ /* Silence the verbose log if calling from PM context */
+ if (!force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
/*
* If card is polling, display the message only
@@ -2977,6 +2982,11 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+ of_property_read_u32(np, "data-addr", &host->data_addr_override);
+
+ if (of_get_property(np, "fifo-watermark-aligned", NULL))
+ host->wm_aligned = true;
+
if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
pdata->bus_hz = clock_frequency;
@@ -3180,7 +3190,9 @@ int dw_mci_probe(struct dw_mci *host)
host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
dev_info(host->dev, "Version ID is %04x\n", host->verid);
- if (host->verid < DW_MMC_240A)
+ if (host->data_addr_override)
+ host->fifo_reg = host->regs + host->data_addr_override;
+ else if (host->verid < DW_MMC_240A)
host->fifo_reg = host->regs + DATA_OFFSET;
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
@@ -3354,10 +3366,11 @@ int dw_mci_runtime_resume(struct device *dev)
if (!slot)
continue;
- if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
- dw_mci_setup_bus(slot, true);
- }
+
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(slot, true);
}
/* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index c59465829387..ce347361f3dc 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,6 +14,269 @@
#ifndef _DW_MMC_H_
#define _DW_MMC_H_
+#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
+#include <linux/dmaengine.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+
+#define MAX_MCI_SLOTS 2
+
+enum dw_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_SENDING_DATA,
+ STATE_DATA_BUSY,
+ STATE_SENDING_STOP,
+ STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
+};
+
+enum {
+ EVENT_CMD_COMPLETE = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_DATA_COMPLETE,
+ EVENT_DATA_ERROR,
+};
+
+enum dw_mci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
+ COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
+};
+
+struct mmc_data;
+
+enum {
+ TRANS_MODE_PIO = 0,
+ TRANS_MODE_IDMAC,
+ TRANS_MODE_EDMAC
+};
+
+struct dw_mci_dma_slave {
+ struct dma_chan *ch;
+ enum dma_transfer_direction direction;
+};
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
+ * @regs: Pointer to MMIO registers.
+ * @fifo_reg: Pointer to MMIO registers for data FIFO
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg_miter: PIO mapping scatterlist iterator.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @stop_abort: The command currently prepared for stopping the transfer.
+ * @prev_blksz: The former transfer blksz record.
+ * @timing: Record of current ios timing.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @using_dma: Whether DMA is in use for the current transfer.
+ * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @ring_size: Buffer size for idma descriptors.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @fifoth_val: The value of FIFOTH register.
+ * @verid: Denote Version ID.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @drv_data: Driver specific data for identified variant of the controller
+ * @priv: Implementation defined private data.
+ * @biu_clk: Pointer to bus interface unit clock instance.
+ * @ciu_clk: Pointer to card interface unit clock instance.
+ * @slot: Slots sharing this MMC controller.
+ * @fifo_depth: depth of FIFO.
+ * @data_addr_override: override fifo reg offset with this value.
+ * @wm_aligned: force the fifo watermark to match the data length in PIO
+ * mode. Set to true if alignment is needed.
+ * @data_shift: log2 of FIFO item size.
+ * @part_buf_start: Start index in part_buf.
+ * @part_buf_count: Bytes of partial data in part_buf.
+ * @part_buf: Simple buffer for partial fifo reads/writes.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
+ * @irq_flags: The flags to be passed to request_irq.
+ * @irq: The irq value to be passed to request_irq.
+ * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for the SD 3.0 voltage switchover scheme.
+ * @dto_timer: Timer for the broken data transfer-over (DTO) scheme.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. It is held only
+ * long enough to read-modify-write INTMASK, and no other locks are grabbed
+ * while holding it.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+ spinlock_t lock;
+ spinlock_t irq_lock;
+ void __iomem *regs;
+ void __iomem *fifo_reg;
+ u32 data_addr_override;
+ bool wm_aligned;
+
+ struct scatterlist *sg;
+ struct sg_mapping_iter sg_miter;
+
+ struct dw_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_command stop_abort;
+ unsigned int prev_blksz;
+ unsigned char timing;
+
+ /* DMA interface members */
+ int use_dma;
+ int using_dma;
+ int dma_64bit_address;
+
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ const struct dw_mci_dma_ops *dma_ops;
+ /* For idmac */
+ unsigned int ring_size;
+
+ /* For edmac */
+ struct dw_mci_dma_slave *dms;
+ /* Registers' physical base address */
+ resource_size_t phy_regs;
+
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+ u32 dir_status;
+ struct tasklet_struct tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum dw_mci_state state;
+ struct list_head queue;
+
+ u32 bus_hz;
+ u32 current_speed;
+ u32 num_slots;
+ u32 fifoth_val;
+ u16 verid;
+ struct device *dev;
+ struct dw_mci_board *pdata;
+ const struct dw_mci_drv_data *drv_data;
+ void *priv;
+ struct clk *biu_clk;
+ struct clk *ciu_clk;
+ struct dw_mci_slot *slot[MAX_MCI_SLOTS];
+
+ /* FIFO push and pull */
+ int fifo_depth;
+ int data_shift;
+ u8 part_buf_start;
+ u8 part_buf_count;
+ union {
+ u16 part_buf16;
+ u32 part_buf32;
+ u64 part_buf;
+ };
+ void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+ void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+ bool vqmmc_enabled;
+ unsigned long irq_flags; /* IRQ flags */
+ int irq;
+
+ int sdio_id0;
+
+ struct timer_list cmd11_timer;
+ struct timer_list dto_timer;
+};
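The @irq_lock rule spelled out in the comment block above amounts to a short read-modify-write critical section. A minimal sketch of the idiom used throughout dw_mmc.c (the helper name is illustrative; mci_readl/mci_writel are the driver's register accessors):

	static void dw_mci_mask_irq_sketch(struct dw_mci *host, u32 bits)
	{
		unsigned long irqflags;

		/* Hold irq_lock only across the INTMASK read-modify-write. */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		mci_writel(host, INTMASK, mci_readl(host, INTMASK) & ~bits);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}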
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+ /* DMA Ops */
+ int (*init)(struct dw_mci *host);
+ int (*start)(struct dw_mci *host, unsigned int sg_len);
+ void (*complete)(void *host);
+ void (*stop)(struct dw_mci *host);
+ void (*cleanup)(struct dw_mci *host);
+ void (*exit)(struct dw_mci *host);
+};
+
+struct dma_pdata;
+
+/* Board platform data */
+struct dw_mci_board {
+ u32 num_slots;
+
+ unsigned int bus_hz; /* Clock speed at the cclk_in pad */
+
+ u32 caps; /* Capabilities */
+ u32 caps2; /* More capabilities */
+ u32 pm_caps; /* PM capabilities */
+ /*
+ * Override fifo depth. If 0, autodetect it from the FIFOTH register,
+ * but note that this may not be reliable after a bootloader has used
+ * it.
+ */
+ unsigned int fifo_depth;
+
+ /* delay in mS before detecting cards after interrupt */
+ u32 detect_delay_ms;
+
+ struct reset_control *rstc;
+ struct dw_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+};
+
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760c041e..5a959783304b 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -35,6 +35,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
#define DRIVER_NAME "meson-gx-mmc"
@@ -82,6 +83,7 @@
#define CFG_RC_CC_MASK 0xf
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
+#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)
#define SD_EMMC_STATUS 0x48
@@ -131,7 +133,7 @@ struct meson_host {
struct clk_mux mux;
struct clk *mux_clk;
struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
- unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];
+ unsigned long current_clock;
struct clk_divider cfg_div;
struct clk *cfg_div_clk;
@@ -178,7 +180,7 @@ struct sd_emmc_desc {
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
struct mmc_host *mmc = host->mmc;
- int ret = 0;
+ int ret;
u32 cfg;
if (clk_rate) {
@@ -188,7 +190,7 @@ static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
clk_rate = mmc->f_min;
}
- if (clk_rate == mmc->actual_clock)
+ if (clk_rate == host->current_clock)
return 0;
/* stop clock */
@@ -201,29 +203,34 @@ static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
dev_dbg(host->dev, "change clock rate %u -> %lu\n",
mmc->actual_clock, clk_rate);
- if (clk_rate == 0) {
+ if (!clk_rate) {
mmc->actual_clock = 0;
+ host->current_clock = 0;
+ /* return with clock being stopped */
return 0;
}
ret = clk_set_rate(host->cfg_div_clk, clk_rate);
- if (ret)
- dev_warn(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
- clk_rate, ret);
- else if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
- dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
- clk_rate, clk_get_rate(host->cfg_div_clk), ret);
- else
- mmc->actual_clock = clk_rate;
-
- /* (re)start clock, if non-zero */
- if (!ret && clk_rate) {
- cfg = readl(host->regs + SD_EMMC_CFG);
- cfg &= ~CFG_STOP_CLOCK;
- writel(cfg, host->regs + SD_EMMC_CFG);
+ if (ret) {
+ dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
+ clk_rate, ret);
+ return ret;
}
- return ret;
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+ host->current_clock = clk_rate;
+
+ if (clk_rate != mmc->actual_clock)
+ dev_dbg(host->dev,
+ "divider requested rate %lu != actual rate %u\n",
+ clk_rate, mmc->actual_clock);
+
+ /* (re)start clock */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ cfg &= ~CFG_STOP_CLOCK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+
+ return 0;
}
/*
@@ -239,7 +246,6 @@ static int meson_mmc_clk_init(struct meson_host *host)
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
unsigned int mux_parent_count = 0;
const char *clk_div_parents[1];
- unsigned int f_min = UINT_MAX;
u32 clk_reg, cfg;
/* get the mux parents */
@@ -256,20 +262,10 @@ static int meson_mmc_clk_init(struct meson_host *host)
return ret;
}
- host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
mux_parent_count++;
- if (host->mux_parent_rate[i] < f_min)
- f_min = host->mux_parent_rate[i];
}
- /* cacluate f_min based on input clocks, and max divider value */
- if (f_min != UINT_MAX)
- f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
- else
- f_min = 4000000; /* default min: 400 MHz */
- host->mmc->f_min = f_min;
-
/* create the mux */
snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
init.name = clk_name;
@@ -324,9 +320,13 @@ static int meson_mmc_clk_init(struct meson_host *host)
writel(cfg, host->regs + SD_EMMC_CFG);
ret = clk_prepare_enable(host->cfg_div_clk);
- if (!ret)
- ret = meson_mmc_clk_set(host, f_min);
+ if (ret)
+ return ret;
+
+ /* Get the nearest clock rate to the 400 kHz minimum */
+ host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);
+ ret = meson_mmc_clk_set(host, host->mmc->f_min);
if (!ret)
clk_disable_unprepare(host->cfg_div_clk);
@@ -378,7 +378,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
meson_mmc_clk_set(host, ios->clock);
/* Bus width */
- val = readl(host->regs + SD_EMMC_CFG);
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
bus_width = CFG_BUS_WIDTH_1;
@@ -393,7 +392,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
ios->bus_width);
bus_width = CFG_BUS_WIDTH_4;
- return;
}
val = readl(host->regs + SD_EMMC_CFG);
@@ -411,6 +409,16 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+ val &= ~CFG_DDR;
+ if (ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_MMC_HS400)
+ val |= CFG_DDR;
+
+ val &= ~CFG_CHK_DS;
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ val |= CFG_CHK_DS;
+
writel(val, host->regs + SD_EMMC_CFG);
if (val != orig)
@@ -480,9 +488,9 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
blk_len >>= CFG_BLK_LEN_SHIFT;
if (blk_len != ilog2(cmd->data->blksz)) {
- dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
+ dev_dbg(host->dev, "%s: update blk_len %d -> %d\n",
__func__, blk_len,
- ilog2(cmd->data->blksz));
+ ilog2(cmd->data->blksz));
blk_len = ilog2(cmd->data->blksz);
cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
cfg |= blk_len << CFG_BLK_LEN_SHIFT;
@@ -545,11 +553,6 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
/* Stop execution */
writel(0, host->regs + SD_EMMC_START);
- /* clear, ack, enable all interrupts */
- writel(0, host->regs + SD_EMMC_IRQ_EN);
- writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
- writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
-
host->mrq = mrq;
if (mrq->sbc)
@@ -578,13 +581,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_request *mrq;
- struct mmc_command *cmd = host->cmd;
+ struct mmc_command *cmd;
u32 irq_en, status, raw_status;
irqreturn_t ret = IRQ_HANDLED;
if (WARN_ON(!host))
return IRQ_NONE;
+ cmd = host->cmd;
+
mrq = host->mrq;
if (WARN_ON(!mrq))
@@ -667,23 +672,20 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
struct mmc_command *cmd = host->cmd;
struct mmc_data *data;
unsigned int xfer_bytes;
- int ret = IRQ_HANDLED;
if (WARN_ON(!mrq))
- ret = IRQ_NONE;
+ return IRQ_NONE;
if (WARN_ON(!cmd))
- ret = IRQ_NONE;
+ return IRQ_NONE;
data = cmd->data;
- if (data) {
+ if (data && data->flags & MMC_DATA_READ) {
xfer_bytes = data->blksz * data->blocks;
- if (data->flags & MMC_DATA_READ) {
- WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_from_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
- data->bytes_xfered = xfer_bytes;
- }
+ WARN_ON(xfer_bytes > host->bounce_buf_size);
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
+ data->bytes_xfered = xfer_bytes;
}
meson_mmc_read_resp(host->mmc, cmd);
@@ -692,7 +694,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
else
meson_mmc_start_cmd(host->mmc, data->stop);
- return ret;
+ return IRQ_HANDLED;
}
/*
@@ -740,7 +742,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
ret = mmc_of_parse(mmc);
if (ret) {
- dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
goto free_host;
}
@@ -778,6 +781,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* clear, ack, enable all interrupts */
writel(0, host->regs + SD_EMMC_IRQ_EN);
writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
ret = devm_request_threaded_irq(&pdev->dev, host->irq,
meson_mmc_irq, meson_mmc_irq_thread,
@@ -785,8 +789,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
if (ret)
goto free_host;
+ mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
+ mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
+
/* data bounce buffer */
- host->bounce_buf_size = SZ_512K;
+ host->bounce_buf_size = mmc->max_req_size;
host->bounce_buf =
dma_alloc_coherent(host->dev, host->bounce_buf_size,
&host->bounce_dma_addr, GFP_KERNEL);
@@ -812,12 +819,11 @@ static int meson_mmc_remove(struct platform_device *pdev)
{
struct meson_host *host = dev_get_drvdata(&pdev->dev);
- if (WARN_ON(!host))
- return 0;
+ /* disable interrupts */
+ writel(0, host->regs + SD_EMMC_IRQ_EN);
- if (host->bounce_buf)
- dma_free_coherent(host->dev, host->bounce_buf_size,
- host->bounce_buf, host->bounce_dma_addr);
+ dma_free_coherent(host->dev, host->bounce_buf_size,
+ host->bounce_buf, host->bounce_dma_addr);
clk_disable_unprepare(host->cfg_div_clk);
clk_disable_unprepare(host->core_clk);
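A note on the clock rework above: the driver now derives mmc->f_min from clk_round_rate() on the composite divider clock instead of hand-computing it from the mux parents. The following standalone sketch (not part of the patch) models what a divider clock's round-rate does; the 24 MHz parent and 8-bit divider limit are assumed values for illustration only.

#include <stdio.h>

/* Illustrative model of a divider clock's round_rate(): pick the smallest
 * divisor whose output does not exceed the target, clamped to the divider
 * width. Parent rate and divider maximum are assumptions for the example. */
static unsigned long div_round_rate(unsigned long parent, unsigned long target,
                                    unsigned long div_max)
{
    unsigned long div = (parent + target - 1) / target;   /* DIV_ROUND_UP */

    if (div > div_max)
        div = div_max;
    return parent / div;
}

int main(void)
{
    unsigned long parent = 24000000UL;   /* assumed 24 MHz mux parent */
    unsigned long f_min = div_round_rate(parent, 400000UL, 255);

    /* With these assumptions the achievable rate at or below ~400 kHz is
     * 24 MHz / 60 = 400 kHz, which becomes mmc->f_min. */
    printf("f_min = %lu Hz\n", f_min);
    return 0;
}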
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 01a804792f30..0c6420bb2f00 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -507,6 +507,7 @@ static void mmci_dma_data_error(struct mmci_host *host)
{
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
dmaengine_terminate_all(host->dma_current);
+ host->dma_in_progress = false;
host->dma_current = NULL;
host->dma_desc_current = NULL;
host->data->host_cookie = 0;
@@ -565,6 +566,7 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
mmci_dma_release(host);
}
+ host->dma_in_progress = false;
host->dma_current = NULL;
host->dma_desc_current = NULL;
}
@@ -665,6 +667,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
+ host->dma_in_progress = true;
dmaengine_submit(host->dma_desc_current);
dma_async_issue_pending(host->dma_current);
@@ -740,8 +743,10 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (host->dma_desc_current == next->dma_desc)
host->dma_desc_current = NULL;
- if (host->dma_current == next->dma_chan)
+ if (host->dma_current == next->dma_chan) {
+ host->dma_in_progress = false;
host->dma_current = NULL;
+ }
next->dma_desc = NULL;
next->dma_chan = NULL;
@@ -1023,7 +1028,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if (!host->busy_status && busy_resp &&
!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
(readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
- /* Unmask the busy IRQ */
+
+ /* Clear the busy start IRQ */
+ writel(host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+
+ /* Unmask the busy end IRQ */
writel(readl(base + MMCIMASK0) |
host->variant->busy_detect_mask,
base + MMCIMASK0);
@@ -1038,10 +1048,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
/*
* At this point we are not busy with a command, we have
- * not received a new busy request, mask the busy IRQ and
- * fall through to process the IRQ.
+ * not received a new busy request, clear and mask the busy
+ * end IRQ and fall through to process the IRQ.
*/
if (host->busy_status) {
+
+ writel(host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+
writel(readl(base + MMCIMASK0) &
~host->variant->busy_detect_mask,
base + MMCIMASK0);
@@ -1283,12 +1297,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
}
/*
- * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
- * enabled) since the HW seems to be triggering the IRQ on both
- * edges while monitoring DAT0 for busy completion.
+ * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
+ * enabled) in mmci_cmd_irq() function where ST Micro busy
+ * detection variant is handled. Considering the HW seems to be
+ * triggering the IRQ on both edges while monitoring DAT0 for
+ * busy completion and that same status bit is used to monitor
+ * start and end of busy detection, special care must be taken
+ * to make sure that both start and end interrupts are always
+ * cleared one after the other.
*/
status &= readl(host->base + MMCIMASK0);
- writel(status, host->base + MMCICLEAR);
+ if (host->variant->busy_detect)
+ writel(status & ~host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+ else
+ writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
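The comment above explains that the same status bit signals both the start and the end of busy detection, so the generic IRQ path must leave it alone and let mmci_cmd_irq() clear the two edges in order. A minimal simulation of that clearing rule follows; the bit values and function names are made up for the example and only the ordering idea comes from the patch.

#include <stdio.h>
#include <stdbool.h>

#define BUSY_DETECT_BIT 0x01u   /* one bit shared by busy-start and busy-end */

/* The generic IRQ path clears everything except the busy bit when the busy
 * detect variant is in use; the command handler clears that bit itself,
 * first for the start edge and later for the end edge. */
static unsigned int bits_cleared_in_generic_path(unsigned int status,
                                                 bool busy_variant)
{
    if (busy_variant)
        return status & ~BUSY_DETECT_BIT;
    return status;
}

int main(void)
{
    unsigned int status = BUSY_DETECT_BIT | 0x04u;  /* busy start + a data irq */
    unsigned int cleared = bits_cleared_in_generic_path(status, true);

    printf("status 0x%02x, generic path clears 0x%02x\n", status, cleared);
    /* The busy bit stays pending so the busy-end edge is not lost. */
    return 0;
}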
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 56322c6afba4..4a8bef1aac8f 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -245,8 +245,9 @@ struct mmci_host {
struct dma_chan *dma_tx_channel;
struct dma_async_tx_descriptor *dma_desc_current;
struct mmci_host_next next_data;
+ bool dma_in_progress;
-#define dma_inprogress(host) ((host)->dma_current)
+#define dma_inprogress(host) ((host)->dma_in_progress)
#else
#define dma_inprogress(host) (0)
#endif
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 10ef2ae1d2f6..8e32580c12b5 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -28,6 +28,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
@@ -1074,11 +1075,8 @@ static int msdc_card_busy(struct mmc_host *mmc)
struct msdc_host *host = mmc_priv(mmc);
u32 status = readl(host->base + MSDC_PS);
- /* check if any pin between dat[0:3] is low */
- if (((status >> 16) & 0xf) != 0xf)
- return 1;
-
- return 0;
+ /* only check if data0 is low */
+ return !(status & BIT(16));
}
static void msdc_request_timeout(struct work_struct *work)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..add1e70195ea 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -153,7 +153,11 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
}
}
- if (data) {
+ if (cmd == mrq->sbc) {
+ /* Finished CMD23, now send actual command. */
+ mxs_mmc_start_cmd(host, mrq->cmd);
+ return;
+ } else if (data) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, ssp->dma_dir);
/*
@@ -166,7 +170,7 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
data->bytes_xfered = 0;
host->data = NULL;
- if (mrq->stop) {
+ if (data->stop && (data->error || !mrq->sbc)) {
mxs_mmc_start_cmd(host, mrq->stop);
return;
}
@@ -309,6 +313,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +424,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
@@ -493,7 +499,11 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
- mxs_mmc_start_cmd(host, mrq->cmd);
+
+ if (mrq->sbc)
+ mxs_mmc_start_cmd(host, mrq->sbc);
+ else
+ mxs_mmc_start_cmd(host, mrq->cmd);
}
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -640,7 +650,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
/* set mmc core parameters */
mmc->ops = &mxs_mmc_ops;
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
host->broken_cd = of_property_read_bool(np, "broken-cd");
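With MMC_CAP_CMD23 the mxs-mmc request path now issues the set-block-count command (mrq->sbc) first, then the data command, and only sends an explicit stop when there was no CMD23 or the data phase failed. The sketch below mirrors that sequencing decision using made-up stand-in structures; it is an illustration, not driver code.

#include <stdio.h>
#include <stdbool.h>

/* Minimal stand-ins for the request fields the driver inspects. */
struct demo_data { bool has_stop; int error; };
struct demo_req  { bool has_sbc; struct demo_data *data; };

static const char *next_after_data(const struct demo_req *req)
{
    /* Mirrors "data->stop && (data->error || !mrq->sbc)": CMD12 is only
     * needed for open-ended transfers or for error recovery. */
    if (req->data && req->data->has_stop &&
        (req->data->error || !req->has_sbc))
        return "send STOP (CMD12)";
    return "request done";
}

int main(void)
{
    struct demo_data d_ok  = { .has_stop = true, .error = 0 };
    struct demo_req  sbc   = { .has_sbc = true,  .data = &d_ok };
    struct demo_req  plain = { .has_sbc = false, .data = &d_ok };

    printf("with CMD23:    %s\n", next_after_data(&sbc));   /* request done */
    printf("without CMD23: %s\n", next_after_data(&plain)); /* send STOP */
    return 0;
}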
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index be3c49fa7382..bd49f34d7654 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -893,7 +893,7 @@ static void mmc_omap_cover_handler(unsigned long param)
* If no card is inserted, we postpone polling until
* the cover has been closed.
*/
- if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
+ if (slot->mmc->card == NULL)
return;
mod_timer(&slot->cover_timer,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index ad11c4cc12ed..a58bd653ed8b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1162,7 +1162,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
if (status & ERR_EN) {
omap_hsmmc_dbg_report_irq(host, status);
- if (status & (CTO_EN | CCRC_EN))
+ if (status & (CTO_EN | CCRC_EN | CEB_EN))
end_cmd = 1;
if (host->data || host->response_busy) {
end_trans = !end_cmd;
@@ -1469,10 +1469,11 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
}
static void set_data_timeout(struct omap_hsmmc_host *host,
- unsigned int timeout_ns,
+ unsigned long long timeout_ns,
unsigned int timeout_clks)
{
- unsigned int timeout, cycle_ns;
+ unsigned long long timeout = timeout_ns;
+ unsigned int cycle_ns;
uint32_t reg, clkd, dto = 0;
reg = OMAP_HSMMC_READ(host->base, SYSCTL);
@@ -1481,7 +1482,7 @@ static void set_data_timeout(struct omap_hsmmc_host *host,
clkd = 1;
cycle_ns = 1000000000 / (host->clk_rate / clkd);
- timeout = timeout_ns / cycle_ns;
+ do_div(timeout, cycle_ns);
timeout += timeout_clks;
if (timeout) {
while ((timeout & 0x80000000) == 0) {
@@ -1527,16 +1528,24 @@ static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
int ret;
+ unsigned long long timeout;
+
host->data = req->data;
if (req->data == NULL) {
OMAP_HSMMC_WRITE(host->base, BLK, 0);
- /*
- * Set an arbitrary 100ms data timeout for commands with
- * busy signal.
- */
- if (req->cmd->flags & MMC_RSP_BUSY)
- set_data_timeout(host, 100000000U, 0);
+ if (req->cmd->flags & MMC_RSP_BUSY) {
+ timeout = req->cmd->busy_timeout * NSEC_PER_MSEC;
+
+ /*
+ * Set an arbitrary 100ms data timeout for commands with
+ * busy signal and no indication of busy_timeout.
+ */
+ if (!timeout)
+ timeout = 100000000U;
+
+ set_data_timeout(host, timeout, 0);
+ }
return 0;
}
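A worked example of why the timeout variable becomes 64-bit: cmd->busy_timeout is in milliseconds, so the nanosecond value can exceed what a u32 holds (anything above roughly 4.29 s overflows), and the division by the clock period then needs 64-bit arithmetic, which the driver gets from do_div(). The standalone arithmetic below uses assumed example values (10 s busy timeout, 96 MHz functional clock).

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
    /* Assumed example values, not taken from the patch. */
    uint64_t timeout_ns = 10000 * NSEC_PER_MSEC;     /* 10,000,000,000 ns */
    uint32_t clk_rate   = 96000000;                  /* 96 MHz, divider 1  */
    uint32_t cycle_ns   = 1000000000u / clk_rate;    /* ~10 ns per cycle   */

    /* 10e9 ns does not fit in a u32, so the division must be 64-bit;
     * on 32-bit kernels this is exactly what do_div() provides. */
    uint64_t timeout_cycles = timeout_ns / cycle_ns;

    printf("timeout = %llu clock cycles\n",
           (unsigned long long)timeout_cycles);      /* 1,000,000,000 */
    return 0;
}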
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index ecb99a8d2fa2..41b57713b620 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -707,7 +707,7 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
u8 opcode, u8 sample_point)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
err = sd_change_phase(host, sample_point, true);
if (err < 0)
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index dc1abd14acbc..12d2fbe9c520 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -682,7 +682,7 @@ static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host,
u8 opcode, u8 sample_point)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
err = sd_change_phase(host, sample_point, 0);
if (err)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 932a4b1fed33..7a173f8c455b 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695cc09c..9dcb7048e3b1 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
list_for_each_entry(child, &device->children, node)
- acpi_device_fix_up_power(child);
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
@@ -466,7 +467,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) {
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
}
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 4b0ecb981842..316cfec3f005 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -17,6 +17,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
@@ -25,7 +26,7 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 12
+#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index de132e281753..ece8b37e51dd 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -24,30 +24,36 @@
SDHCI_QUIRK_PIO_NEEDS_DELAY | \
SDHCI_QUIRK_NO_HISPD_BIT)
-#define ESDHC_PROCTL 0x28
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
/* pltfm-specific */
#define ESDHC_HOST_CONTROL_LE 0x20
/*
- * P2020 interpretation of the SDHCI_HOST_CONTROL register
+ * eSDHC register definition
*/
-#define ESDHC_CTRL_4BITBUS (0x1 << 1)
-#define ESDHC_CTRL_8BITBUS (0x2 << 1)
-#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
-
-/* OF-specific */
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-#define ESDHC_HOST_CONTROL_RES 0x01
+/* Present State Register */
+#define ESDHC_PRSSTAT 0x24
+#define ESDHC_CLOCK_STABLE 0x00000008
+
+/* Protocol Control Register */
+#define ESDHC_PROCTL 0x28
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+#define ESDHC_HOST_CONTROL_RES 0x01
+
+/* System Control Register */
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_SDCLKEN 0x00000008
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+/* Control Register for DMA transfer */
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index d7046d67415a..3275d4995812 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -211,14 +211,19 @@ static const struct sdhci_iproc_data iproc_data = {
static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_MISSING_CAPS,
+ SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_NO_HISPD_BIT,
.ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data bcm2835_data = {
.pdata = &sdhci_bcm2835_pltfm_data,
- .caps = SDHCI_CAN_VDD_330,
- .caps1 = 0x00000000,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_DO_HISPD,
+ .caps1 = SDHCI_DRIVER_TYPE_A |
+ SDHCI_DRIVER_TYPE_C,
.mmc_caps = 0x00000000,
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 32879b845b75..10cdc84d5113 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -69,6 +69,7 @@
#define CORE_DLL_CLOCK_DISABLE BIT(21)
#define CORE_VENDOR_SPEC 0x10c
+#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
@@ -102,6 +103,7 @@
#define CORE_DDR_200_CFG 0x184
#define CORE_CDC_T4_DLY_SEL BIT(0)
+#define CORE_CMDIN_RCLK_EN BIT(1)
#define CORE_START_CDC_TRAFFIC BIT(6)
#define CORE_VENDOR_SPEC3 0x1b0
#define CORE_PWRSAVE_DLL BIT(3)
@@ -138,6 +140,46 @@ struct sdhci_msm_host {
bool use_cdclp533;
};
+static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct mmc_ios ios = host->mmc->ios;
+ /*
+ * The SDHC requires internal clock frequency to be double the
+ * actual clock that will be set for DDR mode. The controller
+ * uses the faster clock (100/400 MHz) for some of its parts and
+ * sends the actual required clock (50/200 MHz) to the card.
+ */
+ if (ios.timing == MMC_TIMING_UHS_DDR50 ||
+ ios.timing == MMC_TIMING_MMC_DDR52 ||
+ ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ clock *= 2;
+ return clock;
+}
+
+static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios curr_ios = host->mmc->ios;
+ int rc;
+
+ clock = msm_get_clock_rate_for_bus_mode(host, clock);
+ rc = clk_set_rate(msm_host->clk, clock);
+ if (rc) {
+ pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+ mmc_hostname(host->mmc), clock,
+ curr_ios.timing);
+ return;
+ }
+ msm_host->clk_rate = clock;
+ pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+ mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ curr_ios.timing);
+}
+
/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
@@ -464,6 +506,122 @@ static int msm_init_cm_dll(struct sdhci_host *host)
return 0;
}
+static void msm_hc_select_default(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config;
+
+ if (!msm_host->use_cdclp533) {
+ config = readl_relaxed(host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ config &= ~CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_DFLT;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+ * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+ * in VENDOR_SPEC_FUNC
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_SELECT_IN_EN;
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+static void msm_hc_select_hs400(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios ios = host->mmc->ios;
+ u32 config, dll_lock;
+ int rc;
+
+ /* Select the divided clock (free running MCLK/2) */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_HS400;
+
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if ((msm_host->tuning_done || ios.enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config |= CORE_HC_SELECT_IN_HS400;
+ config |= CORE_HC_SELECT_IN_EN;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+ if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
+ /*
+ * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
+ * CORE_DLL_STATUS to be set. This should get set
+ * within 15 us at 200 MHz.
+ */
+ rc = readl_relaxed_poll_timeout(host->ioaddr +
+ CORE_DLL_STATUS,
+ dll_lock,
+ (dll_lock &
+ (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10,
+ 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc), dll_lock);
+ }
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+/*
+ * sdhci_msm_hc_select_mode - In general, all timing modes are
+ * controlled via the UHS mode select in the Host Control2 register.
+ * The eMMC-specific HS200/HS400 modes don't have their own values
+ * defined there, hence we use these values.
+ *
+ * HS200 - SDR104 (Since they both are equivalent in functionality)
+ * HS400 - This involves multiple configurations
+ * Initially SDR104 - when tuning is required as HS200
+ * Then when switching to DDR @ 400MHz (HS400) we use
+ * the vendor specific HC_SELECT_IN to control the mode.
+ *
+ * In addition to controlling the modes we also need to select the
+ * correct input clock for DLL depending on the mode.
+ *
+ * HS400 - divided clock (free running MCLK/2)
+ * All other modes - default (free running MCLK)
+ */
+void sdhci_msm_hc_select_mode(struct sdhci_host *host)
+{
+ struct mmc_ios ios = host->mmc->ios;
+
+ if (ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ msm_hc_select_hs400(host);
+ else
+ msm_hc_select_default(host);
+}
+
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -506,19 +664,7 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
config &= ~CORE_START_CDC_TRAFFIC;
writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
- /*
- * Perform CDC Register Initialization Sequence
- *
- * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
- * CORE_CSR_CDC_CTLR_CFG1 0x3011111
- * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
- * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
- * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
- * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
- * CORE_CSR_CDC_DELAY_CFG 0x3AC
- * CORE_CDC_OFFSET_CFG 0x0
- * CORE_CDC_SLAVE_DDA_CFG 0x16334
- */
+ /* Perform CDC Register Initialization Sequence */
writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
@@ -526,7 +672,7 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
- writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
@@ -579,6 +725,7 @@ out:
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
+ struct mmc_host *mmc = host->mmc;
u32 dll_status, config;
int ret;
@@ -593,6 +740,12 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
*/
writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+ if (mmc->ios.enhanced_strobe) {
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_CMDIN_RCLK_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ }
+
config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
config |= CORE_DDR_CAL_EN;
writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
@@ -627,6 +780,7 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = host->mmc;
int ret;
u32 config;
@@ -640,14 +794,17 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
if (ret)
goto out;
- /* Set the selected phase in delay line hw block */
- ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
- if (ret)
- goto out;
+ if (!mmc->ios.enhanced_strobe) {
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host,
+ msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ }
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
- config |= CORE_CMD_DAT_TRACK_SEL;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
if (msm_host->use_cdclp533)
ret = sdhci_msm_cdclp533_calibration(host);
else
@@ -658,12 +815,12 @@ out:
return ret;
}
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
+ struct sdhci_host *host = mmc_priv(mmc);
int tuning_seq_cnt = 3;
u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
int rc;
- struct mmc_host *mmc = host->mmc;
struct mmc_ios ios = host->mmc->ios;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
@@ -678,6 +835,17 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
ios.timing == MMC_TIMING_UHS_SDR104))
return 0;
+ /*
+ * Tuning for HS400 mode (performed in HS200 timing) requires:
+ * - select MCLK/2 in VENDOR_SPEC
+ * - program MCLK to 400MHz (or nearest supported) in GCC
+ */
+ if (host->flags & SDHCI_HS400_TUNING) {
+ sdhci_msm_hc_select_mode(host);
+ msm_set_clock_rate_for_bus_mode(host, ios.clock);
+ host->flags &= ~SDHCI_HS400_TUNING;
+ }
+
retry:
/* First of all reset the tuning block */
rc = msm_init_cm_dll(host);
@@ -732,6 +900,30 @@ retry:
return rc;
}
+/*
+ * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
+ * This needs to be done for both tuning and enhanced_strobe mode.
+ * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
+ * fixed feedback clock is used.
+ */
+static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if (host->clock > CORE_FREQ_100MHZ &&
+ (msm_host->tuning_done || ios->enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ ret = sdhci_msm_hs400_dll_calibration(host);
+ if (!ret)
+ msm_host->calibration_done = true;
+ else
+ pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ }
+}
+
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
@@ -800,12 +992,10 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
spin_unlock_irq(&host->lock);
- /* CDCLP533 HW calibration is only required for HS400 mode*/
- if (host->clock > CORE_FREQ_100MHZ &&
- msm_host->tuning_done && !msm_host->calibration_done &&
- mmc->ios.timing == MMC_TIMING_MMC_HS400)
- if (!sdhci_msm_hs400_dll_calibration(host))
- msm_host->calibration_done = true;
+
+ if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
+ sdhci_msm_hs400(host, &mmc->ios);
+
spin_lock_irq(&host->lock);
}
@@ -893,9 +1083,6 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- struct mmc_ios curr_ios = host->mmc->ios;
- u32 config, dll_lock;
- int rc;
if (!clock) {
msm_host->clk_rate = clock;
@@ -903,117 +1090,11 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
}
spin_unlock_irq(&host->lock);
- /*
- * The SDHC requires internal clock frequency to be double the
- * actual clock that will be set for DDR mode. The controller
- * uses the faster clock(100/400MHz) for some of its parts and
- * send the actual required clock (50/200MHz) to the card.
- */
- if (curr_ios.timing == MMC_TIMING_UHS_DDR50 ||
- curr_ios.timing == MMC_TIMING_MMC_DDR52 ||
- curr_ios.timing == MMC_TIMING_MMC_HS400)
- clock *= 2;
- /*
- * In general all timing modes are controlled via UHS mode select in
- * Host Control2 register. eMMC specific HS200/HS400 doesn't have
- * their respective modes defined here, hence we use these values.
- *
- * HS200 - SDR104 (Since they both are equivalent in functionality)
- * HS400 - This involves multiple configurations
- * Initially SDR104 - when tuning is required as HS200
- * Then when switching to DDR @ 400MHz (HS400) we use
- * the vendor specific HC_SELECT_IN to control the mode.
- *
- * In addition to controlling the modes we also need to select the
- * correct input clock for DLL depending on the mode.
- *
- * HS400 - divided clock (free running MCLK/2)
- * All other modes - default (free running MCLK)
- */
- if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
- /* Select the divided clock (free running MCLK/2) */
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
- config &= ~CORE_HC_MCLK_SEL_MASK;
- config |= CORE_HC_MCLK_SEL_HS400;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
- /*
- * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
- * register
- */
- if (msm_host->tuning_done && !msm_host->calibration_done) {
- /*
- * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
- * field in VENDOR_SPEC_FUNC
- */
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
- config |= CORE_HC_SELECT_IN_HS400;
- config |= CORE_HC_SELECT_IN_EN;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
- }
- if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
- /*
- * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
- * CORE_DLL_STATUS to be set. This should get set
- * within 15 us at 200 MHz.
- */
- rc = readl_relaxed_poll_timeout(host->ioaddr +
- CORE_DLL_STATUS,
- dll_lock,
- (dll_lock &
- (CORE_DLL_LOCK |
- CORE_DDR_DLL_LOCK)), 10,
- 1000);
- if (rc == -ETIMEDOUT)
- pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
- mmc_hostname(host->mmc), dll_lock);
- }
- } else {
- if (!msm_host->use_cdclp533) {
- config = readl_relaxed(host->ioaddr +
- CORE_VENDOR_SPEC3);
- config &= ~CORE_PWRSAVE_DLL;
- writel_relaxed(config, host->ioaddr +
- CORE_VENDOR_SPEC3);
- }
+ sdhci_msm_hc_select_mode(host);
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
- config &= ~CORE_HC_MCLK_SEL_MASK;
- config |= CORE_HC_MCLK_SEL_DFLT;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ msm_set_clock_rate_for_bus_mode(host, clock);
- /*
- * Disable HC_SELECT_IN to be able to use the UHS mode select
- * configuration from Host Control2 register for all other
- * modes.
- * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
- * in VENDOR_SPEC_FUNC
- */
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
- config &= ~CORE_HC_SELECT_IN_EN;
- config &= ~CORE_HC_SELECT_IN_MASK;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
- }
-
- /*
- * Make sure above writes impacting free running MCLK are completed
- * before changing the clk_rate at GCC.
- */
- wmb();
-
- rc = clk_set_rate(msm_host->clk, clock);
- if (rc) {
- pr_err("%s: Failed to set clock at rate %u at timing %d\n",
- mmc_hostname(host->mmc), clock,
- curr_ios.timing);
- goto out_lock;
- }
- msm_host->clk_rate = clock;
- pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
- curr_ios.timing);
-
-out_lock:
spin_lock_irq(&host->lock);
out:
__sdhci_msm_set_clock(host, clock);
@@ -1027,7 +1108,6 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
- .platform_execute_tuning = sdhci_msm_execute_tuning,
.reset = sdhci_reset,
.set_clock = sdhci_msm_set_clock,
.get_min_clock = sdhci_msm_get_min_clock,
@@ -1134,17 +1214,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
- config = readl_relaxed(msm_host->core_mem + CORE_POWER);
- config |= CORE_SW_RST;
- writel_relaxed(config, msm_host->core_mem + CORE_POWER);
-
- /* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
- usleep_range(1000, 5000);
- if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
- dev_err(&pdev->dev, "Stuck in reset\n");
- ret = -ETIMEDOUT;
- goto clk_disable;
- }
+ /* Reset the vendor spec register to power on reset state */
+ writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+ host->ioaddr + CORE_VENDOR_SPEC);
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
@@ -1210,6 +1282,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
MSM_MMC_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(&pdev->dev);
+ host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
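The factored-out msm_set_clock_rate_for_bus_mode() above doubles the requested GCC rate for DDR-style timings (DDR50, DDR52, HS400) because the controller internally runs at twice the bus clock in those modes. A tiny standalone sketch of that rate selection, with an enum standing in for the MMC_TIMING_* constants:

#include <stdio.h>

/* Stand-in timing identifiers; the real driver uses MMC_TIMING_* values
 * from the MMC core. */
enum demo_timing { TIMING_SDR104, TIMING_DDR50, TIMING_DDR52, TIMING_HS400 };

static unsigned int rate_for_bus_mode(enum demo_timing t, unsigned int clock)
{
    /* DDR-style modes clock the controller at 2x the card clock. */
    if (t == TIMING_DDR50 || t == TIMING_DDR52 || t == TIMING_HS400)
        return clock * 2;
    return clock;
}

int main(void)
{
    printf("SDR104 @ 200 MHz -> %u Hz\n",
           rate_for_bus_mode(TIMING_SDR104, 200000000));   /* 200000000 */
    printf("HS400  @ 200 MHz -> %u Hz\n",
           rate_for_bus_mode(TIMING_HS400, 200000000));    /* 400000000 */
    return 0;
}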
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 9a6eb4492172..d3aa67142839 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -431,6 +431,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int pre_div = 1;
int div = 1;
+ u32 timeout;
u32 temp;
host->mmc->actual_clock = 0;
@@ -451,8 +452,8 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
}
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | ESDHC_CLOCK_MASK);
+ temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
@@ -472,7 +473,21 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
| (div << ESDHC_DIVIDER_SHIFT)
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(1);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
+ if (timeout == 0) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ temp |= ESDHC_CLOCK_SDCLKEN;
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
@@ -569,16 +584,19 @@ static const struct sdhci_ops sdhci_esdhc_le_ops = {
};
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+#ifdef CONFIG_PPC
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+#endif
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_be_ops,
};
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_le_ops,
};
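The esdhc_of_set_clock() change above keeps SDCLKEN gated off while the divider is reprogrammed, polls the present-state register for CLOCK_STABLE for up to 20 ms, and only then gates the SD clock back on. The simulation below illustrates that bounded-poll pattern; the fake register that becomes stable after a few reads is an assumption for the example and delays are not modelled.

#include <stdio.h>

#define CLOCK_STABLE 0x8u   /* illustrative bit, mirrors ESDHC_CLOCK_STABLE */

/* Fake "hardware": reports the clock as stable after the third read. */
static unsigned int read_prsstat(void)
{
    static int reads;
    return (++reads >= 3) ? CLOCK_STABLE : 0;
}

int main(void)
{
    unsigned int timeout = 20;   /* wait at most ~20 iterations (20 ms) */

    while (!(read_prsstat() & CLOCK_STABLE)) {
        if (timeout == 0) {
            fprintf(stderr, "internal clock never stabilised\n");
            return 1;
        }
        timeout--;
        /* the driver sleeps 1 ms here (mdelay(1)) */
    }

    printf("clock stable, enabling SDCLKEN\n");
    return 0;
}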
@@ -643,8 +661,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
of_device_is_compatible(np, "fsl,p1020-esdhc") ||
- of_device_is_compatible(np, "fsl,t1040-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+ of_device_is_compatible(np, "fsl,t1040-esdhc"))
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1a72d32af07f..982b3e349426 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -424,7 +424,6 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
@@ -866,6 +865,86 @@ enum amd_chipset_gen {
AMD_CHIPSET_UNKNOWN,
};
+/* AMD registers */
+#define AMD_SD_AUTO_PATTERN 0xB8
+#define AMD_MSLEEP_DURATION 4
+#define AMD_SD_MISC_CONTROL 0xD0
+#define AMD_MAX_TUNE_VALUE 0x0B
+#define AMD_AUTO_TUNE_SEL 0x10800
+#define AMD_FIFO_PTR 0x30
+#define AMD_BIT_MASK 0x1F
+
+static void amd_tuning_reset(struct sdhci_host *host)
+{
+ unsigned int val;
+
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+}
+
+static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
+ val &= ~AMD_BIT_MASK;
+ val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
+ pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
+}
+
+static void amd_enable_manual_tuning(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
+ val |= AMD_FIFO_PTR;
+ pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
+}
+
+static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u8 valid_win = 0;
+ u8 valid_win_max = 0;
+ u8 valid_win_end = 0;
+ u8 ctrl, tune_around;
+
+ amd_tuning_reset(host);
+
+ for (tune_around = 0; tune_around < 12; tune_around++) {
+ amd_config_tuning_phase(pdev, tune_around);
+
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
+ valid_win = 0;
+ msleep(AMD_MSLEEP_DURATION);
+ ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
+ sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
+ } else if (++valid_win > valid_win_max) {
+ valid_win_max = valid_win;
+ valid_win_end = tune_around;
+ }
+ }
+
+ if (!valid_win_max) {
+ dev_err(&pdev->dev, "no tuning point found\n");
+ return -EIO;
+ }
+
+ amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
+
+ amd_enable_manual_tuning(pdev);
+
+ host->mmc->retune_period = 0;
+
+ return 0;
+}
+
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
@@ -888,16 +967,24 @@ static int amd_probe(struct sdhci_pci_chip *chip)
}
}
- if ((gen == AMD_CHIPSET_BEFORE_ML) || (gen == AMD_CHIPSET_CZ)) {
+ if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
- chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
- }
return 0;
}
+static const struct sdhci_ops amd_sdhci_pci_ops = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .platform_execute_tuning = amd_execute_tuning,
+};
+
static const struct sdhci_pci_fixes sdhci_amd = {
.probe = amd_probe,
+ .ops = &amd_sdhci_pci_ops,
};
static const struct pci_device_id pci_ids[] = {
@@ -1817,7 +1904,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
if (slot->cd_idx >= 0) {
- ret = mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
+ ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
if (ret == -EPROBE_DEFER)
goto remove;
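The AMD tuning loop added above walks 12 phase settings, tracks the longest run of consecutively passing phases, and programs the phase at the centre of that run (valid_win_end - valid_win_max / 2). A worked standalone example of that window-centre selection over a made-up pass/fail pattern:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    /* Made-up tuning results for phases 0..11: 1 = tuning command passed. */
    const bool pass[12] = { 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 };
    unsigned int valid_win = 0, valid_win_max = 0, valid_win_end = 0;

    for (unsigned int phase = 0; phase < 12; phase++) {
        if (!pass[phase]) {
            valid_win = 0;              /* a failing phase breaks the run */
        } else if (++valid_win > valid_win_max) {
            valid_win_max = valid_win;  /* remember the longest run so far */
            valid_win_end = phase;      /* and the phase that ends it */
        }
    }

    if (!valid_win_max) {
        fprintf(stderr, "no tuning point found\n");
        return 1;
    }

    /* The longest window here is phases 1..5 (length 5), so the centre is
     * 5 - 5/2 = 3, which is the phase that gets programmed. */
    printf("selected phase = %u\n", valid_win_end - valid_win_max / 2);
    return 0;
}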
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 4abdaed72bd4..36f743464fcc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -81,7 +81,6 @@ struct sdhci_pci_slot {
int cd_gpio;
int cd_irq;
- char *cd_con_id;
int cd_idx;
bool cd_override_level;
diff --git a/drivers/mmc/host/sdhci-s3c-regs.h b/drivers/mmc/host/sdhci-s3c-regs.h
deleted file mode 100644
index e34049ad44cc..000000000000
--- a/drivers/mmc/host/sdhci-s3c-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C Platform - SDHCI (HSMMC) register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_S3C_SDHCI_REGS_H
-#define __PLAT_S3C_SDHCI_REGS_H __FILE__
-
-#define S3C_SDHCI_CONTROL2 (0x80)
-#define S3C_SDHCI_CONTROL3 (0x84)
-#define S3C64XX_SDHCI_CONTROL4 (0x8C)
-
-#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR (1 << 31)
-#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK (1 << 30)
-#define S3C_SDHCI_CTRL2_CDINVRXD3 (1 << 29)
-#define S3C_SDHCI_CTRL2_SLCARDOUT (1 << 28)
-
-#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
-#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
-#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
-
-#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
-#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
-#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
-
-#define S3C_SDHCI_CTRL2_ENFBCLKTX (1 << 15)
-#define S3C_SDHCI_CTRL2_ENFBCLKRX (1 << 14)
-#define S3C_SDHCI_CTRL2_SDCDSEL (1 << 13)
-#define S3C_SDHCI_CTRL2_SDSIGPC (1 << 12)
-#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART (1 << 11)
-
-#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
-#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
-
-#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD (1 << 8)
-#define S3C_SDHCI_CTRL2_RWAITMODE (1 << 7)
-#define S3C_SDHCI_CTRL2_DISBUFRD (1 << 6)
-#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
-#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
-#define S3C_SDHCI_CTRL2_PWRSYNC (1 << 3)
-#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON (1 << 1)
-#define S3C_SDHCI_CTRL2_HWINITFIN (1 << 0)
-
-#define S3C_SDHCI_CTRL3_FCSEL3 (1 << 31)
-#define S3C_SDHCI_CTRL3_FCSEL2 (1 << 23)
-#define S3C_SDHCI_CTRL3_FCSEL1 (1 << 15)
-#define S3C_SDHCI_CTRL3_FCSEL0 (1 << 7)
-
-#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
-#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
-#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
-
-#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
-#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
-#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
-
-#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
-#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
-#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
-
-#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
-#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
-#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
-
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
-
-#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
-
-#endif /* __PLAT_S3C_SDHCI_REGS_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index de219ca7ea7c..3e5c83d435ae 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -29,11 +29,80 @@
#include <linux/mmc/host.h>
-#include "sdhci-s3c-regs.h"
#include "sdhci.h"
#define MAX_BUS_CLK (4)
+#define S3C_SDHCI_CONTROL2 (0x80)
+#define S3C_SDHCI_CONTROL3 (0x84)
+#define S3C64XX_SDHCI_CONTROL4 (0x8C)
+
+#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR BIT(31)
+#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK BIT(30)
+#define S3C_SDHCI_CTRL2_CDINVRXD3 BIT(29)
+#define S3C_SDHCI_CTRL2_SLCARDOUT BIT(28)
+
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
+#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
+#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL2_ENFBCLKTX BIT(15)
+#define S3C_SDHCI_CTRL2_ENFBCLKRX BIT(14)
+#define S3C_SDHCI_CTRL2_SDCDSEL BIT(13)
+#define S3C_SDHCI_CTRL2_SDSIGPC BIT(12)
+#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART BIT(11)
+
+#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
+#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
+
+#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD BIT(8)
+#define S3C_SDHCI_CTRL2_RWAITMODE BIT(7)
+#define S3C_SDHCI_CTRL2_DISBUFRD BIT(6)
+
+#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
+#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
+#define S3C_SDHCI_CTRL2_PWRSYNC BIT(3)
+#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON BIT(1)
+#define S3C_SDHCI_CTRL2_HWINITFIN BIT(0)
+
+#define S3C_SDHCI_CTRL3_FCSEL3 BIT(31)
+#define S3C_SDHCI_CTRL3_FCSEL2 BIT(23)
+#define S3C_SDHCI_CTRL3_FCSEL1 BIT(15)
+#define S3C_SDHCI_CTRL3_FCSEL0 BIT(7)
+
+#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
+#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
+#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
+#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
+#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
+#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
+#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
+
+#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
+#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
+#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
+
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
+
+#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
+
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 23909804ffb8..6fdd7a70f229 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2021,8 +2021,8 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
unsigned long flags)
{
struct mmc_host *mmc = host->mmc;
- struct mmc_command cmd = {0};
- struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {};
+ struct mmc_request mrq = {};
cmd.opcode = opcode;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -2114,7 +2114,6 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
spin_lock_irqsave(&host->lock, flags);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- host->flags &= ~SDHCI_HS400_TUNING;
if (host->tuning_mode == SDHCI_TUNING_MODE_1)
tuning_count = host->tuning_count;
@@ -2156,7 +2155,9 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
- return host->ops->platform_execute_tuning(host, opcode);
+ err = host->ops->platform_execute_tuning(host, opcode);
+ spin_lock_irqsave(&host->lock, flags);
+ goto out_unlock;
}
host->mmc->retune_period = tuning_count;
@@ -2167,6 +2168,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_end_tuning(host);
out_unlock:
+ host->flags &= ~SDHCI_HS400_TUNING;
spin_unlock_irqrestore(&host->lock, flags);
return err;
@@ -2733,7 +2735,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if (intmask & SDHCI_INT_RETUNE)
mmc_retune_needed(host->mmc);
- if (intmask & SDHCI_INT_CARD_INT) {
+ if ((intmask & SDHCI_INT_CARD_INT) &&
+ (host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
result = IRQ_WAKE_THREAD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0b66f210ae82..edf3adfbc213 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -17,6 +17,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/interrupt.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 900778421be6..4062d6bef3c8 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1079,26 +1079,10 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->state = STATE_IDLE;
}
-static int sh_mmcif_get_cd(struct mmc_host *mmc)
-{
- struct sh_mmcif_host *host = mmc_priv(mmc);
- struct device *dev = sh_mmcif_host_to_dev(host);
- struct sh_mmcif_plat_data *p = dev->platform_data;
- int ret = mmc_gpio_get_cd(mmc);
-
- if (ret >= 0)
- return ret;
-
- if (!p || !p->get_cd)
- return -ENOSYS;
- else
- return p->get_cd(host->pd);
-}
-
static struct mmc_host_ops sh_mmcif_ops = {
.request = sh_mmcif_request,
.set_ios = sh_mmcif_set_ios,
- .get_cd = sh_mmcif_get_cd,
+ .get_cd = mmc_gpio_get_cd,
};
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
@@ -1443,8 +1427,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host->mmc = mmc;
host->addr = reg;
host->timeout = msecs_to_jiffies(10000);
- host->ccs_enable = !pd || !pd->ccs_unsupported;
- host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
+ host->ccs_enable = true;
+ host->clk_ctrl2_enable = false;
host->pd = pdev;
@@ -1509,12 +1493,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
}
}
- if (pd && pd->use_cd_gpio) {
- ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
- if (ret < 0)
- goto err_clk;
- }
-
mutex_init(&host->thread_lock);
ret = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index d46c2d00c182..bc6be0dbea39 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -143,6 +143,7 @@ MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
struct sh_mobile_sdhi {
struct clk *clk;
+ struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
struct pinctrl *pinctrl;
@@ -190,6 +191,12 @@ static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(priv->clk_cd);
+ if (ret < 0) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
/*
* The clock driver may not know what maximum frequency
* actually works, so it should be set with the max-frequency
@@ -255,6 +262,7 @@ static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
struct sh_mobile_sdhi *priv = host_to_priv(host);
clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_cd);
}
static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
@@ -335,9 +343,6 @@ static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
{
struct sh_mobile_sdhi *priv;
- if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
- return 0;
-
priv = host_to_priv(host);
/* set sampling clock selection range */
@@ -444,12 +449,7 @@ static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
- struct sh_mobile_sdhi *priv;
-
- if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
- return 0;
-
- priv = host_to_priv(host);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
/* Check SCC error */
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
@@ -468,9 +468,6 @@ static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
{
struct sh_mobile_sdhi *priv;
- if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
- return;
-
priv = host_to_priv(host);
/* Reset SCC */
@@ -556,8 +553,7 @@ static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
+ const struct sh_mobile_sdhi_of_data *of_data = of_device_get_match_data(&pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
@@ -584,6 +580,21 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ /*
+ * Some controllers provide a 2nd clock just to run the internal card
+ * detection logic. Unfortunately, the existing driver architecture does
+ * not support a separation of clocks for runtime PM usage. When
+ * native hotplug is used, the tmio driver assumes that the core
+ * must continue to run for card detect to stay active, so we cannot
+ * disable it.
+ * Additionally, it is prohibited to supply a clock to the core but not
+ * to the card detect circuit. So, if separate clocks are provided, we
+ * must treat them virtually as one clock.
+ */
+ priv->clk_cd = devm_clk_get(&pdev->dev, "cd");
+ if (IS_ERR(priv->clk_cd))
+ priv->clk_cd = NULL;
+
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
@@ -598,9 +609,8 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
- if (of_id && of_id->data) {
- const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+ if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
mmc_data->ocr_mask = of_data->tmio_ocr_mask;
mmc_data->capabilities |= of_data->capabilities;
@@ -623,11 +633,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
host->card_busy = sh_mobile_sdhi_card_busy;
host->start_signal_voltage_switch =
sh_mobile_sdhi_start_signal_voltage_switch;
- host->init_tuning = sh_mobile_sdhi_init_tuning;
- host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
- host->select_tuning = sh_mobile_sdhi_select_tuning;
- host->check_scc_error = sh_mobile_sdhi_check_scc_error;
- host->hw_reset = sh_mobile_sdhi_hw_reset;
}
 /* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -659,40 +664,40 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
- /*
- * All SDHI need SDIO_INFO1 reserved bit
- */
- mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
+ /* All SDHI have SDIO status bits which must be 1 */
+ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
ret = tmio_mmc_host_probe(host, mmc_data);
if (ret < 0)
goto efree;
- if (host->mmc->caps & MMC_CAP_UHS_SDR104) {
+ /* Enable tuning iff we have an SCC and a supported mode */
+ if (of_data && of_data->scc_offset &&
+ (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
+ host->mmc->caps2 & MMC_CAP2_HS200_1_8V_SDR)) {
+ const struct sh_mobile_sdhi_scc *taps = of_data->taps;
+ bool hit = false;
+
host->mmc->caps |= MMC_CAP_HW_RESET;
- if (of_id && of_id->data) {
- const struct sh_mobile_sdhi_of_data *of_data;
- const struct sh_mobile_sdhi_scc *taps;
- bool hit = false;
-
- of_data = of_id->data;
- taps = of_data->taps;
-
- for (i = 0; i < of_data->taps_num; i++) {
- if (taps[i].clk_rate == 0 ||
- taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
- hit = true;
- break;
- }
+ for (i = 0; i < of_data->taps_num; i++) {
+ if (taps[i].clk_rate == 0 ||
+ taps[i].clk_rate == host->mmc->f_max) {
+ host->scc_tappos = taps->tap;
+ hit = true;
+ break;
}
+ }
- if (!hit)
- dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+ if (!hit)
+ dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
- priv->scc_ctl = host->ctl + of_data->scc_offset;
- }
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+ host->init_tuning = sh_mobile_sdhi_init_tuning;
+ host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
+ host->select_tuning = sh_mobile_sdhi_select_tuning;
+ host->check_scc_error = sh_mobile_sdhi_check_scc_error;
+ host->hw_reset = sh_mobile_sdhi_hw_reset;
}
i = 0;
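The tuning setup above walks the SoC's tap table and accepts the first entry whose clk_rate is either 0 (a wildcard) or equal to the host's maximum clock, warning when nothing matches. A small stand-alone rendering of that lookup, with an invented table and rates (sdhi_scc and all values here are hypothetical, not taken from any real SoC data):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct sdhi_scc {
	unsigned long clk_rate;	/* 0 acts as a wildcard */
	unsigned int tap;
};

int main(void)
{
	const struct sdhi_scc taps[] = {
		{ .clk_rate = 156000000, .tap = 0x00000707 },
		{ .clk_rate = 0,         .tap = 0x00000300 },	/* catch-all */
	};
	unsigned long f_max = 195000000;	/* host->mmc->f_max */
	unsigned int tappos = 0;
	bool hit = false;

	for (size_t i = 0; i < sizeof(taps) / sizeof(taps[0]); i++) {
		if (taps[i].clk_rate == 0 || taps[i].clk_rate == f_max) {
			/* this sketch records the matching entry's tap */
			tappos = taps[i].tap;
			hit = true;
			break;
		}
	}

	if (!hit)
		printf("Unknown clock rate for SDR104\n");
	else
		printf("selected tap position 0x%08x\n", tappos);
	return 0;
}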
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index b1d1303389a7..6ffcd2838272 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -5,6 +5,7 @@
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
 * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
+ * (C) Copyright 2017 Sootech SA
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -101,6 +102,7 @@
(SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET)
/* clock control bits */
+#define SDXC_MASK_DATA0 BIT(31)
#define SDXC_CARD_CLOCK_ON BIT(16)
#define SDXC_LOW_POWER_ON BIT(17)
@@ -253,6 +255,11 @@ struct sunxi_mmc_cfg {
/* does the IP block support autocalibration? */
bool can_calibrate;
+
+ /* Does DATA0 need to be masked while the clock is updated? */
+ bool mask_data0;
+
+ bool needs_new_timings;
};
struct sunxi_mmc_host {
@@ -654,11 +661,16 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
unsigned long expire = jiffies + msecs_to_jiffies(750);
u32 rval;
+ dev_dbg(mmc_dev(host->mmc), "%sabling the clock\n",
+ oclk_en ? "en" : "dis");
+
rval = mmc_readl(host, REG_CLKCR);
- rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON);
+ rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);
if (oclk_en)
rval |= SDXC_CARD_CLOCK_ON;
+ if (host->cfg->mask_data0)
+ rval |= SDXC_MASK_DATA0;
mmc_writel(host, REG_CLKCR, rval);
@@ -678,46 +690,29 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
return -EIO;
}
+ if (host->cfg->mask_data0) {
+ rval = mmc_readl(host, REG_CLKCR);
+ mmc_writel(host, REG_CLKCR, rval & ~SDXC_MASK_DATA0);
+ }
+
return 0;
}
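The clock gate update above is a read-modify-write: the card-clock, low-power and DATA0-mask bits are cleared, the required ones are set back, and once the update has completed the DATA0 mask is dropped again on the parts that need it. The bit arithmetic in isolation, with an invented starting register value:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SDXC_CARD_CLOCK_ON	(1u << 16)
#define SDXC_LOW_POWER_ON	(1u << 17)
#define SDXC_MASK_DATA0		(1u << 31)

int main(void)
{
	uint32_t clkcr = 0xdeadbeef;	/* pretend this was read from REG_CLKCR */
	bool oclk_en = true;
	bool mask_data0 = true;		/* host->cfg->mask_data0 */

	/* start of the update: clear the three control bits */
	clkcr &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);
	if (oclk_en)
		clkcr |= SDXC_CARD_CLOCK_ON;
	if (mask_data0)
		clkcr |= SDXC_MASK_DATA0;
	printf("value written during the update: 0x%08x\n", (unsigned)clkcr);

	/* after the update has completed: unmask DATA0 again */
	if (mask_data0)
		clkcr &= ~SDXC_MASK_DATA0;
	printf("value written afterwards:        0x%08x\n", (unsigned)clkcr);
	return 0;
}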
static int sunxi_mmc_calibrate(struct sunxi_mmc_host *host, int reg_off)
{
- u32 reg = readl(host->reg_base + reg_off);
- u32 delay;
- unsigned long timeout;
-
if (!host->cfg->can_calibrate)
return 0;
- reg &= ~(SDXC_CAL_DL_MASK << SDXC_CAL_DL_SW_SHIFT);
- reg &= ~SDXC_CAL_DL_SW_EN;
-
- writel(reg | SDXC_CAL_START, host->reg_base + reg_off);
-
- dev_dbg(mmc_dev(host->mmc), "calibration started\n");
-
- timeout = jiffies + HZ * SDXC_CAL_TIMEOUT;
-
- while (!((reg = readl(host->reg_base + reg_off)) & SDXC_CAL_DONE)) {
- if (time_before(jiffies, timeout))
- cpu_relax();
- else {
- reg &= ~SDXC_CAL_START;
- writel(reg, host->reg_base + reg_off);
-
- return -ETIMEDOUT;
- }
- }
-
- delay = (reg >> SDXC_CAL_DL_SHIFT) & SDXC_CAL_DL_MASK;
-
- reg &= ~SDXC_CAL_START;
- reg |= (delay << SDXC_CAL_DL_SW_SHIFT) | SDXC_CAL_DL_SW_EN;
-
- writel(reg, host->reg_base + reg_off);
-
- dev_dbg(mmc_dev(host->mmc), "calibration ended, reg is 0x%x\n", reg);
+ /*
+ * FIXME:
+ * It is not yet clear how the calibration is supposed to work.
+ * The best rates have been obtained by simply setting the delay
+ * to 0, as Allwinner does in its BSP.
+ *
+ * The only mode that doesn't have such a delay is HS400, which
+ * is itself a TODO.
+ */
+ writel(SDXC_CAL_DL_SW_EN, host->reg_base + reg_off);
return 0;
}
@@ -745,6 +740,7 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
index = SDXC_CLK_50M_DDR;
}
} else {
+ dev_dbg(mmc_dev(host->mmc), "Invalid clock... returning\n");
return -EINVAL;
}
@@ -757,10 +753,21 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
+ struct mmc_host *mmc = host->mmc;
long rate;
u32 rval, clock = ios->clock;
int ret;
+ ret = sunxi_mmc_oclk_onoff(host, 0);
+ if (ret)
+ return ret;
+
+ /* Our clock is gated now */
+ mmc->actual_clock = 0;
+
+ if (!ios->clock)
+ return 0;
+
/* 8 bit DDR requires a higher module clock */
if (ios->timing == MMC_TIMING_MMC_DDR52 &&
ios->bus_width == MMC_BUS_WIDTH_8)
@@ -768,25 +775,21 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
rate = clk_round_rate(host->clk_mmc, clock);
if (rate < 0) {
- dev_err(mmc_dev(host->mmc), "error rounding clk to %d: %ld\n",
+ dev_err(mmc_dev(mmc), "error rounding clk to %d: %ld\n",
clock, rate);
return rate;
}
- dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %ld\n",
+ dev_dbg(mmc_dev(mmc), "setting clk to %d, rounded %ld\n",
clock, rate);
/* setting clock rate */
ret = clk_set_rate(host->clk_mmc, rate);
if (ret) {
- dev_err(mmc_dev(host->mmc), "error setting clk to %ld: %d\n",
+ dev_err(mmc_dev(mmc), "error setting clk to %ld: %d\n",
rate, ret);
return ret;
}
- ret = sunxi_mmc_oclk_onoff(host, 0);
- if (ret)
- return ret;
-
/* clear internal divider */
rval = mmc_readl(host, REG_CLKCR);
rval &= ~0xff;
@@ -798,6 +801,9 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
+ if (host->cfg->needs_new_timings)
+ mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+
ret = sunxi_mmc_clk_set_phase(host, ios, rate);
if (ret)
return ret;
@@ -806,9 +812,22 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
if (ret)
return ret;
- /* TODO: enable calibrate on sdc2 SDXC_REG_DS_DL_REG of A64 */
+ /*
+ * FIXME:
+ *
+ * In HS400 we'll also need to calibrate the data strobe
+ * signal. This should only happen on the MMC2 controller (at
+ * least on the A64).
+ */
+
+ ret = sunxi_mmc_oclk_onoff(host, 1);
+ if (ret)
+ return ret;
+
+ /* And we just enabled our clock back */
+ mmc->actual_clock = rate;
- return sunxi_mmc_oclk_onoff(host, 1);
+ return 0;
}
static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -882,7 +901,7 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmc_writel(host, REG_GCTRL, rval);
/* set up clock */
- if (ios->clock && ios->power_mode) {
+ if (ios->power_mode) {
host->ferror = sunxi_mmc_clk_set_rate(host, ios);
/* Android code had a usleep_range(50000, 55000); here */
}
@@ -1089,6 +1108,14 @@ static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
.idma_des_size_bits = 16,
.clk_delays = NULL,
.can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
+static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
+ .idma_des_size_bits = 13,
+ .clk_delays = NULL,
+ .can_calibrate = true,
};
static const struct of_device_id sunxi_mmc_of_match[] = {
@@ -1097,6 +1124,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9e20bcf3aa8d..2b349d48fb9a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -24,6 +24,7 @@
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -90,6 +91,8 @@
#define TMIO_SDIO_STAT_EXWT 0x8000
#define TMIO_SDIO_MASK_ALL 0xc007
+#define TMIO_SDIO_SETBITS_MASK 0x0006
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 2064fa1a5bf1..6b789a739d4d 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -134,18 +134,25 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct tmio_mmc_host *host = mmc_priv(mmc);
if (enable && !host->sdio_irq_enabled) {
+ u16 sdio_status;
+
/* Keep device active while SDIO irq is enabled */
pm_runtime_get_sync(mmc_dev(mmc));
- host->sdio_irq_enabled = true;
+ host->sdio_irq_enabled = true;
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
~TMIO_SDIO_STAT_IOIRQ;
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+
+ /* Clear obsolete interrupts before enabling */
+ sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
+ if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
+ sdio_status |= TMIO_SDIO_SETBITS_MASK;
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
+
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
} else if (!enable && host->sdio_irq_enabled) {
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = false;
pm_runtime_mark_last_busy(mmc_dev(mmc));
@@ -711,9 +718,8 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
return false;
}
-static void tmio_mmc_sdio_irq(int irq, void *devid)
+static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
- struct tmio_mmc_host *host = devid;
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, status;
@@ -726,8 +732,8 @@ static void tmio_mmc_sdio_irq(int irq, void *devid)
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
sdio_status = status & ~TMIO_SDIO_MASK_ALL;
- if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
- sdio_status |= 6;
+ if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
+ sdio_status |= TMIO_SDIO_SETBITS_MASK;
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
@@ -754,7 +760,7 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- tmio_mmc_sdio_irq(irq, devid);
+ __tmio_mmc_sdio_irq(host);
return IRQ_HANDLED;
}
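Both the enable path and the interrupt handler now build the SDIO status write the same way: preserve the bits outside TMIO_SDIO_MASK_ALL and, on controllers with the SETBITS quirk, force bits 1 and 2 (TMIO_SDIO_SETBITS_MASK) to 1. The computation on its own, with an invented raw status value:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TMIO_SDIO_MASK_ALL	0xc007
#define TMIO_SDIO_SETBITS_MASK	0x0006

int main(void)
{
	uint16_t status = 0xc001;	/* pretend this was read from CTL_SDIO_STATUS */
	bool setbits_quirk = true;	/* TMIO_MMC_SDIO_STATUS_SETBITS in pdata->flags */
	uint16_t sdio_status;

	/* acknowledge the handled bits, keep everything else untouched */
	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (setbits_quirk)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	printf("write back 0x%04x\n", (unsigned)sdio_status);
	return 0;
}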
@@ -902,6 +908,12 @@ static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
return host->clk_enable(host);
}
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -1145,7 +1157,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
ret = mmc_of_parse(mmc);
if (ret < 0)
- goto host_free;
+ return ret;
_host->pdata = pdata;
platform_set_drvdata(pdev, mmc);
@@ -1155,14 +1167,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
ret = tmio_mmc_init_ocr(_host);
if (ret < 0)
- goto host_free;
+ return ret;
_host->ctl = devm_ioremap(&pdev->dev,
res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl) {
- ret = -ENOMEM;
- goto host_free;
- }
+ if (!_host->ctl)
+ return -ENOMEM;
tmio_mmc_ops.card_busy = _host->card_busy;
tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
@@ -1179,8 +1189,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- !mmc_card_is_removable(mmc) ||
- mmc->slot.cd_irq >= 0);
+ !mmc_card_is_removable(mmc));
/*
* On Gen2+, eMMC with NONREMOVABLE currently fails because native
@@ -1200,10 +1209,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
* Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
* looping forever...
*/
- if (mmc->f_min == 0) {
- ret = -EINVAL;
- goto host_free;
- }
+ if (mmc->f_min == 0)
+ return -EINVAL;
/*
* While using internal tmio hardware logic for card detection, we need
@@ -1232,7 +1239,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
- sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
+ sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
}
spin_lock_init(&_host->lock);
@@ -1268,10 +1275,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
return 0;
-
-host_free:
-
- return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
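In the probe above the host_free label fell straight through to return ret, so every goto could become a plain return; the resources taken on those early paths are device-managed and need no manual unwinding. A generic sketch of that shape (the names are placeholders, not this driver's, and it is not a complete module):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* no unwind label: devm resources are released automatically */

	/* ... */
	return 0;
}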
@@ -1280,6 +1283,9 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
struct platform_device *pdev = host->pdev;
struct mmc_host *mmc = host->mmc;
+ if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
@@ -1292,6 +1298,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
@@ -1306,8 +1314,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
if (host->clk_cache)
tmio_mmc_clk_stop(host);
- if (host->clk_disable)
- host->clk_disable(host);
+ tmio_mmc_clk_disable(host);
return 0;
}
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 63fac78b3d46..6380044c0628 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index bb3e0d1dd355..c061e7c704be 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -640,8 +640,6 @@ static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
mutex_lock(&vub300->irq_mutex);
if (vub300->irq_enabled)
mmc_signal_sdio_irq(vub300->mmc);
- else if (vub300->irqs_queued)
- vub300->irqs_queued += 1;
else
vub300->irqs_queued += 1;
vub300->irq_disabled = 0;
@@ -728,8 +726,7 @@ static void vub300_deadwork_thread(struct work_struct *work)
*/
} else if (vub300->card_present) {
check_vub300_port_status(vub300);
- } else if (vub300->mmc && vub300->mmc->card &&
- mmc_card_present(vub300->mmc->card)) {
+ } else if (vub300->mmc && vub300->mmc->card) {
/*
* the MMC core must not have responded
* to the previous indication - lets
@@ -1756,8 +1753,7 @@ static void vub300_cmndwork_thread(struct work_struct *work)
int data_length;
mutex_lock(&vub300->cmd_mutex);
init_completion(&vub300->command_complete);
- if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
- !mmc_card_present(vub300->mmc->card)) {
+ if (likely(vub300->vub_name[0]) || !vub300->mmc->card) {
/*
* the name of the EMPTY Pseudo firmware file
* is used as a flag to indicate that the file
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 80a3b11f3217..bd04e8bae010 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1437,11 +1437,14 @@ err:
static void wbsd_release_dma(struct wbsd_host *host)
{
- if (!dma_mapping_error(mmc_dev(host->mmc), host->dma_addr)) {
+ /*
+ * host->dma_addr is valid here iff host->dma_buffer is not NULL.
+ */
+ if (host->dma_buffer) {
dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ kfree(host->dma_buffer);
}
- kfree(host->dma_buffer);
if (host->dma >= 0)
free_dma(host->dma);
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 5af00559e9d6..21ebba88679c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 283ff7e17a0f..d10fa6c8f074 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/bcm47xx_nvram.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -83,6 +84,91 @@ out_default:
return "rootfs";
}
+static int bcm47xxpart_parse_trx(struct mtd_info *master,
+ struct mtd_partition *trx,
+ struct mtd_partition *parts,
+ size_t parts_len)
+{
+ struct trx_header header;
+ size_t bytes_read;
+ int curr_part = 0;
+ int i, err;
+
+ if (parts_len < 3) {
+ pr_warn("Not enough space to add TRX partitions!\n");
+ return -ENOMEM;
+ }
+
+ err = mtd_read(master, trx->offset, sizeof(header), &bytes_read,
+ (uint8_t *)&header);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading TRX header: %d\n", err);
+ return err;
+ }
+
+ i = 0;
+
+ /* We have an LZMA loader if offset[2] points to something */
+ if (header.offset[2]) {
+ bcm47xxpart_add_part(&parts[curr_part++], "loader",
+ trx->offset + header.offset[i], 0);
+ i++;
+ }
+
+ if (header.offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++], "linux",
+ trx->offset + header.offset[i], 0);
+ i++;
+ }
+
+ if (header.offset[i]) {
+ size_t offset = trx->offset + header.offset[i];
+ const char *name = bcm47xxpart_trx_data_part_name(master,
+ offset);
+
+ bcm47xxpart_add_part(&parts[curr_part++], name, offset, 0);
+ i++;
+ }
+
+ /*
+ * Assume that every partition ends at the beginning of the one that
+ * follows it.
+ */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset :
+ trx->offset + trx->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ }
+
+ return curr_part;
+}
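bcm47xxpart_parse_trx sizes each sub-partition by the offset of the partition that follows it, and the last one by the end of the enclosing TRX partition. The same arithmetic as a small stand-alone program, with invented offsets rather than values from a real firmware image:

#include <stdio.h>
#include <stdint.h>

struct part {
	const char *name;
	uint64_t offset;
	uint64_t size;
};

int main(void)
{
	/* hypothetical TRX partition spanning [0x040000, 0x800000) */
	uint64_t trx_offset = 0x040000, trx_size = 0x7c0000;
	struct part parts[] = {
		{ "loader", trx_offset + 0x00001c, 0 },
		{ "linux",  trx_offset + 0x001000, 0 },
		{ "rootfs", trx_offset + 0x200000, 0 },
	};
	int curr_part = 3;

	for (int i = 0; i < curr_part; i++) {
		uint64_t next = (i < curr_part - 1) ? parts[i + 1].offset
						    : trx_offset + trx_size;

		parts[i].size = next - parts[i].offset;
		printf("%-6s offset 0x%08llx size 0x%08llx\n", parts[i].name,
		       (unsigned long long)parts[i].offset,
		       (unsigned long long)parts[i].size);
	}
	return 0;
}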
+
+/**
+ * bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader
+ *
+ * Some devices may have more than one TRX partition. In such a case one of
+ * them is the main one and the other a failsafe one. The bootloader may fall
+ * back to the failsafe firmware if it detects corruption of the main image.
+ *
+ * This function provides info about currently used TRX partition. It's the one
+ * containing kernel started by the bootloader.
+ */
+static int bcm47xxpart_bootpartition(void)
+{
+ char buf[4];
+ int bootpartition;
+
+ /* Check CFE environment variable */
+ if (bcm47xx_nvram_getenv("bootpartition", buf, sizeof(buf)) > 0) {
+ if (!kstrtoint(buf, 0, &bootpartition))
+ return bootpartition;
+ }
+
+ return 0;
+}
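bcm47xxpart_bootpartition simply parses the CFE "bootpartition" variable as an integer and falls back to 0 (the first TRX) when the variable is absent or malformed. A user-space approximation, with strtol standing in for the nvram/kstrtoint helpers and a fake environment lookup:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Stand-in for bcm47xx_nvram_getenv(); returns the variable or NULL. */
static const char *fake_getenv(const char *name)
{
	return strcmp(name, "bootpartition") == 0 ? "1" : NULL;
}

static int bootpartition(void)
{
	const char *val = fake_getenv("bootpartition");
	char *end;
	long idx;

	if (!val)
		return 0;

	errno = 0;
	idx = strtol(val, &end, 0);
	if (errno || end == val || *end != '\0' || idx < 0)
		return 0;	/* malformed value: default to the first TRX */

	return (int)idx;
}

int main(void)
{
	printf("boot partition index: %d\n", bootpartition());
	return 0;
}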
+
static int bcm47xxpart_parse(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
@@ -93,9 +179,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
size_t bytes_read;
uint32_t offset;
uint32_t blocksize = master->erasesize;
- struct trx_header *trx;
- int trx_part = -1;
- int last_trx_part = -1;
+ int trx_parts[2]; /* Array with indexes of TRX partitions */
+ int trx_num = 0; /* Number of found TRX partitions */
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
int err;
@@ -182,54 +267,18 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {
- if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
- pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
- break;
- }
-
- trx = (struct trx_header *)buf;
+ struct trx_header *trx;
- trx_part = curr_part;
+ if (trx_num >= ARRAY_SIZE(trx_parts))
+ pr_warn("Not enough space to store another TRX found at 0x%X\n",
+ offset);
+ else
+ trx_parts[trx_num++] = curr_part;
bcm47xxpart_add_part(&parts[curr_part++], "firmware",
offset, 0);
- i = 0;
- /* We have LZMA loader if offset[2] points to sth */
- if (trx->offset[2]) {
- bcm47xxpart_add_part(&parts[curr_part++],
- "loader",
- offset + trx->offset[i],
- 0);
- i++;
- }
-
- if (trx->offset[i]) {
- bcm47xxpart_add_part(&parts[curr_part++],
- "linux",
- offset + trx->offset[i],
- 0);
- i++;
- }
-
- /*
- * Pure rootfs size is known and can be calculated as:
- * trx->length - trx->offset[i]. We don't fill it as
- * we want to have jffs2 (overlay) in the same mtd.
- */
- if (trx->offset[i]) {
- const char *name;
-
- name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
- bcm47xxpart_add_part(&parts[curr_part++],
- name,
- offset + trx->offset[i],
- 0);
- i++;
- }
-
- last_trx_part = curr_part - 1;
-
/* Jump to the end of TRX */
+ trx = (struct trx_header *)buf;
offset = roundup(offset + trx->length, blocksize);
/* Next loop iteration will increase the offset */
offset -= blocksize;
@@ -307,9 +356,23 @@ static int bcm47xxpart_parse(struct mtd_info *master,
parts[i + 1].offset : master->size;
parts[i].size = next_part_offset - parts[i].offset;
- if (i == last_trx_part && trx_part >= 0)
- parts[trx_part].size = next_part_offset -
- parts[trx_part].offset;
+ }
+
+ /* If there was TRX parse it now */
+ for (i = 0; i < trx_num; i++) {
+ struct mtd_partition *trx = &parts[trx_parts[i]];
+
+ if (i == bcm47xxpart_bootpartition()) {
+ int num_parts;
+
+ num_parts = bcm47xxpart_parse_trx(master, trx,
+ parts + curr_part,
+ BCM47XXPART_MAX_PARTS - curr_part);
+ if (num_parts > 0)
+ curr_part += num_parts;
+ } else {
+ trx->name = "failsafe";
+ }
}
*pparts = parts;
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 514be04c0b6c..e2bd81817df4 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -105,15 +105,33 @@ static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct bcm47xxsflash *b47s = mtd->priv;
+ size_t orig_len = len;
/* Check address range */
if ((from + len) > mtd->size)
return -EINVAL;
- memcpy_fromio(buf, b47s->window + from, len);
- *retlen = len;
+ /* Read as much as possible using fast MMIO window */
+ if (from < BCM47XXSFLASH_WINDOW_SZ) {
+ size_t memcpy_len;
- return len;
+ memcpy_len = min(len, (size_t)(BCM47XXSFLASH_WINDOW_SZ - from));
+ memcpy_fromio(buf, b47s->window + from, memcpy_len);
+ from += memcpy_len;
+ len -= memcpy_len;
+ buf += memcpy_len;
+ }
+
+ /* Use indirect access for content out of the window */
+ for (; len; len--) {
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, from++);
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_READ4B);
+ *buf++ = b47s->cc_read(b47s, BCMA_CC_FLASHDATA);
+ }
+
+ *retlen = orig_len;
+
+ return orig_len;
}
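The read path now serves as much of the request as possible from the 16 MiB memory-mapped window and falls back to slower indirect register accesses for anything beyond it. A stand-alone model of the split, with memcpy and a per-byte loop standing in for memcpy_fromio and the ChipCommon FLASHADDR/FLASHDATA accesses, and a tiny window size so the boundary is easy to cross:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WINDOW_SZ 16	/* tiny stand-in for BCM47XXSFLASH_WINDOW_SZ (16 MiB) */

static const uint8_t flash[32] = "0123456789abcdefghijklmnopqrstu";

/* stand-in for the indirect register access */
static uint8_t indirect_read(size_t addr)
{
	return flash[addr];
}

static size_t sflash_read(size_t from, uint8_t *buf, size_t len)
{
	size_t orig_len = len;

	/* fast path: copy whatever falls inside the mapped window */
	if (from < WINDOW_SZ) {
		size_t n = WINDOW_SZ - from;

		if (n > len)
			n = len;
		memcpy(buf, flash + from, n);
		from += n;
		buf += n;
		len -= n;
	}

	/* slow path: one "register" access per remaining byte */
	for (; len; len--)
		*buf++ = indirect_read(from++);

	return orig_len;
}

int main(void)
{
	uint8_t out[24] = { 0 };

	sflash_read(8, out, 20);	/* crosses the window boundary */
	printf("%.20s\n", out);
	return 0;
}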
static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
@@ -284,7 +302,6 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
if (!b47s)
return -ENOMEM;
- sflash->priv = b47s;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -334,6 +351,8 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
b47s->size = sflash->size;
bcm47xxsflash_fill_mtd(b47s, &pdev->dev);
+ platform_set_drvdata(pdev, b47s);
+
err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
@@ -349,8 +368,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
- struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
- struct bcm47xxsflash *b47s = sflash->priv;
+ struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
mtd_device_unregister(&b47s->mtd);
iounmap(b47s->window);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index 1564b62b412e..b2d7b38f75fd 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -3,6 +3,8 @@
#include <linux/mtd/mtd.h>
+#define BCM47XXSFLASH_WINDOW_SZ SZ_16M
+
/* Used for ST flashes only. */
#define OPCODE_ST_WREN 0x0006 /* Write Enable */
#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
@@ -16,6 +18,7 @@
#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+#define OPCODE_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte addressing mode */
/* Used for Atmel flashes only. */
#define OPCODE_AT_READ 0x07e8
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9cf7fcd28034..c4df3b1bded0 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -172,7 +172,8 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
t[1].rx_buf = buf;
t[1].rx_nbits = m25p80_rx_nbits(nor);
- t[1].len = min(len, spi_max_transfer_size(spi));
+ t[1].len = min3(len, spi_max_transfer_size(spi),
+ spi_max_message_size(spi) - t[0].len);
spi_message_add_tail(&t[1], &m);
ret = spi_sync(spi, &m);
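The data transfer length is now capped by three limits at once: the requested length, the controller's maximum transfer size, and whatever is left of the maximum message size after the command/address transfer already queued. In plain arithmetic, with illustrative numbers rather than values from any real controller:

#include <stdio.h>
#include <stddef.h>

static size_t min3(size_t a, size_t b, size_t c)
{
	size_t m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	size_t len = 4096;		/* bytes requested by the MTD layer */
	size_t max_transfer = 65535;	/* spi_max_transfer_size() of the controller */
	size_t max_message = 4100;	/* spi_max_message_size() of the controller */
	size_t cmd_len = 5;		/* opcode + address already queued in t[0] */

	printf("data transfer length: %zu\n",
	       min3(len, max_transfer, max_message - cmd_len));
	return 0;
}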
@@ -288,7 +289,6 @@ static const struct spi_device_id m25p_ids[] = {
* should be kept for backward compatibility.
*/
{"at25df321a"}, {"at25df641"}, {"at26df081a"},
- {"mr25h256"},
{"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
{"mx25l25635e"},{"mx66l51235l"},
{"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
@@ -305,6 +305,11 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+ /* Everspin MRAMs (non-JEDEC) */
+ { "mr25h256" }, /* 256 Kib, 40 MHz */
+ { "mr25h10" }, /* 1 Mib, 40 MHz */
+ { "mr25h40" }, /* 4 Mib, 40 MHz */
+
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
index f59a125295d0..8b81e15105dd 100644
--- a/drivers/mtd/devices/serial_flash_cmds.h
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -18,19 +18,12 @@
#define SPINOR_OP_RDVCR 0x85
 /* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
-#define SPINOR_OP_READ_1_2_2 0xbb /* DUAL I/O READ */
-#define SPINOR_OP_READ_1_4_4 0xeb /* QUAD I/O READ */
-
#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
-/* READ commands with 32-bit addressing */
-#define SPINOR_OP_READ4_1_2_2 0xbc
-#define SPINOR_OP_READ4_1_4_4 0xec
-
/* Configuration flags */
#define FLASH_FLAG_SINGLE 0x000000ff
#define FLASH_FLAG_READ_WRITE 0x00000001
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 5454b4113589..804313a33f2b 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -507,13 +507,13 @@ static struct seq_rw_config n25q_read3_configs[] = {
* - 'FAST' variants configured for 8 dummy cycles (see note above.)
*/
static struct seq_rw_config n25q_read4_configs[] = {
- {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
- {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/*
@@ -553,13 +553,13 @@ static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
* entering a state that is incompatible with the SPIBoot Controller.
*/
static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
- {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
- {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
- {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
- {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
- {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
- {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
- {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5bcc896a48c3..542fdf8e81fa 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -75,7 +75,7 @@ config MTD_PHYSMAP_OF
taken from OF device tree.
config MTD_PHYSMAP_OF_VERSATILE
- bool "Support ARM Versatile physmap OF"
+ bool "ARM Versatile OF-based physical memory map handling"
depends on MTD_PHYSMAP_OF
depends on MFD_SYSCON
default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW)
@@ -84,6 +84,16 @@ config MTD_PHYSMAP_OF_VERSATILE
platforms, basically to add a VPP (write protection) callback so
the flash can be taken out of write protection.
+config MTD_PHYSMAP_OF_GEMINI
+ bool "Cortina Gemini OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on MFD_SYSCON
+ default ARCH_GEMINI
+ help
+ This provides some extra DT physmap parsing for the Gemini
+ platforms: detecting the flash configuration and setting up parallel
+ mode on the external interface.
+
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 644f7d36d35d..aef1846b4de2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -17,10 +17,13 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
-obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
-obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of_versatile.o
+physmap_of-objs += physmap_of_versatile.o
+endif
+ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI
+physmap_of-objs += physmap_of_gemini.o
endif
+obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index e17d02ae03f0..976d42f63aef 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -57,10 +57,12 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
{
struct ichxrom_map_info *map, *scratch;
u16 word;
+ int ret;
/* Disable writes through the rom window */
- pci_read_config_word(window->pdev, BIOS_CNTL, &word);
- pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+ if (!ret)
+ pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
pci_dev_put(window->pdev);
/* Free all of the mtd devices */
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index c8febb326fa6..3e33ab66eb24 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/err.h>
@@ -209,5 +209,5 @@ static struct platform_driver ltq_mtd_driver = {
module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC NOR");
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 3fad35942895..14e8909c9955 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -24,6 +24,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include "physmap_of_gemini.h"
#include "physmap_of_versatile.h"
struct of_flash_list {
@@ -241,11 +242,13 @@ static int of_flash_probe(struct platform_device *dev)
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
info->list[i].map.device_node = dp;
+
+ err = of_flash_probe_gemini(dev, dp, &info->list[i].map);
+ if (err)
+ return err;
err = of_flash_probe_versatile(dev, dp, &info->list[i].map);
- if (err) {
- dev_err(&dev->dev, "Can't probe Versatile VPP\n");
+ if (err)
return err;
- }
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c
new file mode 100644
index 000000000000..9d371cd728ea
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_gemini.c
@@ -0,0 +1,117 @@
+/*
+ * Cortina Systems Gemini OF physmap add-on
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This SoC has an elaborate flash control register, so we need to
+ * detect and set it up when booting on this platform.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "physmap_of_gemini.h"
+
+/*
+ * The Flash-relevant parts of the global status register
+ * These would also be relevant for a NAND driver.
+ */
+#define GLOBAL_STATUS 0x04
+#define FLASH_TYPE_MASK (0x3 << 24)
+#define FLASH_TYPE_NAND_2K (0x3 << 24)
+#define FLASH_TYPE_NAND_512 (0x2 << 24)
+#define FLASH_TYPE_PARALLEL (0x1 << 24)
+#define FLASH_TYPE_SERIAL (0x0 << 24)
+/* if parallel */
+#define FLASH_WIDTH_16BIT (1 << 23) /* else 8 bit */
+/* if serial */
+#define FLASH_ATMEL (1 << 23) /* else STM */
+
+#define FLASH_SIZE_MASK (0x3 << 21)
+#define NAND_256M (0x3 << 21) /* and more */
+#define NAND_128M (0x2 << 21)
+#define NAND_64M (0x1 << 21)
+#define NAND_32M (0x0 << 21)
+#define ATMEL_16M (0x3 << 21) /* and more */
+#define ATMEL_8M (0x2 << 21)
+#define ATMEL_4M_2M (0x1 << 21)
+#define ATMEL_1M (0x0 << 21) /* and less */
+#define STM_32M (1 << 22) /* and more */
+#define STM_16M (0 << 22) /* and less */
+
+#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
+
+/* Miscellaneous Control Register */
+#define GLOBAL_MISC_CTRL 0x30
+#define FLASH_PADS_MASK 0x07
+#define NAND_PADS_DISABLE BIT(2)
+#define PFLASH_PADS_DISABLE BIT(1)
+#define SFLASH_PADS_DISABLE BIT(0)
+
+static const struct of_device_id syscon_match[] = {
+ { .compatible = "cortina,gemini-syscon" },
+ { },
+};
+
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ static struct regmap *rmap;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "cortina,gemini-flash"))
+ return 0;
+
+ rmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(rmap)) {
+ dev_err(dev, "no syscon\n");
+ return PTR_ERR(rmap);
+ }
+
+ ret = regmap_read(rmap, GLOBAL_STATUS, &val);
+ if (ret) {
+ dev_err(dev, "failed to read global status register\n");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "global status reg: %08x\n", val);
+
+ /*
+ * It would be contradictory if a physmap flash was NOT parallel.
+ */
+ if ((val & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL) {
+ dev_err(dev, "flash is not parallel\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Complain if the DT data and the hardware definition differ.
+ */
+ if (val & FLASH_WIDTH_16BIT) {
+ if (map->bankwidth != 2)
+ dev_warn(dev, "flash hardware says flash is 16 bit wide but DT says it is %d bits wide\n",
+ map->bankwidth * 8);
+ } else {
+ if (map->bankwidth != 1)
+ dev_warn(dev, "flash hardware says flash is 8 bit wide but DT says it is %d bits wide\n",
+ map->bankwidth * 8);
+ }
+
+ /* Activate parallel (NOR flash) mode */
+ ret = regmap_update_bits(rmap, GLOBAL_MISC_CTRL,
+ FLASH_PADS_MASK,
+ SFLASH_PADS_DISABLE | NAND_PADS_DISABLE);
+ if (ret) {
+ dev_err(dev, "unable to set up physmap pads\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "initialized Gemini-specific physmap control\n");
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap_of_gemini.h b/drivers/mtd/maps/physmap_of_gemini.h
new file mode 100644
index 000000000000..c675025288dd
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_gemini.h
@@ -0,0 +1,16 @@
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap_of_versatile.c b/drivers/mtd/maps/physmap_of_versatile.c
index 0f39b2a015f4..8c6ccded9be8 100644
--- a/drivers/mtd/maps/physmap_of_versatile.c
+++ b/drivers/mtd/maps/physmap_of_versatile.c
@@ -252,4 +252,3 @@ int of_flash_probe_versatile(struct platform_device *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(of_flash_probe_versatile);
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f9fa3fad728e..2051f28ddac6 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
}
msp_maps[i].bankwidth = 1;
- msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+ msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
if (!msp_maps[i].name) {
iounmap(msp_maps[i].virt);
kfree(msp_parts[i]);
goto cleanup_loop;
}
- msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
-
for (j = 0; j < pcnt; j++) {
part_name[5] = '0' + i;
part_name[7] = '0' + j;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index df8a5ef334c0..6b8d5cd7dbf6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = bio_data(req->bio);
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
@@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
get_capacity(req->rq_disk))
return -EIO;
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
return tr->discard(dev, block, nsect);
-
- if (rq_data_dir(req) == READ) {
+ case REQ_OP_READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
return 0;
- } else {
+ case REQ_OP_WRITE:
if (!tr->writesect)
return -EIO;
@@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index ce5ccc573a9c..3568294d4854 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
* data. For our userspace tools it is important to dump areas
* with ECC errors!
* For kernel internal usage it also might return -EUCLEAN
- * to signal the caller that a bitflip has occured and has
+ * to signal the caller that a bitflip has occurred and has
* been corrected by the ECC algorithm.
*
* Note: currently the standard NAND function, nand_read_oob_std,
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 052772f7caef..66a9dedd1062 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1128,7 +1128,7 @@ EXPORT_SYMBOL_GPL(mtd_write_oob);
* @oobecc: OOB region struct filled with the appropriate ECC position
* information
*
- * This functions return ECC section information in the OOB area. I you want
+ * This function returns ECC section information in the OOB area. If you want
* to get all the ECC bytes information, then you should call
* mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
*
@@ -1160,7 +1160,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
* @oobfree: OOB region struct filled with the appropriate free position
* information
*
- * This functions return free bytes position in the OOB area. I you want
+ * This function returns free bytes position in the OOB area. If you want
* to get all the free bytes information, then you should call
* mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
*
@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
* @iter: iterator function. Should be either mtd_ooblayout_free or
* mtd_ooblayout_ecc depending on the region type you're searching for
*
- * This functions returns the section id and oobregion information of a
+ * This function returns the section id and oobregion information of a
* specific byte. For example, say you want to know where the 4th ECC byte is
* stored, you'll use:
*
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index fccdd49bb964..ea5e5307f667 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -349,6 +349,14 @@ static const struct mtd_ooblayout_ops part_ooblayout_ops = {
.free = part_ooblayout_free,
};
+static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+
+ return part->master->_max_bad_blocks(part->master,
+ ofs + part->offset, len);
+}
+
static inline void free_partition(struct mtd_part *p)
{
kfree(p->mtd.name);
@@ -424,6 +432,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
&master->dev :
master->dev.parent;
+ slave->mtd.dev.of_node = part->of_node;
slave->mtd._read = part_read;
slave->mtd._write = part_write;
@@ -475,6 +484,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd._block_isbad = part_block_isbad;
if (master->_block_markbad)
slave->mtd._block_markbad = part_block_markbad;
+ if (master->_max_bad_blocks)
+ slave->mtd._max_bad_blocks = part_max_bad_blocks;
if (master->_get_device)
slave->mtd._get_device = part_get_device;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 353a9ddf6b97..6d4d5672d1d8 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -426,6 +426,8 @@ config MTD_NAND_ORION
config MTD_NAND_OXNAS
tristate "NAND Flash support for Oxford Semiconductor SoC"
+ depends on ARCH_OXNAS || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the NAND flash controller on Oxford Semiconductor SoCs.
@@ -534,13 +536,14 @@ config MTD_NAND_JZ4780
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
+ depends on OF
depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- tristate "Support for NAND on Lantiq XWAY SoC"
+ bool "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 0a177b1bfe3e..d1570f512f0b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -258,9 +258,15 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int bufnum = nctrl->page & priv->bufnum_mask;
int sector = bufnum * chip->ecc.steps;
int sector_end = sector + chip->ecc.steps - 1;
+ __be32 *eccstat_regs;
+
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
+ else
+ eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ifc_in32(&eccstat_regs[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 4924b43977ef..bda1e4667138 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -35,10 +35,133 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/mtd/fsmc.h>
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
+#define FSMC_NAND_BW8 1
+#define FSMC_NAND_BW16 2
+
+#define FSMC_MAX_NOR_BANKS 4
+#define FSMC_MAX_NAND_BANKS 4
+
+#define FSMC_FLASH_WIDTH8 1
+#define FSMC_FLASH_WIDTH16 2
+
+/* fsmc controller registers for NOR flash */
+#define CTRL 0x0
+ /* ctrl register definitions */
+ #define BANK_ENABLE (1 << 0)
+ #define MUXED (1 << 1)
+ #define NOR_DEV (2 << 2)
+ #define WIDTH_8 (0 << 4)
+ #define WIDTH_16 (1 << 4)
+ #define RSTPWRDWN (1 << 6)
+ #define WPROT (1 << 7)
+ #define WRT_ENABLE (1 << 12)
+ #define WAIT_ENB (1 << 13)
+
+#define CTRL_TIM 0x4
+ /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ 0x8
+#define FSMC_NOR_REG_SIZE 0x40
+
+#define FSMC_NOR_REG(base, bank, reg) (base + \
+ FSMC_NOR_BANK_SZ * (bank) + \
+ reg)
+
+/* fsmc controller registers for NAND flash */
+#define PC 0x00
+ /* pc register definitions */
+ #define FSMC_RESET (1 << 0)
+ #define FSMC_WAITON (1 << 1)
+ #define FSMC_ENABLE (1 << 2)
+ #define FSMC_DEVTYPE_NAND (1 << 3)
+ #define FSMC_DEVWID_8 (0 << 4)
+ #define FSMC_DEVWID_16 (1 << 4)
+ #define FSMC_ECCEN (1 << 6)
+ #define FSMC_ECCPLEN_512 (0 << 7)
+ #define FSMC_ECCPLEN_256 (1 << 7)
+ #define FSMC_TCLR_1 (1)
+ #define FSMC_TCLR_SHIFT (9)
+ #define FSMC_TCLR_MASK (0xF)
+ #define FSMC_TAR_1 (1)
+ #define FSMC_TAR_SHIFT (13)
+ #define FSMC_TAR_MASK (0xF)
+#define STS 0x04
+ /* sts register definitions */
+ #define FSMC_CODE_RDY (1 << 15)
+#define COMM 0x08
+ /* comm register definitions */
+ #define FSMC_TSET_0 0
+ #define FSMC_TSET_SHIFT 0
+ #define FSMC_TSET_MASK 0xFF
+ #define FSMC_TWAIT_6 6
+ #define FSMC_TWAIT_SHIFT 8
+ #define FSMC_TWAIT_MASK 0xFF
+ #define FSMC_THOLD_4 4
+ #define FSMC_THOLD_SHIFT 16
+ #define FSMC_THOLD_MASK 0xFF
+ #define FSMC_THIZ_1 1
+ #define FSMC_THIZ_SHIFT 24
+ #define FSMC_THIZ_MASK 0xFF
+#define ATTRIB 0x0C
+#define IOATA 0x10
+#define ECC1 0x14
+#define ECC2 0x18
+#define ECC3 0x1C
+#define FSMC_NAND_BANK_SZ 0x20
+
+#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
+ (FSMC_NAND_BANK_SZ * (bank)) + \
+ reg)
+
+#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
+struct fsmc_nand_timings {
+ uint8_t tclr;
+ uint8_t tar;
+ uint8_t thiz;
+ uint8_t thold;
+ uint8_t twait;
+ uint8_t tset;
+};
+
+enum access_mode {
+ USE_DMA_ACCESS = 1,
+ USE_WORD_ACCESS,
+};
+
+/**
+ * fsmc_nand_platform_data - platform specific NAND controller config
+ * @nand_timings: timing setup for the physical NAND interface
+ * @partitions: partition table for the platform, use a default fallback
+ * if this is NULL
+ * @nr_partitions: the number of partitions in the previous entry
+ * @options: different options for the driver
+ * @width: bus width
+ * @bank: default bank
+ * @select_bank: callback to select a certain bank, this is
+ * platform-specific. If the controller only supports one bank
+ * this may be set to NULL
+ */
+struct fsmc_nand_platform_data {
+ struct fsmc_nand_timings *nand_timings;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+ unsigned int options;
+ unsigned int width;
+ unsigned int bank;
+
+ enum access_mode mode;
+
+ void (*select_bank)(uint32_t bank, uint32_t busw);
+
+ /* priv structures for dma accesses */
+ void *read_dma_priv;
+ void *write_dma_priv;
+};
+
static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -714,7 +837,6 @@ static bool filter(struct dma_chan *chan, void *slave)
return true;
}
-#ifdef CONFIG_OF
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
@@ -757,13 +879,6 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
}
return 0;
}
-#else
-static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
-{
- return -ENOSYS;
-}
-#endif
/*
* fsmc_nand_probe - Probe function
@@ -782,19 +897,15 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
u32 pid;
int i;
- if (np) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- pdev->dev.platform_data = pdata;
- ret = fsmc_nand_probe_config_dt(pdev, np);
- if (ret) {
- dev_err(&pdev->dev, "no platform data\n");
- return -ENODEV;
- }
- }
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- if (!pdata) {
- dev_err(&pdev->dev, "platform data is NULL\n");
- return -EINVAL;
+ pdev->dev.platform_data = pdata;
+ ret = fsmc_nand_probe_config_dt(pdev, np);
+ if (ret) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
}
/* Allocate memory for the device structure (and zero it) */
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5553a5d9efd1..846a66c1b133 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
init_completion(&host->comp_controller);
host->irq = platform_get_irq(pdev, 0);
- if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ if (host->irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto err_exit3;
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 53bafe23ab39..a0669a33f8fe 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -797,22 +797,17 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
struct resource *rc;
int res;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (rc == NULL) {
- dev_err(&pdev->dev, "No memory resource found for device\n");
- return -EBUSY;
- }
-
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->io_base_dma = rc->start;
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
+ host->io_base_dma = rc->start;
if (pdev->dev.of_node)
host->ncfg = lpc32xx_parse_dt(&pdev->dev);
if (!host->ncfg) {
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 6c3eed3c2094..6c517c682939 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -1383,7 +1383,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
nfc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(nfc->regs)) {
ret = PTR_ERR(nfc->regs);
- dev_err(dev, "no nfi base\n");
goto release_ecc;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ec1c28aaaf23..1492c12906f6 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3263,6 +3263,42 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/**
+ * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: length of mtd
+ */
+static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 part_start_block;
+ u32 part_end_block;
+ u32 part_start_die;
+ u32 part_end_die;
+
+ /*
+ * max_bb_per_die and blocks_per_die used to determine
+ * the maximum bad block count.
+ */
+ if (!chip->max_bb_per_die || !chip->blocks_per_die)
+ return -ENOTSUPP;
+
+ /* Get the start and end of the partition in erase blocks. */
+ part_start_block = mtd_div_by_eb(ofs, mtd);
+ part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
+
+ /* Get the start and end LUNs of the partition. */
+ part_start_die = part_start_block / chip->blocks_per_die;
+ part_end_die = part_end_block / chip->blocks_per_die;
+
+ /*
+ * Look up the bad blocks per unit and multiply by the number of units
+ * that the partition spans.
+ */
+ return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
+}
+
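nand_max_bad_blocks converts the partition offset and length into erase-block indices, maps those to the first and last LUN (die) touched, and multiplies the per-die worst case by the number of dies spanned. The same arithmetic as a small stand-alone program; the geometry and partition numbers are invented, not taken from any particular chip:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical chip geometry */
	uint32_t erasesize = 256 * 1024;	/* bytes per erase block */
	uint32_t blocks_per_die = 1024;
	uint32_t max_bb_per_die = 40;

	/* hypothetical partition */
	uint64_t ofs = 0x08000000;		/* starts 128 MiB in */
	uint64_t len = 0x20000000;		/* 512 MiB long */

	uint32_t start_block = ofs / erasesize;
	uint32_t end_block = len / erasesize + start_block - 1;
	uint32_t start_die = start_block / blocks_per_die;
	uint32_t end_die = end_block / blocks_per_die;

	printf("worst-case bad blocks: %u\n",
	       max_bb_per_die * (end_die - start_die + 1));
	return 0;
}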
+/**
* nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
* @mtd: MTD device structure
* @chip: nand chip info structure
@@ -3592,6 +3628,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
chip->bits_per_cell = p->bits_per_cell;
+ chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
+ chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
+
if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
*busw = NAND_BUSWIDTH_16;
else
@@ -4815,6 +4854,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
+ mtd->_max_bad_blocks = nand_max_bad_blocks;
mtd->writebufsize = mtd->writesize;
/*
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index b3a332f37e14..4a2f75b0c200 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -185,6 +185,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_SANDISK, "SanDisk"},
{NAND_MFR_INTEL, "Intel"},
{NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_WINBOND, "Winbond"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index e40482a65de6..0eeeb8b889ea 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -321,6 +321,10 @@ static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
writel(0, nfc->regs + NFC_REG_INT);
} else {
@@ -518,6 +522,8 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
u32 tmp;
while (len > offs) {
+ bool poll = false;
+
cnt = min(len - offs, NFC_SRAM_SIZE);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
@@ -528,7 +534,11 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
writel(tmp, nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
if (ret)
break;
@@ -551,6 +561,8 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
u32 tmp;
while (len > offs) {
+ bool poll = false;
+
cnt = min(len - offs, NFC_SRAM_SIZE);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
@@ -563,7 +575,11 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
NFC_ACCESS_DIR;
writel(tmp, nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
if (ret)
break;
@@ -588,10 +604,6 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return;
-
if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
!(ctrl & (NAND_CLE | NAND_ALE))) {
u32 cmd = 0;
@@ -621,6 +633,10 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
writel(sunxi_nand->addr[1],
nfc->regs + NFC_REG_ADDR_HIGH);
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return;
+
writel(cmd, nfc->regs + NFC_REG_CMD);
sunxi_nand->addr[0] = 0;
sunxi_nand->addr[1] = 0;
@@ -957,7 +973,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
@@ -1069,7 +1085,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
if (ret)
dmaengine_terminate_all(nfc->dmac);
@@ -1189,7 +1205,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
@@ -1428,7 +1444,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
NFC_DATA_TRANS | NFC_ACCESS_DIR,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
if (ret)
dmaengine_terminate_all(nfc->dmac);
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 28c7f474be77..4a5e948c62df 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
if (IS_ERR(nfc->pbus_base))
return PTR_ERR(nfc->pbus_base);
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+ nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
if (IS_ERR(nfc->chan))
return PTR_ERR(nfc->chan);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c0c458..ddee4005248c 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright © 2012 John Crispin <blogic@openwrt.org>
+ * Copyright © 2012 John Crispin <john@phrozen.org>
* Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
*/
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
{ .compatible = "lantiq,nand-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, xway_nand_match);
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
},
};
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_nand_driver);
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index ede407d6e106..464470122493 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -108,6 +108,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
parts[i].offset = of_read_number(reg, a_cells);
parts[i].size = of_read_number(reg + a_cells, s_cells);
+ parts[i].of_node = pp;
partname = of_get_property(pp, "label", &len);
if (!partname)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 4a682ee0f632..7252087ef407 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -29,6 +29,16 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
config SPI_ATMEL_QUADSPI
tristate "Atmel Quad SPI Controller"
depends on ARCH_AT91 || (ARM && COMPILE_TEST)
@@ -40,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && ARM
+ depends on OF && (ARM || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -76,4 +86,24 @@ config SPI_NXP_SPIFI
Flash. Enable this option if you have a device with a SPIFI
controller and want to access the Flash as a mtd device.
+config SPI_INTEL_SPI
+ tristate
+
+config SPI_INTEL_SPI_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+ depends on X86
+ select SPI_INTEL_SPI
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-platform.
+
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 121695e83542..72238a793198 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,7 +1,10 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
new file mode 100644
index 000000000000..56051d30f000
--- /dev/null
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -0,0 +1,754 @@
+/*
+ * ASPEED Static Memory Controller driver
+ *
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+
+#define DEVICE_NAME "aspeed-smc"
+
+/*
+ * The driver only supports SPI flash
+ */
+enum aspeed_smc_flash_type {
+ smc_type_nor = 0,
+ smc_type_nand = 1,
+ smc_type_spi = 2,
+};
+
+struct aspeed_smc_chip;
+
+struct aspeed_smc_info {
+ u32 maxsize; /* maximum size of chip window */
+ u8 nce; /* number of chip enables */
+ bool hastype; /* flash type field exists in config reg */
+ u8 we0; /* shift for write enable bit for CE0 */
+ u8 ctl0; /* offset in regs of ctl for CE0 */
+
+ void (*set_4b)(struct aspeed_smc_chip *chip);
+};
+
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip);
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip);
+
+static const struct aspeed_smc_info fmc_2400_info = {
+ .maxsize = 64 * 1024 * 1024,
+ .nce = 5,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2400_info = {
+ .maxsize = 64 * 1024 * 1024,
+ .nce = 1,
+ .hastype = false,
+ .we0 = 0,
+ .ctl0 = 0x04,
+ .set_4b = aspeed_smc_chip_set_4b_spi_2400,
+};
+
+static const struct aspeed_smc_info fmc_2500_info = {
+ .maxsize = 256 * 1024 * 1024,
+ .nce = 3,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2500_info = {
+ .maxsize = 128 * 1024 * 1024,
+ .nce = 2,
+ .hastype = false,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+enum aspeed_smc_ctl_reg_value {
+ smc_base, /* base value without mode for other commands */
+ smc_read, /* command reg for (maybe fast) reads */
+ smc_write, /* command reg for writes */
+ smc_max,
+};
+
+struct aspeed_smc_controller;
+
+struct aspeed_smc_chip {
+ int cs;
+ struct aspeed_smc_controller *controller;
+ void __iomem *ctl; /* control register */
+ void __iomem *ahb_base; /* base of chip window */
+ u32 ctl_val[smc_max]; /* control settings */
+ enum aspeed_smc_flash_type type; /* what type of flash */
+ struct spi_nor nor;
+};
+
+struct aspeed_smc_controller {
+ struct device *dev;
+
+ struct mutex mutex; /* controller access mutex */
+ const struct aspeed_smc_info *info; /* type info of controller */
+ void __iomem *regs; /* controller registers */
+ void __iomem *ahb_base; /* per-chip windows resource */
+
+ struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
+};
+
+/*
+ * SPI Flash Configuration Register (AST2500 SPI)
+ * or
+ * Type setting Register (AST2500 FMC).
+ * CE0 and CE1 can only be of type SPI. CE2 can be of type NOR but the
+ * driver does not support it.
+ */
+#define CONFIG_REG 0x0
+#define CONFIG_DISABLE_LEGACY BIT(31) /* 1 */
+
+#define CONFIG_CE2_WRITE BIT(18)
+#define CONFIG_CE1_WRITE BIT(17)
+#define CONFIG_CE0_WRITE BIT(16)
+
+#define CONFIG_CE2_TYPE BIT(4) /* AST2500 FMC only */
+#define CONFIG_CE1_TYPE BIT(2) /* AST2500 FMC only */
+#define CONFIG_CE0_TYPE BIT(0) /* AST2500 FMC only */
+
+/*
+ * CE Control Register
+ */
+#define CE_CONTROL_REG 0x4
+
+/*
+ * CEx Control Register
+ */
+#define CONTROL_AAF_MODE BIT(31)
+#define CONTROL_IO_MODE_MASK GENMASK(30, 28)
+#define CONTROL_IO_DUAL_DATA BIT(29)
+#define CONTROL_IO_DUAL_ADDR_DATA (BIT(29) | BIT(28))
+#define CONTROL_IO_QUAD_DATA BIT(30)
+#define CONTROL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28))
+#define CONTROL_CE_INACTIVE_SHIFT 24
+#define CONTROL_CE_INACTIVE_MASK GENMASK(27, \
+ CONTROL_CE_INACTIVE_SHIFT)
+/* 0 = 16T ... 15 = 1T T=HCLK */
+#define CONTROL_COMMAND_SHIFT 16
+#define CONTROL_DUMMY_COMMAND_OUT BIT(15)
+#define CONTROL_IO_DUMMY_HI BIT(14)
+#define CONTROL_IO_DUMMY_HI_SHIFT 14
+#define CONTROL_CLK_DIV4 BIT(13) /* others */
+#define CONTROL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI */
+#define CONTROL_RW_MERGE BIT(12)
+#define CONTROL_IO_DUMMY_LO_SHIFT 6
+#define CONTROL_IO_DUMMY_LO GENMASK(7, \
+ CONTROL_IO_DUMMY_LO_SHIFT)
+#define CONTROL_IO_DUMMY_MASK (CONTROL_IO_DUMMY_HI | \
+ CONTROL_IO_DUMMY_LO)
+#define CONTROL_IO_DUMMY_SET(dummy) \
+ (((((dummy) >> 2) & 0x1) << CONTROL_IO_DUMMY_HI_SHIFT) | \
+ (((dummy) & 0x3) << CONTROL_IO_DUMMY_LO_SHIFT))
+
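The CONTROL_IO_DUMMY_SET() macro above splits the dummy byte count across non-contiguous register bits (one high bit at bit 14, two low bits at 7:6). A hedged standalone sketch of the same packing, using hypothetical dummy counts:

#include <stdio.h>
#include <stdint.h>

/* same split as the macro: bit 2 of the count -> bit 14, bits 1:0 -> bits 7:6 */
static uint32_t dummy_set(uint32_t dummy)
{
	return (((dummy >> 2) & 0x1) << 14) | ((dummy & 0x3) << 6);
}

int main(void)
{
	printf("0x%08x\n", dummy_set(1));	/* 1 dummy byte  -> 0x00000040 */
	printf("0x%08x\n", dummy_set(5));	/* 5 dummy bytes -> 0x00004040 */
	return 0;
}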
+#define CONTROL_CLOCK_FREQ_SEL_SHIFT 8
+#define CONTROL_CLOCK_FREQ_SEL_MASK GENMASK(11, \
+ CONTROL_CLOCK_FREQ_SEL_SHIFT)
+#define CONTROL_LSB_FIRST BIT(5)
+#define CONTROL_CLOCK_MODE_3 BIT(4)
+#define CONTROL_IN_DUAL_DATA BIT(3)
+#define CONTROL_CE_STOP_ACTIVE_CONTROL BIT(2)
+#define CONTROL_COMMAND_MODE_MASK GENMASK(1, 0)
+#define CONTROL_COMMAND_MODE_NORMAL 0
+#define CONTROL_COMMAND_MODE_FREAD 1
+#define CONTROL_COMMAND_MODE_WRITE 2
+#define CONTROL_COMMAND_MODE_USER 3
+
+#define CONTROL_KEEP_MASK \
+ (CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
+ CONTROL_IO_DUMMY_MASK | CONTROL_CLOCK_FREQ_SEL_MASK | \
+ CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
+
+/*
+ * The Segment Register uses an 8MB unit to encode the start address
+ * and the end address of the mapping window of a flash SPI slave:
+ *
+ * | byte 1 | byte 2 | byte 3 | byte 4 |
+ * +--------+--------+--------+--------+
+ * | end | start | 0 | 0 |
+ */
+#define SEGMENT_ADDR_REG0 0x30
+#define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23)
+#define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23)
+
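To make the byte layout above concrete, here is a hedged sketch decoding a hypothetical segment register value with the same macros (the << 23 turns the 8MB unit into a byte address):

#include <stdio.h>
#include <stdint.h>

#define SEGMENT_ADDR_START(_r)	((((_r) >> 16) & 0xFF) << 23)
#define SEGMENT_ADDR_END(_r)	((((_r) >> 24) & 0xFF) << 23)

int main(void)
{
	uint32_t reg = 0x54500000;	/* hypothetical segment register value */

	/* start = 0x50 * 8MB = 0x28000000, end = 0x54 * 8MB = 0x2a000000 */
	printf("start=0x%08x end=0x%08x\n",
	       (unsigned int)SEGMENT_ADDR_START(reg),
	       (unsigned int)SEGMENT_ADDR_END(reg));
	return 0;
}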
+/*
+ * In user mode all data bytes read or written to the chip decode address
+ * range are transferred to or from the SPI bus. The range is treated as a
+ * fifo of arbitrary 1, 2, or 4 byte width but each write has to be aligned
+ * to its size. The address within the multiple 8kB range is ignored when
+ * sending bytes to the SPI bus.
+ *
+ * On the arm architecture, as of Linux version 4.3, memcpy_fromio and
+ * memcpy_toio on little endian targets use the optimized memcpy routines
+ * that were designed for well behaved memory storage. These routines
+ * have a stutter if the source and destination are not both word aligned:
+ * once with a duplicate access to the source after aligning the
+ * destination to a word boundary, and again with a duplicate access to
+ * the source when the final byte count is not word aligned.
+ *
+ * When writing or reading the fifo this stutter discards data or sends
+ * too much data to the fifo and can not be used by this driver.
+ *
+ * While the low level io string routines that implement the insl family do
+ * the desired accesses and memory increments, the cross architecture io
+ * macros make them essentially impossible to use on a memory mapped address
+ * instead of a token from the call to iomap of an io port.
+ *
+ * These fifo routines use readl and friends to a constant io port and update
+ * the memory buffer pointer and count via explicit code. The final updates
+ * to len are optimistically suppressed.
+ */
+static int aspeed_smc_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ ioread32_rep(src, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ ioread8_rep(src, (u8 *)buf + offset, len);
+ return 0;
+}
+
+static int aspeed_smc_write_to_ahb(void __iomem *dst, const void *buf,
+ size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ iowrite32_rep(dst, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ iowrite8_rep(dst, (const u8 *)buf + offset, len);
+ return 0;
+}
+
+static inline u32 aspeed_smc_chip_write_bit(struct aspeed_smc_chip *chip)
+{
+ return BIT(chip->controller->info->we0 + chip->cs);
+}
+
+static void aspeed_smc_chip_check_config(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ reg = readl(controller->regs + CONFIG_REG);
+
+ if (reg & aspeed_smc_chip_write_bit(chip))
+ return;
+
+ dev_dbg(controller->dev, "config write is not set! @%p: 0x%08x\n",
+ controller->regs + CONFIG_REG, reg);
+ reg |= aspeed_smc_chip_write_bit(chip);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_start_user(struct spi_nor *nor)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ u32 ctl = chip->ctl_val[smc_base];
+
+ /*
+ * When the chip is controlled in user mode, we need write
+ * access to send the opcodes to it. So check the config.
+ */
+ aspeed_smc_chip_check_config(chip);
+
+ ctl |= CONTROL_COMMAND_MODE_USER |
+ CONTROL_CE_STOP_ACTIVE_CONTROL;
+ writel(ctl, chip->ctl);
+
+ ctl &= ~CONTROL_CE_STOP_ACTIVE_CONTROL;
+ writel(ctl, chip->ctl);
+}
+
+static void aspeed_smc_stop_user(struct spi_nor *nor)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ u32 ctl = chip->ctl_val[smc_read];
+ u32 ctl2 = ctl | CONTROL_COMMAND_MODE_USER |
+ CONTROL_CE_STOP_ACTIVE_CONTROL;
+
+ writel(ctl2, chip->ctl); /* stop user CE control */
+ writel(ctl, chip->ctl); /* default to fread or read mode */
+}
+
+static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ mutex_lock(&chip->controller->mutex);
+ return 0;
+}
+
+static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ mutex_unlock(&chip->controller->mutex);
+}
+
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_smc_read_from_ahb(buf, chip->ahb_base, len);
+ aspeed_smc_stop_user(nor);
+ return 0;
+}
+
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_smc_write_to_ahb(chip->ahb_base, buf, len);
+ aspeed_smc_stop_user(nor);
+ return 0;
+}
+
+static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ __be32 temp;
+ u32 cmdaddr;
+
+ switch (nor->addr_width) {
+ default:
+ WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
+ nor->addr_width);
+ /* FALLTHROUGH */
+ case 3:
+ cmdaddr = addr & 0xFFFFFF;
+ cmdaddr |= cmd << 24;
+
+ temp = cpu_to_be32(cmdaddr);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ case 4:
+ temp = cpu_to_be32(addr);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &cmd, 1);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ }
+}
+
+static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *read_buf)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ int i;
+ u8 dummy = 0xFF;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from);
+ for (i = 0; i < chip->nor.read_dummy / 8; i++)
+ aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+
+ aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len);
+ aspeed_smc_stop_user(nor);
+ return len;
+}
+
+static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to);
+ aspeed_smc_write_to_ahb(chip->ahb_base, write_buf, len);
+ aspeed_smc_stop_user(nor);
+ return len;
+}
+
+static int aspeed_smc_unregister(struct aspeed_smc_controller *controller)
+{
+ struct aspeed_smc_chip *chip;
+ int n;
+
+ for (n = 0; n < controller->info->nce; n++) {
+ chip = controller->chips[n];
+ if (chip)
+ mtd_device_unregister(&chip->nor.mtd);
+ }
+
+ return 0;
+}
+
+static int aspeed_smc_remove(struct platform_device *dev)
+{
+ return aspeed_smc_unregister(platform_get_drvdata(dev));
+}
+
+static const struct of_device_id aspeed_smc_matches[] = {
+ { .compatible = "aspeed,ast2400-fmc", .data = &fmc_2400_info },
+ { .compatible = "aspeed,ast2400-spi", .data = &spi_2400_info },
+ { .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info },
+ { .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_smc_matches);
+
+/*
+ * Each chip has a mapping window defined by a segment address
+ * register defining a start and an end address on the AHB bus. These
+ * addresses can be configured to fit the chip size and offer a
+ * contiguous memory region across chips. For the moment, we only
+ * check that each chip segment is valid.
+ */
+static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
+ struct resource *res)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 offset = 0;
+ u32 reg;
+
+ if (controller->info->nce > 1) {
+ reg = readl(controller->regs + SEGMENT_ADDR_REG0 +
+ chip->cs * 4);
+
+ if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg))
+ return NULL;
+
+ offset = SEGMENT_ADDR_START(reg) - res->start;
+ }
+
+ return controller->ahb_base + offset;
+}
+
+static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ reg = readl(controller->regs + CONFIG_REG);
+
+ reg |= aspeed_smc_chip_write_bit(chip);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ chip->type = type;
+
+ reg = readl(controller->regs + CONFIG_REG);
+ reg &= ~(3 << (chip->cs * 2));
+ reg |= chip->type << (chip->cs * 2);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+/*
+ * The AST2500 FMC flash controller should be strapped by hardware, or
+ * autodetected, but the AST2500 SPI flash controller needs to be set explicitly.
+ */
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ if (chip->controller->info == &spi_2500_info) {
+ reg = readl(controller->regs + CE_CONTROL_REG);
+ reg |= 1 << chip->cs;
+ writel(reg, controller->regs + CE_CONTROL_REG);
+ }
+}
+
+/*
+ * The AST2400 SPI flash controller does not have a CE Control
+ * register. It uses the CE0 control register to set 4Byte mode at the
+ * controller level.
+ */
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip)
+{
+ chip->ctl_val[smc_base] |= CONTROL_IO_ADDRESS_4B;
+ chip->ctl_val[smc_read] |= CONTROL_IO_ADDRESS_4B;
+}
+
+static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip,
+ struct resource *res)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ u32 reg, base_reg;
+
+ /*
+ * Always turn on the write enable bit to allow opcodes to be
+ * sent in user mode.
+ */
+ aspeed_smc_chip_enable_write(chip);
+
+ /* The driver only supports SPI type flash */
+ if (info->hastype)
+ aspeed_smc_chip_set_type(chip, smc_type_spi);
+
+ /*
+ * Configure chip base address in memory
+ */
+ chip->ahb_base = aspeed_smc_chip_base(chip, res);
+ if (!chip->ahb_base) {
+ dev_warn(chip->nor.dev, "CE segment window closed.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the value of the inherited control register. U-Boot usually
+ * does some timing calibration on the FMC chip, so it's good
+ * to keep those settings. In the future, we should handle calibration
+ * from Linux.
+ */
+ reg = readl(chip->ctl);
+ dev_dbg(controller->dev, "control register: %08x\n", reg);
+
+ base_reg = reg & CONTROL_KEEP_MASK;
+ if (base_reg != reg) {
+ dev_dbg(controller->dev,
+ "control register changed to: %08x\n",
+ base_reg);
+ }
+ chip->ctl_val[smc_base] = base_reg;
+
+ /*
+ * Retain the prior value of the control register as the
+ * default if it was normal access mode. Otherwise start with
+ * the sanitized base value set to read mode.
+ */
+ if ((reg & CONTROL_COMMAND_MODE_MASK) ==
+ CONTROL_COMMAND_MODE_NORMAL)
+ chip->ctl_val[smc_read] = reg;
+ else
+ chip->ctl_val[smc_read] = chip->ctl_val[smc_base] |
+ CONTROL_COMMAND_MODE_NORMAL;
+
+ dev_dbg(controller->dev, "default control register: %08x\n",
+ chip->ctl_val[smc_read]);
+ return 0;
+}
+
+static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ u32 cmd;
+
+ if (chip->nor.addr_width == 4 && info->set_4b)
+ info->set_4b(chip);
+
+ /*
+ * Base mode has not been optimized yet; use it for writes.
+ */
+ chip->ctl_val[smc_write] = chip->ctl_val[smc_base] |
+ chip->nor.program_opcode << CONTROL_COMMAND_SHIFT |
+ CONTROL_COMMAND_MODE_WRITE;
+
+ dev_dbg(controller->dev, "write control register: %08x\n",
+ chip->ctl_val[smc_write]);
+
+ /*
+ * TODO: Adjust clocks if fast read is supported and interpret
+ * SPI-NOR flags to adjust controller settings.
+ */
+ switch (chip->nor.flash_read) {
+ case SPI_NOR_NORMAL:
+ cmd = CONTROL_COMMAND_MODE_NORMAL;
+ break;
+ case SPI_NOR_FAST:
+ cmd = CONTROL_COMMAND_MODE_FREAD;
+ break;
+ default:
+ dev_err(chip->nor.dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ chip->ctl_val[smc_read] |= cmd |
+ CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8);
+
+ dev_dbg(controller->dev, "base control register: %08x\n",
+ chip->ctl_val[smc_read]);
+ return 0;
+}
+
+static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
+ struct device_node *np, struct resource *r)
+{
+ const struct aspeed_smc_info *info = controller->info;
+ struct device *dev = controller->dev;
+ struct device_node *child;
+ unsigned int cs;
+ int ret = -ENODEV;
+
+ for_each_available_child_of_node(np, child) {
+ struct aspeed_smc_chip *chip;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+
+ /* This driver does not support NAND or NOR flash devices. */
+ if (!of_device_is_compatible(child, "jedec,spi-nor"))
+ continue;
+
+ ret = of_property_read_u32(child, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "Couldn't not read chip select.\n");
+ break;
+ }
+
+ if (cs >= info->nce) {
+ dev_err(dev, "Chip select %d out of range.\n",
+ cs);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (controller->chips[cs]) {
+ dev_err(dev, "Chip select %d already in use by %s\n",
+ cs, dev_name(controller->chips[cs]->nor.dev));
+ ret = -EBUSY;
+ break;
+ }
+
+ chip = devm_kzalloc(controller->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ chip->controller = controller;
+ chip->ctl = controller->regs + info->ctl0 + cs * 4;
+ chip->cs = cs;
+
+ nor = &chip->nor;
+ mtd = &nor->mtd;
+
+ nor->dev = dev;
+ nor->priv = chip;
+ spi_nor_set_flash_node(nor, child);
+ nor->read = aspeed_smc_read_user;
+ nor->write = aspeed_smc_write_user;
+ nor->read_reg = aspeed_smc_read_reg;
+ nor->write_reg = aspeed_smc_write_reg;
+ nor->prepare = aspeed_smc_prep;
+ nor->unprepare = aspeed_smc_unprep;
+
+ ret = aspeed_smc_chip_setup_init(chip, r);
+ if (ret)
+ break;
+
+ /*
+ * TODO: Add support for SPI_NOR_QUAD and SPI_NOR_DUAL
+ * attach once board support is present, as determined
+ * by a device tree property.
+ */
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_NORMAL);
+ if (ret)
+ break;
+
+ ret = aspeed_smc_chip_setup_finish(chip);
+ if (ret)
+ break;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ break;
+
+ controller->chips[cs] = chip;
+ }
+
+ if (ret)
+ aspeed_smc_unregister(controller);
+
+ return ret;
+}
+
+static int aspeed_smc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct aspeed_smc_controller *controller;
+ const struct of_device_id *match;
+ const struct aspeed_smc_info *info;
+ struct resource *res;
+ int ret;
+
+ match = of_match_device(aspeed_smc_matches, &pdev->dev);
+ if (!match || !match->data)
+ return -ENODEV;
+ info = match->data;
+
+ controller = devm_kzalloc(&pdev->dev, sizeof(*controller) +
+ info->nce * sizeof(controller->chips[0]), GFP_KERNEL);
+ if (!controller)
+ return -ENOMEM;
+ controller->info = info;
+ controller->dev = dev;
+
+ mutex_init(&controller->mutex);
+ platform_set_drvdata(pdev, controller);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ controller->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(controller->regs))
+ return PTR_ERR(controller->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ controller->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(controller->ahb_base))
+ return PTR_ERR(controller->ahb_base);
+
+ ret = aspeed_smc_setup_flash(controller, np, res);
+ if (ret)
+ dev_err(dev, "Aspeed SMC probe failed %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver aspeed_smc_driver = {
+ .probe = aspeed_smc_probe,
+ .remove = aspeed_smc_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_smc_matches,
+ }
+};
+
+module_platform_driver(aspeed_smc_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d489fbd07c12..9f8102de1b16 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -526,7 +526,8 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
bytes_to_read *= cqspi->fifo_width;
bytes_to_read = bytes_to_read > remaining ?
remaining : bytes_to_read;
- readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
+ ioread32_rep(ahb_base, rxbuf,
+ DIV_ROUND_UP(bytes_to_read, 4));
rxbuf += bytes_to_read;
remaining -= bytes_to_read;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
@@ -610,7 +611,8 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
- writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
+ iowrite32_rep(cqspi->ahb_base, txbuf,
+ DIV_ROUND_UP(write_bytes, 4));
ret = wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies
@@ -891,7 +893,7 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
if (ret)
return ret;
- return (ret < 0) ? ret : len;
+ return len;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
@@ -911,7 +913,7 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
if (ret)
return ret;
- return (ret < 0) ? ret : len;
+ return len;
}
static int cqspi_erase(struct spi_nor *nor, loff_t offs)
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index b4d8953fb30a..1476135e0d50 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -193,7 +193,7 @@
#define QUADSPI_LUT_NUM 64
/* SEQID -- we can have 16 seqids at most. */
-#define SEQID_QUAD_READ 0
+#define SEQID_READ 0
#define SEQID_WREN 1
#define SEQID_WRDI 2
#define SEQID_RDSR 3
@@ -373,32 +373,26 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
void __iomem *base = q->iobase;
int rxfifo = q->devtype_data->rxfifo;
u32 lut_base;
- u8 cmd, addrlen, dummy;
int i;
+ struct spi_nor *nor = &q->nor[0];
+ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
+ u8 read_op = nor->read_opcode;
+ u8 read_dm = nor->read_dummy;
+
fsl_qspi_unlock_lut(q);
/* Clear all the LUT table */
for (i = 0; i < QUADSPI_LUT_NUM; i++)
qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
- /* Quad Read */
- lut_base = SEQID_QUAD_READ * 4;
-
- if (q->nor_size <= SZ_16M) {
- cmd = SPINOR_OP_READ_1_1_4;
- addrlen = ADDR24BIT;
- dummy = 8;
- } else {
- /* use the 4-byte address */
- cmd = SPINOR_OP_READ_1_1_4;
- addrlen = ADDR32BIT;
- dummy = 8;
- }
+ /* Read */
+ lut_base = SEQID_READ * 4;
- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
+ LUT1(FSL_READ, PAD4, rxfifo),
base + QUADSPI_LUT(lut_base + 1));
/* Write enable */
@@ -409,16 +403,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* Page Program */
lut_base = SEQID_PP * 4;
- if (q->nor_size <= SZ_16M) {
- cmd = SPINOR_OP_PP;
- addrlen = ADDR24BIT;
- } else {
- /* use the 4-byte address */
- cmd = SPINOR_OP_PP;
- addrlen = ADDR32BIT;
- }
-
- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
+ LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
base + QUADSPI_LUT(lut_base + 1));
@@ -432,10 +418,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* Erase a sector */
lut_base = SEQID_SE * 4;
- cmd = q->nor[0].erase_opcode;
- addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
-
- qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
+ LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
/* Erase the whole chip */
@@ -484,7 +468,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
{
switch (cmd) {
case SPINOR_OP_READ_1_1_4:
- return SEQID_QUAD_READ;
+ return SEQID_READ;
case SPINOR_OP_WREN:
return SEQID_WREN;
case SPINOR_OP_WRDI:
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/intel-spi-platform.c
new file mode 100644
index 000000000000..5c943df9398f
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi-platform.c
@@ -0,0 +1,57 @@
+/*
+ * Intel PCH/PCU SPI flash platform driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "intel-spi.h"
+
+static int intel_spi_platform_probe(struct platform_device *pdev)
+{
+ struct intel_spi_boardinfo *info;
+ struct intel_spi *ispi;
+ struct resource *mem;
+
+ info = dev_get_platdata(&pdev->dev);
+ if (!info)
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ispi = intel_spi_probe(&pdev->dev, mem, info);
+ if (IS_ERR(ispi))
+ return PTR_ERR(ispi);
+
+ platform_set_drvdata(pdev, ispi);
+ return 0;
+}
+
+static int intel_spi_platform_remove(struct platform_device *pdev)
+{
+ struct intel_spi *ispi = platform_get_drvdata(pdev);
+
+ return intel_spi_remove(ispi);
+}
+
+static struct platform_driver intel_spi_platform_driver = {
+ .probe = intel_spi_platform_probe,
+ .remove = intel_spi_platform_remove,
+ .driver = {
+ .name = "intel-spi",
+ },
+};
+
+module_platform_driver(intel_spi_platform_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:intel-spi");
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
new file mode 100644
index 000000000000..a10f6027b386
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -0,0 +1,777 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/intel-spi.h>
+
+#include "intel-spi.h"
+
+/* Offsets are from @ispi->base */
+#define BFPREG 0x00
+
+#define HSFSTS_CTL 0x04
+#define HSFSTS_CTL_FSMIE BIT(31)
+#define HSFSTS_CTL_FDBC_SHIFT 24
+#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
+
+#define HSFSTS_CTL_FCYCLE_SHIFT 17
+#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
+/* HW sequencer opcodes */
+#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
+
+#define HSFSTS_CTL_FGO BIT(16)
+#define HSFSTS_CTL_FLOCKDN BIT(15)
+#define HSFSTS_CTL_FDV BIT(14)
+#define HSFSTS_CTL_SCIP BIT(5)
+#define HSFSTS_CTL_AEL BIT(2)
+#define HSFSTS_CTL_FCERR BIT(1)
+#define HSFSTS_CTL_FDONE BIT(0)
+
+#define FADDR 0x08
+#define DLOCK 0x0c
+#define FDATA(n) (0x10 + ((n) * 4))
+
+#define FRACC 0x50
+
+#define FREG(n) (0x54 + ((n) * 4))
+#define FREG_BASE_MASK 0x3fff
+#define FREG_LIMIT_SHIFT 16
+#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
+
+/* Offset is from @ispi->pregs */
+#define PR(n) ((n) * 4)
+#define PR_WPE BIT(31)
+#define PR_LIMIT_SHIFT 16
+#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
+#define PR_RPE BIT(15)
+#define PR_BASE_MASK 0x3fff
+/* Last PR is GPR0 */
+#define PR_NUM (5 + 1)
+
+/* Offsets are from @ispi->sregs */
+#define SSFSTS_CTL 0x00
+#define SSFSTS_CTL_FSMIE BIT(23)
+#define SSFSTS_CTL_DS BIT(22)
+#define SSFSTS_CTL_DBC_SHIFT 16
+#define SSFSTS_CTL_SPOP BIT(11)
+#define SSFSTS_CTL_ACS BIT(10)
+#define SSFSTS_CTL_SCGO BIT(9)
+#define SSFSTS_CTL_COP_SHIFT 12
+#define SSFSTS_CTL_FRS BIT(7)
+#define SSFSTS_CTL_DOFRS BIT(6)
+#define SSFSTS_CTL_AEL BIT(4)
+#define SSFSTS_CTL_FCERR BIT(3)
+#define SSFSTS_CTL_FDONE BIT(2)
+#define SSFSTS_CTL_SCIP BIT(0)
+
+#define PREOP_OPTYPE 0x04
+#define OPMENU0 0x08
+#define OPMENU1 0x0c
+
+/* CPU specifics */
+#define BYT_PR 0x74
+#define BYT_SSFSTS_CTL 0x90
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
+#define BYT_FREG_NUM 5
+
+#define LPT_PR 0x74
+#define LPT_SSFSTS_CTL 0x90
+#define LPT_FREG_NUM 5
+
+#define BXT_PR 0x84
+#define BXT_SSFSTS_CTL 0xa0
+#define BXT_FREG_NUM 12
+
+#define INTEL_SPI_TIMEOUT 5000 /* ms */
+#define INTEL_SPI_FIFO_SZ 64
+
+/**
+ * struct intel_spi - Driver private data
+ * @dev: Device pointer
+ * @info: Pointer to board specific info
+ * @nor: SPI NOR layer structure
+ * @base: Beginning of MMIO space
+ * @pregs: Start of protection registers
+ * @sregs: Start of software sequencer registers
+ * @nregions: Maximum number of regions
+ * @writeable: Is the chip writeable
+ * @swseq: Use SW sequencer in register reads/writes
+ * @erase_64k: 64k erase supported
+ * @opcodes: Opcodes which are supported. These are programmed by the BIOS
+ * before it locks down the controller.
+ * @preopcodes: Preopcodes which are supported.
+ */
+struct intel_spi {
+ struct device *dev;
+ const struct intel_spi_boardinfo *info;
+ struct spi_nor nor;
+ void __iomem *base;
+ void __iomem *pregs;
+ void __iomem *sregs;
+ size_t nregions;
+ bool writeable;
+ bool swseq;
+ bool erase_64k;
+ u8 opcodes[8];
+ u8 preopcodes[2];
+};
+
+static bool writeable;
+module_param(writeable, bool, 0);
+MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
+
+static void intel_spi_dump_regs(struct intel_spi *ispi)
+{
+ u32 value;
+ int i;
+
+ dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
+
+ value = readl(ispi->base + HSFSTS_CTL);
+ dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
+ if (value & HSFSTS_CTL_FLOCKDN)
+ dev_dbg(ispi->dev, "-> Locked\n");
+
+ dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
+ dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
+
+ for (i = 0; i < 16; i++)
+ dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
+ i, readl(ispi->base + FDATA(i)));
+
+ dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
+
+ for (i = 0; i < ispi->nregions; i++)
+ dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
+ readl(ispi->base + FREG(i)));
+ for (i = 0; i < PR_NUM; i++)
+ dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
+ readl(ispi->pregs + PR(i)));
+
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+
+ if (ispi->info->type == INTEL_SPI_BYT)
+ dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+
+ dev_dbg(ispi->dev, "Protected regions:\n");
+ for (i = 0; i < PR_NUM; i++) {
+ u32 base, limit;
+
+ value = readl(ispi->pregs + PR(i));
+ if (!(value & (PR_WPE | PR_RPE)))
+ continue;
+
+ limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ base = value & PR_BASE_MASK;
+
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
+ i, base << 12, (limit << 12) | 0xfff,
+ value & PR_WPE ? 'W' : '.',
+ value & PR_RPE ? 'R' : '.');
+ }
+
+ dev_dbg(ispi->dev, "Flash regions:\n");
+ for (i = 0; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || (i > 0 && limit == 0))
+ dev_dbg(ispi->dev, " %02d disabled\n", i);
+ else
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
+ i, base << 12, (limit << 12) | 0xfff);
+ }
+
+ dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
+ ispi->swseq ? 'S' : 'H');
+}
+
+/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
+static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
+
+/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
+static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
+ size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_toio(ispi->base + FDATA(i), buf, bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
+
+static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
+ !(val & HSFSTS_CTL_SCIP), 0,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
+ !(val & SSFSTS_CTL_SCIP), 0,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+ u32 opmenu0, opmenu1, val;
+ int i;
+
+ switch (ispi->info->type) {
+ case INTEL_SPI_BYT:
+ ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BYT_PR;
+ ispi->nregions = BYT_FREG_NUM;
+
+ if (writeable) {
+ /* Disable write protection */
+ val = readl(ispi->base + BYT_BCR);
+ if (!(val & BYT_BCR_WPD)) {
+ val |= BYT_BCR_WPD;
+ writel(val, ispi->base + BYT_BCR);
+ val = readl(ispi->base + BYT_BCR);
+ }
+
+ ispi->writeable = !!(val & BYT_BCR_WPD);
+ }
+
+ break;
+
+ case INTEL_SPI_LPT:
+ ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + LPT_PR;
+ ispi->nregions = LPT_FREG_NUM;
+ break;
+
+ case INTEL_SPI_BXT:
+ ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BXT_PR;
+ ispi->nregions = BXT_FREG_NUM;
+ ispi->erase_64k = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable #SMI generation */
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~HSFSTS_CTL_FSMIE;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ /*
+ * BIOS programs allowed opcodes and then locks down the register.
+ * So read back what opcodes it decided to support. That's the set
+ * we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
+
+ /*
+ * Some controllers can only do basic operations using the hardware
+ * sequencer. All other operations are supposed to be carried out
+ * using the software sequencer. If we find that the BIOS has programmed
+ * opcodes for the software sequencer, we use it over the hardware
+ * sequencer.
+ */
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+
+ val = readl(ispi->sregs + PREOP_OPTYPE);
+ ispi->preopcodes[0] = val;
+ ispi->preopcodes[1] = val >> 8;
+
+ /* Disable #SMI generation from SW sequencer */
+ val = readl(ispi->sregs + SSFSTS_CTL);
+ val &= ~SSFSTS_CTL_FSMIE;
+ writel(val, ispi->sregs + SSFSTS_CTL);
+
+ ispi->swseq = true;
+ }
+
+ intel_spi_dump_regs(ispi);
+
+ return 0;
+}
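The OPMENU unpacking in the loop above is simple byte extraction from two 32-bit registers. A standalone sketch with hypothetical register contents (the opcode values are made up for illustration) shows how the eight allowed opcodes come out:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t opmenu0 = 0x05020301;	/* hypothetical: slots 0-3 = 0x01 0x03 0x02 0x05 */
	uint32_t opmenu1 = 0x9f06d801;	/* hypothetical: slots 4-7 = 0x01 0xd8 0x06 0x9f */
	uint8_t opcodes[8];
	int i;

	for (i = 0; i < 4; i++) {
		opcodes[i] = opmenu0 >> (i * 8);	/* low register: slots 0-3 */
		opcodes[i + 4] = opmenu1 >> (i * 8);	/* high register: slots 4-7 */
	}

	for (i = 0; i < 8; i++)
		printf("slot %d: 0x%02x\n", i, opcodes[i]);
	return 0;
}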
+
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+ return -EINVAL;
+}
+
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
+ int len)
+{
+ u32 val, status;
+ int ret;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
+
+ switch (opcode) {
+ case SPINOR_OP_RDID:
+ val |= HSFSTS_CTL_FCYCLE_RDID;
+ break;
+ case SPINOR_OP_WRSR:
+ val |= HSFSTS_CTL_FCYCLE_WRSR;
+ break;
+ case SPINOR_OP_RDSR:
+ val |= HSFSTS_CTL_FCYCLE_RDSR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
+ int len)
+{
+ u32 val, status;
+ int ret;
+
+ ret = intel_spi_opcode_index(ispi, opcode);
+ if (ret < 0)
+ return ret;
+
+ val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ val |= ret << SSFSTS_CTL_COP_SHIFT;
+ val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
+ val |= SSFSTS_CTL_SCGO;
+ writel(val, ispi->sregs + SSFSTS_CTL);
+
+ ret = intel_spi_wait_sw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + SSFSTS_CTL);
+ if (status & SSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & SSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct intel_spi *ispi = nor->priv;
+ int ret;
+
+ /* Address of the first chip */
+ writel(0, ispi->base + FADDR);
+
+ if (ispi->swseq)
+ ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+ else
+ ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+
+ if (ret)
+ return ret;
+
+ return intel_spi_read_block(ispi, buf, len);
+}
+
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct intel_spi *ispi = nor->priv;
+ int ret;
+
+ /*
+ * This is handled with an atomic operation and pre-op code in the
+ * Intel controller, so skip it here.
+ */
+ if (opcode == SPINOR_OP_WREN)
+ return 0;
+
+ writel(0, ispi->base + FADDR);
+
+ /* Write the value beforehand */
+ ret = intel_spi_write_block(ispi, buf, len);
+ if (ret)
+ return ret;
+
+ if (ispi->swseq)
+ return intel_spi_sw_cycle(ispi, opcode, buf, len);
+ return intel_spi_hw_cycle(ispi, opcode, buf, len);
+}
+
+static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct intel_spi *ispi = nor->priv;
+ size_t block_size, retlen = 0;
+ u32 val, status;
+ ssize_t ret;
+
+ switch (nor->read_opcode) {
+ case SPINOR_OP_READ:
+ case SPINOR_OP_READ_FAST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ while (len > 0) {
+ block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+ writel(from, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_READ;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "read error: %llx: %#x\n", from,
+ status);
+ return ret;
+ }
+
+ ret = intel_spi_read_block(ispi, read_buf, block_size);
+ if (ret)
+ return ret;
+
+ len -= block_size;
+ from += block_size;
+ retlen += block_size;
+ read_buf += block_size;
+ }
+
+ return retlen;
+}
+
+static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *write_buf)
+{
+ struct intel_spi *ispi = nor->priv;
+ size_t block_size, retlen = 0;
+ u32 val, status;
+ ssize_t ret;
+
+ while (len > 0) {
+ block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+ writel(to, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_WRITE;
+
+ /* Write enable */
+ if (ispi->preopcodes[1] == SPINOR_OP_WREN)
+ val |= SSFSTS_CTL_SPOP;
+ val |= SSFSTS_CTL_ACS;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_write_block(ispi, write_buf, block_size);
+ if (ret) {
+ dev_err(ispi->dev, "failed to write block\n");
+ return ret;
+ }
+
+ /* Start the write now */
+ val = readl(ispi->base + HSFSTS_CTL);
+ writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret) {
+ dev_err(ispi->dev, "timeout\n");
+ return ret;
+ }
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "write error: %llx: %#x\n", to,
+ status);
+ return ret;
+ }
+
+ len -= block_size;
+ to += block_size;
+ retlen += block_size;
+ write_buf += block_size;
+ }
+
+ return retlen;
+}
+
+static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+{
+ size_t erase_size, len = nor->mtd.erasesize;
+ struct intel_spi *ispi = nor->priv;
+ u32 val, status, cmd;
+ int ret;
+
+ /* If the hardware can do 64k erase use that when possible */
+ if (len >= SZ_64K && ispi->erase_64k) {
+ cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
+ erase_size = SZ_64K;
+ } else {
+ cmd = HSFSTS_CTL_FCYCLE_ERASE;
+ erase_size = SZ_4K;
+ }
+
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= cmd;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+}
+
+static bool intel_spi_is_protected(const struct intel_spi *ispi,
+ unsigned int base, unsigned int limit)
+{
+ int i;
+
+ for (i = 0; i < PR_NUM; i++) {
+ u32 pr_base, pr_limit, pr_value;
+
+ pr_value = readl(ispi->pregs + PR(i));
+ if (!(pr_value & (PR_WPE | PR_RPE)))
+ continue;
+
+ pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ pr_base = pr_value & PR_BASE_MASK;
+
+ if (pr_base >= base && pr_limit <= limit)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * There will be a single partition holding all enabled flash regions. We
+ * call this "BIOS".
+ */
+static void intel_spi_fill_partition(struct intel_spi *ispi,
+ struct mtd_partition *part)
+{
+ u64 end;
+ int i;
+
+ memset(part, 0, sizeof(*part));
+
+ /* Start from the mandatory descriptor region */
+ part->size = 4096;
+ part->name = "BIOS";
+
+ /*
+ * Now try to find where this partition ends based on the flash
+ * region registers.
+ */
+ for (i = 1; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || limit == 0)
+ continue;
+
+ /*
+ * If any of the regions have protection bits set, make the
+ * whole partition read-only to be on the safe side.
+ */
+ if (intel_spi_is_protected(ispi, base, limit))
+ ispi->writeable = 0;
+
+ end = (limit << 12) + 4096;
+ if (end > part->size)
+ part->size = end;
+ }
+}
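The region registers hold base and limit in 4 KiB units, so the partition end computed above is (limit << 12) + 4096. A hedged sketch with a hypothetical FREG value shows the sizing:

#include <stdio.h>
#include <stdint.h>

#define FREG_BASE_MASK		0x3fff
#define FREG_LIMIT_SHIFT	16
#define FREG_LIMIT_MASK		(0x03fff << FREG_LIMIT_SHIFT)

int main(void)
{
	uint32_t region = 0x07ff0003;	/* hypothetical flash region register */
	uint32_t base = region & FREG_BASE_MASK;
	uint32_t limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

	/* base 0x3 -> 0x3000, end = (0x7ff << 12) + 0x1000 = 0x800000 (8 MiB) */
	printf("base=0x%x end=0x%llx\n", (unsigned int)(base << 12),
	       (unsigned long long)(((uint64_t)limit << 12) + 4096));
	return 0;
}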
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+ struct resource *mem, const struct intel_spi_boardinfo *info)
+{
+ struct mtd_partition part;
+ struct intel_spi *ispi;
+ int ret;
+
+ if (!info || !mem)
+ return ERR_PTR(-EINVAL);
+
+ ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
+ if (!ispi)
+ return ERR_PTR(-ENOMEM);
+
+ ispi->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ispi->base))
+ return ispi->base;
+
+ ispi->dev = dev;
+ ispi->info = info;
+ ispi->writeable = info->writeable;
+
+ ret = intel_spi_init(ispi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ispi->nor.dev = ispi->dev;
+ ispi->nor.priv = ispi;
+ ispi->nor.read_reg = intel_spi_read_reg;
+ ispi->nor.write_reg = intel_spi_write_reg;
+ ispi->nor.read = intel_spi_read;
+ ispi->nor.write = intel_spi_write;
+ ispi->nor.erase = intel_spi_erase;
+
+ ret = spi_nor_scan(&ispi->nor, NULL, SPI_NOR_NORMAL);
+ if (ret) {
+ dev_info(dev, "failed to locate the chip\n");
+ return ERR_PTR(ret);
+ }
+
+ intel_spi_fill_partition(ispi, &part);
+
+ /* Prevent writes if not explicitly enabled */
+ if (!ispi->writeable || !writeable)
+ ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+
+ ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return ispi;
+}
+EXPORT_SYMBOL_GPL(intel_spi_probe);
+
+int intel_spi_remove(struct intel_spi *ispi)
+{
+ return mtd_device_unregister(&ispi->nor.mtd);
+}
+EXPORT_SYMBOL_GPL(intel_spi_remove);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/intel-spi.h
new file mode 100644
index 000000000000..5ab7dc250050
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi.h
@@ -0,0 +1,24 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef INTEL_SPI_H
+#define INTEL_SPI_H
+
+#include <linux/platform_data/intel-spi.h>
+
+struct intel_spi;
+struct resource;
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+ struct resource *mem, const struct intel_spi_boardinfo *info);
+int intel_spi_remove(struct intel_spi *ispi);
+
+#endif /* INTEL_SPI_H */
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index da7cd69d4857..1ae872bfc3ba 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -75,6 +75,16 @@ struct flash_info {
* bit. Must be used with
* SPI_NOR_HAS_LOCK.
*/
+#define SPI_S3AN BIT(10) /*
+ * Xilinx Spartan 3AN In-System Flash
+ * (MFR cannot be used for probing
+ * because it has the same value as
+ * ATMEL flashes)
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4-byte address op codes
+ * to support memory sizes above 128 Mib (16 MiB).
+ */
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -122,7 +132,7 @@ static int read_fsr(struct spi_nor *nor)
/*
* Read configuration register, returning its value in the
* location. Return the configuration register value.
- * Returns negative if error occured.
+ * Returns negative if error occurred.
*/
static int read_cr(struct spi_nor *nor)
{
@@ -188,6 +198,78 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
return mtd->priv;
}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static inline u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ /* Do some manufacturer fixups first */
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_SPANSION:
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = info->sector_size;
+ break;
+
+ default:
+ break;
+ }
+
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+}
+
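As a standalone illustration of the table-driven conversion used by spi_nor_convert_opcode() above — a two-column table maps each 3-byte opcode to its 4-byte counterpart, and the input is returned unchanged when no entry matches — here is a minimal user-space sketch; the opcode values in the demo table are placeholders, not taken from the SPI NOR opcode definitions:

#include <stddef.h>
#include <stdio.h>

static unsigned char convert_opcode(unsigned char op,
				    const unsigned char table[][2], size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (table[i][0] == op)
			return table[i][1];
	return op;	/* no conversion found, keep the input opcode */
}

int main(void)
{
	/* illustrative 3-byte -> 4-byte pairs, chosen for the demo only */
	static const unsigned char demo_3to4[][2] = {
		{ 0x03, 0x13 },
		{ 0x0b, 0x0c },
	};

	printf("0x%02x\n", convert_opcode(0x0b, demo_3to4, 2)); /* 0x0c */
	printf("0x%02x\n", convert_opcode(0x5a, demo_3to4, 2)); /* unchanged */
	return 0;
}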
/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
int enable)
@@ -217,6 +299,21 @@ static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
}
}
+
+static int s3an_sr_ready(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ return ret;
+ }
+
+ return !!(val & XSR_RDY);
+}
+
static inline int spi_nor_sr_ready(struct spi_nor *nor)
{
int sr = read_sr(nor);
@@ -238,7 +335,11 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
static int spi_nor_ready(struct spi_nor *nor)
{
int sr, fsr;
- sr = spi_nor_sr_ready(nor);
+
+ if (nor->flags & SNOR_F_READY_XSR_RDY)
+ sr = s3an_sr_ready(nor);
+ else
+ sr = spi_nor_sr_ready(nor);
if (sr < 0)
return sr;
fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
@@ -320,6 +421,27 @@ static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
}
/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by the Xilinx tools, it can access the whole flash
+ * area, and changing over to the Power-of-two mode is irreversible and
+ * corrupts the original data.
+ * The address can safely be an unsigned int, as the biggest S3AN device is
+ * smaller than 4 MiB.
+ */
+static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
+{
+ unsigned int offset;
+ unsigned int page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
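A quick worked example of the conversion above, assuming a 264-byte-page part (so the shift is 9): byte address 1000 splits into page 3 and offset 208, and the device-visible address becomes (3 << 9) | 208 = 0x6d0. A small user-space sketch of the same arithmetic:

#include <stdio.h>

static unsigned int s3an_default_addr(unsigned int addr, unsigned int page_size)
{
	unsigned int offset = addr % page_size;
	unsigned int page = addr / page_size;

	/* pages are 512- or 1024-byte aligned in Default Address Mode */
	page <<= (page_size > 512) ? 10 : 9;
	return page | offset;
}

int main(void)
{
	printf("0x%x\n", s3an_default_addr(1000, 264));	/* prints 0x6d0 */
	return 0;
}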
+
+/*
* Initiate the erasure of a single sector
*/
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
@@ -327,6 +449,9 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
int i;
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
if (nor->erase)
return nor->erase(nor, addr);
@@ -368,7 +493,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
return ret;
/* whole-chip erase? */
- if (len == mtd->size) {
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
unsigned long timeout;
write_enable(nor);
@@ -782,6 +907,19 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.addr_width = (_addr_width), \
.flags = (_flags),
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8*_page_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = _page_size, \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR | SPI_S3AN,
+
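To see what the macro above implies for capacity, take the largest entry added later in this patch ("3S1400AN", 512 sectors with 528-byte pages): sector_size = 8 * 528 = 4224 bytes, so the raw size is 4224 * 512 = 2,162,688 bytes (about 2.06 MiB), consistent with the note elsewhere in this patch that the biggest S3AN device is smaller than 4 MiB. A throwaway check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 528, n_sectors = 512;
	unsigned int sector_size = 8 * page_size;	/* as in S3AN_INFO() */
	unsigned long long size = (unsigned long long)sector_size * n_sectors;

	printf("%llu bytes\n", size);	/* prints 2162688 */
	return 0;
}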
/* NOTE: double check command sets and memory organization when you add
* more nor chips. This current list focusses on newer chips, which
* have been converging on command sets which including JEDEC ID.
@@ -821,7 +959,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
/* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -833,6 +971,11 @@ static const struct flash_info spi_nor_ids[] = {
/* GigaDevice */
{
+ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -1014,6 +1157,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
{ },
};
@@ -1054,7 +1204,12 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
return ret;
while (len) {
- ret = nor->read(nor, from, len, buf);
+ loff_t addr = from;
+
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
+ ret = nor->read(nor, addr, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -1175,17 +1330,32 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
for (i = 0; i < len; ) {
ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+		 * If page_size is a power of two, the offset can be quickly
+		 * calculated with an AND operation. In the other cases we
+		 * need to do a modulus operation (more expensive).
+		 * Power-of-two numbers have only one bit set, so we can use
+		 * hweight32() to detect whether we need to do a
+		 * modulus (do_div()) or not.
+ */
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
- page_offset = (to + i) & (nor->page_size - 1);
- WARN_ONCE(page_offset,
- "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
- page_offset);
+ page_offset = do_div(aux, nor->page_size);
+ }
/* the size of data remaining on the first page */
page_remain = min_t(size_t,
nor->page_size - page_offset, len - i);
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
write_enable(nor);
- ret = nor->write(nor, to + i, page_remain, buf + i);
+ ret = nor->write(nor, addr, page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
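The hweight32() trick above relies on a power of two having exactly one bit set; only then is the cheap `addr & (page_size - 1)` mask valid, otherwise the code falls back to do_div(). A user-space sketch of that test, using __builtin_popcount() in place of the kernel's hweight32():

#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 256, 264, 512, 528 };

	for (int i = 0; i < 4; i++) {
		unsigned int n = sizes[i];
		int pow2 = __builtin_popcount(n) == 1;

		printf("%u -> %s\n", n, pow2 ? "mask with (n - 1)" : "modulus");
	}
	return 0;
}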
@@ -1216,6 +1386,9 @@ static int macronix_quad_enable(struct spi_nor *nor)
val = read_sr(nor);
if (val < 0)
return val;
+ if (val & SR_QUAD_EN_MX)
+ return 0;
+
write_enable(nor);
write_sr(nor, val | SR_QUAD_EN_MX);
@@ -1236,7 +1409,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
* Write status Register and configuration register with 2 bytes
* The first byte will be written to the status register, while the
* second byte will be written to the configuration register.
- * Return negative if error occured.
+ * Return negative if error occurred.
*/
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
@@ -1312,6 +1485,47 @@ static int spi_nor_check(struct spi_nor *nor)
return 0;
}
+static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ return ret;
+ }
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+	 * These flashes have a page size of 264 or 528 bytes (known as
+	 * Default addressing mode). It can be changed to a more standard
+	 * Power-of-two mode where the page size is 256 or 512 bytes. This
+	 * comes at a price: there is 3% less space, the existing data is
+	 * corrupted, and the page size cannot be changed back to Default
+	 * addressing mode.
+	 *
+	 * The current addressing mode can be read from the XRDSR register
+	 * and should not be changed, because changing it is a destructive
+	 * operation.
+ */
+ if (val & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ? 256 : 512;
+ nor->mtd.writebufsize = nor->page_size;
+ nor->mtd.size = 8 * nor->page_size * info->n_sectors;
+ nor->mtd.erasesize = 8 * nor->page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
+ }
+
+ return 0;
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
const struct flash_info *info = NULL;
@@ -1360,6 +1574,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mutex_init(&nor->lock);
/*
+	 * Make sure the SNOR_F_READY_XSR_RDY flag is set before calling
+	 * spi_nor_wait_till_ready(). Xilinx S3AN chips share their MFR ID
+	 * with Atmel SPI NOR flashes.
+ */
+ if (info->flags & SPI_S3AN)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+
+ /*
* Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
* with the software protection bits set
*/
@@ -1483,27 +1705,10 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
- /* Dedicated 4-byte command set */
- switch (nor->flash_read) {
- case SPI_NOR_QUAD:
- nor->read_opcode = SPINOR_OP_READ4_1_1_4;
- break;
- case SPI_NOR_DUAL:
- nor->read_opcode = SPINOR_OP_READ4_1_1_2;
- break;
- case SPI_NOR_FAST:
- nor->read_opcode = SPINOR_OP_READ4_FAST;
- break;
- case SPI_NOR_NORMAL:
- nor->read_opcode = SPINOR_OP_READ4;
- break;
- }
- nor->program_opcode = SPINOR_OP_PP_4B;
- /* No small sector erase for 4-byte command set */
- nor->erase_opcode = SPINOR_OP_SE_4B;
- mtd->erasesize = info->sector_size;
- } else
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor, info);
+ else
set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
@@ -1517,6 +1722,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
+ if (info->flags & SPI_S3AN) {
+ ret = s3an_nor_scan(info, nor);
+ if (ret)
+ return ret;
+ }
+
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index d1e6931c132f..c80869e60909 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -323,16 +323,15 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
struct ubiblock *dev = hctx->queue->queuedata;
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
- if (req->cmd_type != REQ_TYPE_FS)
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ ubi_sgl_init(&pdu->usgl);
+ queue_work(dev->wq, &pdu->work);
+ return BLK_MQ_RQ_QUEUE_OK;
+ default:
return BLK_MQ_RQ_QUEUE_ERROR;
+ }
- if (rq_data_dir(req) != READ)
- return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
-
- ubi_sgl_init(&pdu->usgl);
- queue_work(dev->wq, &pdu->work);
-
- return BLK_MQ_RQ_QUEUE_OK;
}
static int ubiblock_init_request(void *data, struct request *req,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 95c32f2d7601..100fbdc9b95c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -135,6 +135,7 @@ config MACVTAP
tristate "MAC-VLAN based tap driver"
depends on MACVLAN
depends on INET
+ select TAP
help
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -165,11 +166,25 @@ config IPVLAN
To compile this driver as a module, choose M here: the module
will be called ipvlan.
+config IPVTAP
+ tristate "IP-VLAN based tap driver"
+ depends on IPVLAN
+ depends on INET
+ select TAP
+ ---help---
+ This adds a specialized tap character device driver that is based
+ on the IP-VLAN network interface, called ipvtap. An ipvtap device
+	  can be added in the same way as an ipvlan device, using 'type
+ ipvtap', and then be accessed through the tap user space interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ipvtap.
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
select NET_UDP_TUNNEL
+ select GRO_CELLS
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
@@ -184,6 +199,7 @@ config GENEVE
tristate "Generic Network Virtualization Encapsulation"
depends on INET && NET_UDP_TUNNEL
select NET_IP_TUNNEL
+ select GRO_CELLS
---help---
This allows one to create geneve virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. GENEVE is often used
@@ -216,6 +232,7 @@ config MACSEC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
+ select GRO_CELLS
---help---
MACsec is an encryption standard for Ethernet.
@@ -284,6 +301,12 @@ config TUN
If you don't know what to use this for, you don't need it.
+config TAP
+ tristate
+ ---help---
+	  This option is selected by any driver implementing the tap user
+	  space interface for a virtual interface, to re-use the core tap
+	  functionality.
+
config TUN_VNET_CROSS_LE
bool "Support for cross-endian vnet headers on little-endian kernels"
default n
@@ -437,6 +460,9 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
+ depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
+ IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
+ PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7336cbd3ef5d..98ed4d96987c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,7 @@
#
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_IPVLAN) += ipvlan/
+obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
+obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8029dd4912b6..6321f12630c8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -211,8 +211,8 @@ static int lacp_fast;
static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
- struct rtnl_link_stats64 *stats);
+static void bond_get_stats(struct net_device *bond_dev,
+ struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
@@ -1993,11 +1993,10 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
return ret;
}
-static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
+static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
bond_fill_ifbond(bond, info);
- return 0;
}
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
@@ -3337,8 +3336,8 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}
-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
- struct rtnl_link_stats64 *stats)
+static void bond_get_stats(struct net_device *bond_dev,
+ struct rtnl_link_stats64 *stats)
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
@@ -3362,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
memcpy(&bond->bond_stats, stats, sizeof(*stats));
spin_unlock(&bond->stats_lock);
-
- return stats;
}
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
@@ -3411,12 +3408,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
return -EFAULT;
- res = bond_info_query(bond_dev, &k_binfo);
- if (res == 0 &&
- copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
+ bond_info_query(bond_dev, &k_binfo);
+ if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
return -EFAULT;
- return res;
+ return 0;
case BOND_SLAVE_INFO_QUERY_OLD:
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;
@@ -4149,8 +4145,6 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
- .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
- .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a85495dbb0c..0da4f2f5c7e3 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y := dev.o
+can-dev-y += dev.o
+can-dev-y += rx-offload.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8f5e93cb7975..0e0df0ba288c 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
u32 reg_ier = AT91_IRQ_ERR_FRAME;
reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
at91_write(priv, AT91_IER, reg_ier);
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3dccd3200d5..606b7d8ffe13 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
end:
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* enable all IRQs if we are not in bus off state */
if (priv->can.state != CAN_STATE_BUS_OFF)
c_can_irq_control(priv, true);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c96b1a..cf7c18947189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->base = addr;
+ priv->device = &pdev->dev;
if (!c_can_pci_data->freq) {
dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 8d6208c0b400..611d16a7061d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
+/* Checks the validity of predefined bitrate settings */
+static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+	unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+	if (i >= bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
{
int err;
- /* Check if the CAN device has bit-timing parameters */
- if (!btc)
- return -EOPNOTSUPP;
-
/*
* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
- if (!bt->tq && bt->bitrate)
+ if (!bt->tq && bt->bitrate && btc)
err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate)
+ else if (bt->tq && !bt->bitrate && btc)
err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
else
err = -EINVAL;
@@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+		 * directly via do_set_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt, priv->bittiming_const);
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass bitrate
+		 * directly via do_set_data_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
- err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
@@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev,
}
}
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
return 0;
}
@@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(struct can_bittiming));
if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
return size;
}
@@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(priv->data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)))
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const))
+ )
+
return -EMSGSIZE;
return 0;
@@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
*/
int register_candev(struct net_device *dev)
{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
+
dev->rtnl_link_ops = &can_link_ops;
return register_netdev(dev);
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 16f7cadda5c3..ea57fed375c6 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -3,7 +3,8 @@
*
* Copyright (c) 2005-2006 Varma Electronics Oy
* Copyright (c) 2009 Sascha Hauer, Pengutronix
- * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
*
* Based on code originally by Andrey Volkov <avolkov@varma-el.com>
*
@@ -24,6 +25,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -55,9 +57,10 @@
#define FLEXCAN_MCR_WAK_SRC BIT(19)
#define FLEXCAN_MCR_DOZE BIT(18)
#define FLEXCAN_MCR_SRX_DIS BIT(17)
-#define FLEXCAN_MCR_BCC BIT(16)
+#define FLEXCAN_MCR_IRMQ BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
+/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0x0 << 8)
#define FLEXCAN_MCR_IDAM_B (0x1 << 8)
@@ -143,17 +146,20 @@
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_BUF_RESERVED 8
-#define FLEXCAN_TX_BUF_ID 9
-#define FLEXCAN_IFLAG_BUF(x) BIT(x)
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
+#define FLEXCAN_TX_MB_OFF_FIFO 9
+#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
+#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63
+#define FLEXCAN_IFLAG_MB(x) BIT(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
-#define FLEXCAN_IFLAG_DEFAULT \
- (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
- FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
/* FLEXCAN message buffers */
+#define FLEXCAN_MB_CODE_MASK (0xf << 24)
+#define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24)
#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24)
#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24)
#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24)
@@ -189,7 +195,9 @@
*/
#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(3) /* Disble Memory error detection */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
/* Structure of the message buffer */
struct flexcan_mb {
@@ -213,7 +221,10 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 ctrl2; /* 0x34 */
+ union { /* 0x34 */
+ u32 gfwr_mx28; /* MX28, MX53 */
+ u32 ctrl2; /* MX6, VF610 */
+ };
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
@@ -232,7 +243,11 @@ struct flexcan_regs {
* size conf'ed via ctrl2::RFFN
* (mx6, vf610)
*/
- u32 _reserved4[408];
+ u32 _reserved4[256]; /* 0x480 */
+ u32 rximr[64]; /* 0x880 */
+ u32 _reserved5[24]; /* 0x980 */
+ u32 gfwr_mx6; /* 0x9e0 - MX6 */
+ u32 _reserved6[63]; /* 0x9e4 */
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -249,31 +264,36 @@ struct flexcan_devtype_data {
struct flexcan_priv {
struct can_priv can;
- struct napi_struct napi;
+ struct can_rx_offload offload;
struct flexcan_regs __iomem *regs;
- u32 reg_esr;
+ struct flexcan_mb __iomem *tx_mb;
+ struct flexcan_mb __iomem *tx_mb_reserved;
+ u8 tx_mb_idx;
u32 reg_ctrl_default;
+ u32 reg_imask1_default;
+ u32 reg_imask2_default;
struct clk *clk_ipg;
struct clk *clk_per;
- struct flexcan_platform_data *pdata;
const struct flexcan_devtype_data *devtype_data;
struct regulator *reg_xceiver;
};
-static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
};
-static struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data;
-static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
- .quirks = FLEXCAN_QUIRK_DISABLE_RXFG,
+static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
-static struct flexcan_devtype_data fsl_vf610_devtype_data = {
- .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
+static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -331,13 +351,6 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
return regulator_disable(priv->reg_xceiver);
}
-static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
- u32 reg_esr)
-{
- return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
- (reg_esr & FLEXCAN_ESR_ERR_BUS);
-}
-
static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
@@ -468,7 +481,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct flexcan_regs __iomem *regs = priv->regs;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
u32 data;
@@ -491,68 +503,73 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (cf->can_dlc > 0) {
data = be32_to_cpup((__be32 *)&cf->data[0]);
- flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
+ flexcan_write(data, &priv->tx_mb->data[0]);
}
if (cf->can_dlc > 3) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
- flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
+ flexcan_write(data, &priv->tx_mb->data[1]);
}
can_put_echo_skb(skb, dev, 0);
- flexcan_write(can_id, &regs->mb[FLEXCAN_TX_BUF_ID].can_id);
- flexcan_write(ctrl, &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+ flexcan_write(can_id, &priv->tx_mb->can_id);
+ flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
*/
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+ &priv->tx_mb_reserved->can_ctrl);
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+ &priv->tx_mb_reserved->can_ctrl);
return NETDEV_TX_OK;
}
-static void do_bus_err(struct net_device *dev,
- struct can_frame *cf, u32 reg_esr)
+static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
- int rx_errors = 0, tx_errors = 0;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ bool rx_errors = false, tx_errors = false;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
netdev_dbg(dev, "BIT1_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT1;
- tx_errors = 1;
+ tx_errors = true;
}
if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
netdev_dbg(dev, "BIT0_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT0;
- tx_errors = 1;
+ tx_errors = true;
}
if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
netdev_dbg(dev, "ACK_ERR irq\n");
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- tx_errors = 1;
+ tx_errors = true;
}
if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
netdev_dbg(dev, "CRC_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT;
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- rx_errors = 1;
+ rx_errors = true;
}
if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
netdev_dbg(dev, "FRM_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_FORM;
- rx_errors = 1;
+ rx_errors = true;
}
if (reg_esr & FLEXCAN_ESR_STF_ERR) {
netdev_dbg(dev, "STF_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_STUFF;
- rx_errors = 1;
+ rx_errors = true;
}
priv->can.can_stats.bus_error++;
@@ -560,32 +577,16 @@ static void do_bus_err(struct net_device *dev,
dev->stats.rx_errors++;
if (tx_errors)
dev->stats.tx_errors++;
-}
-
-static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
-{
- struct sk_buff *skb;
- struct can_frame *cf;
-
- skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
-
- do_bus_err(dev, cf, reg_esr);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->can_dlc;
- netif_receive_skb(skb);
- return 1;
+ can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
}
-static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
+static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
- enum can_state new_state = 0, rx_state = 0, tx_state = 0;
+ enum can_state new_state, rx_state, tx_state;
int flt;
struct can_berr_counter bec;
@@ -606,33 +607,63 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
/* state hasn't changed */
if (likely(new_state == priv->can.state))
- return 0;
+ return;
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
- return 0;
+ return;
can_change_state(dev, cf, tx_state, rx_state);
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->can_dlc;
- netif_receive_skb(skb);
+ can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+}
- return 1;
+static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct flexcan_priv, offload);
}
-static void flexcan_read_fifo(const struct net_device *dev,
- struct can_frame *cf)
+static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
+ struct can_frame *cf,
+ u32 *timestamp, unsigned int n)
{
- const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_priv *priv = rx_offload_to_priv(offload);
struct flexcan_regs __iomem *regs = priv->regs;
- struct flexcan_mb __iomem *mb = &regs->mb[0];
- u32 reg_ctrl, reg_id;
+ struct flexcan_mb __iomem *mb = &regs->mb[n];
+ u32 reg_ctrl, reg_id, reg_iflag1;
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ u32 code;
+
+ do {
+ reg_ctrl = flexcan_read(&mb->can_ctrl);
+ } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
+
+ /* is this MB empty? */
+ code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
+ if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
+ (code != FLEXCAN_MB_CODE_RX_OVERRUN))
+ return 0;
+
+ if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
+ /* This MB was overrun, we lost data */
+ offload->dev->stats.rx_over_errors++;
+ offload->dev->stats.rx_errors++;
+ }
+ } else {
+ reg_iflag1 = flexcan_read(&regs->iflag1);
+ if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
+ return 0;
+
+ reg_ctrl = flexcan_read(&mb->can_ctrl);
+ }
+
+	/* increase timestamp to full 32 bits */
+ *timestamp = reg_ctrl << 16;
- reg_ctrl = flexcan_read(&mb->can_ctrl);
reg_id = flexcan_read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -647,69 +678,31 @@ static void flexcan_read_fifo(const struct net_device *dev,
*(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
/* mark as read */
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- flexcan_read(&regs->timer);
-}
-
-static int flexcan_read_frame(struct net_device *dev)
-{
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, &cf);
- if (unlikely(!skb)) {
- stats->rx_dropped++;
- return 0;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ /* Clear IRQ */
+ if (n < 32)
+ flexcan_write(BIT(n), &regs->iflag1);
+ else
+ flexcan_write(BIT(n - 32), &regs->iflag2);
+ } else {
+ flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+ flexcan_read(&regs->timer);
}
- flexcan_read_fifo(dev, cf);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_receive_skb(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return 1;
}
-static int flexcan_poll(struct napi_struct *napi, int quota)
+
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
{
- struct net_device *dev = napi->dev;
- const struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg_iflag1, reg_esr;
- int work_done = 0;
-
- /* The error bits are cleared on read,
- * use saved value from irq handler.
- */
- reg_esr = flexcan_read(&regs->esr) | priv->reg_esr;
-
- /* handle state changes */
- work_done += flexcan_poll_state(dev, reg_esr);
+ u32 iflag1, iflag2;
- /* handle RX-FIFO */
- reg_iflag1 = flexcan_read(&regs->iflag1);
- while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
- work_done < quota) {
- work_done += flexcan_read_frame(dev);
- reg_iflag1 = flexcan_read(&regs->iflag1);
- }
-
- /* report bus errors */
- if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
- work_done += flexcan_poll_bus_err(dev, reg_esr);
+ iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
+ iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+ ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
- if (work_done < quota) {
- napi_complete(napi);
- /* enable IRQs */
- flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
- flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
- }
-
- return work_done;
+ return (u64)iflag2 << 32 | iflag1;
}
static irqreturn_t flexcan_irq(int irq, void *dev_id)
@@ -718,55 +711,70 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
+ irqreturn_t handled = IRQ_NONE;
u32 reg_iflag1, reg_esr;
reg_iflag1 = flexcan_read(&regs->iflag1);
- reg_esr = flexcan_read(&regs->esr);
- /* ACK all bus error and state change IRQ sources */
- if (reg_esr & FLEXCAN_ESR_ALL_INT)
- flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
-
- /* schedule NAPI in case of:
- * - rx IRQ
- * - state change IRQ
- * - bus error IRQ and bus error reporting is activated
- */
- if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
- (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
- flexcan_has_and_handle_berr(priv, reg_esr)) {
- /* The error bits are cleared on read,
- * save them for later use.
- */
- priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
- flexcan_write(FLEXCAN_IFLAG_DEFAULT &
- ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
- &regs->ctrl);
- napi_schedule(&priv->napi);
- }
+ /* reception interrupt */
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ u64 reg_iflag;
+ int ret;
+
+ while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
+ handled = IRQ_HANDLED;
+ ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
+ reg_iflag);
+ if (!ret)
+ break;
+ }
+ } else {
+ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
+ handled = IRQ_HANDLED;
+ can_rx_offload_irq_offload_fifo(&priv->offload);
+ }
- /* FIFO overflow */
- if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
+ /* FIFO overflow interrupt */
+ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+ handled = IRQ_HANDLED;
+ flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ }
}
/* transmission complete interrupt */
- if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
+ if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ handled = IRQ_HANDLED;
stats->tx_bytes += can_get_echo_skb(dev, 0);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
- flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
+ &priv->tx_mb->can_ctrl);
+ flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
netif_wake_queue(dev);
}
- return IRQ_HANDLED;
+ reg_esr = flexcan_read(&regs->esr);
+
+ /* ACK all bus error and state change IRQ sources */
+ if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+ handled = IRQ_HANDLED;
+ flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ }
+
+ /* state change interrupt */
+ if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+ flexcan_irq_state(dev, reg_esr);
+
+ /* bus error IRQ - handle if bus error reporting is activated */
+ if ((reg_esr & FLEXCAN_ESR_ERR_BUS) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ flexcan_irq_bus_err(dev, reg_esr);
+
+ return handled;
}
static void flexcan_set_bittiming(struct net_device *dev)
@@ -839,14 +847,23 @@ static int flexcan_chip_start(struct net_device *dev)
* only supervisor access
* enable warning int
* disable local echo
+ * enable individual RX masking
* choose format C
* set max mailbox number
*/
reg_mcr = flexcan_read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
- reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
- FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS |
- FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+ reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
+ FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
+ FLEXCAN_MCR_IDAM_C;
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ reg_mcr &= ~FLEXCAN_MCR_FEN;
+ reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
+ } else {
+ reg_mcr |= FLEXCAN_MCR_FEN |
+ FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+ }
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
flexcan_write(reg_mcr, &regs->mcr);
@@ -883,19 +900,31 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
flexcan_write(reg_ctrl, &regs->ctrl);
+ if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+ reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
+ flexcan_write(reg_ctrl2, &regs->ctrl2);
+ }
+
/* clear and invalidate all mailboxes first */
- for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) {
+ for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
&regs->mb[i].can_ctrl);
}
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+ flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
+ &regs->mb[i].can_ctrl);
+ }
+
/* Errata ERR005829: mark first TX mailbox as INACTIVE */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+ &priv->tx_mb_reserved->can_ctrl);
/* mark TX mailbox as INACTIVE */
flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+ &priv->tx_mb->can_ctrl);
/* acceptance mask/acceptance code (accept everything) */
flexcan_write(0x0, &regs->rxgmask);
@@ -905,6 +934,10 @@ static int flexcan_chip_start(struct net_device *dev)
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
flexcan_write(0x0, &regs->rxfgmask);
+ /* clear acceptance filters */
+ for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+ flexcan_write(0, &regs->rximr[i]);
+
/* On Vybrid, disable memory error detection interrupts
* and freeze mode.
* This also works around errata e5295 which generates
@@ -942,7 +975,8 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
- flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+ flexcan_write(priv->reg_imask1_default, &regs->imask1);
+ flexcan_write(priv->reg_imask2_default, &regs->imask2);
enable_irq(dev->irq);
/* print chip status */
@@ -972,6 +1006,7 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_chip_disable(priv);
/* Disable all interrupts */
+ flexcan_write(0, &regs->imask2);
flexcan_write(0, &regs->imask1);
flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
&regs->ctrl);
@@ -1008,7 +1043,7 @@ static int flexcan_open(struct net_device *dev)
can_led_event(dev, CAN_LED_EVENT_OPEN);
- napi_enable(&priv->napi);
+ can_rx_offload_enable(&priv->offload);
netif_start_queue(dev);
return 0;
@@ -1030,7 +1065,7 @@ static int flexcan_close(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
- napi_disable(&priv->napi);
+ can_rx_offload_disable(&priv->offload);
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
@@ -1104,8 +1139,9 @@ static int register_flexcandev(struct net_device *dev)
flexcan_write(reg, &regs->mcr);
/* Currently we only support newer versions of this core
- * featuring a RX FIFO. Older cores found on some Coldfire
- * derivates are not yet supported.
+	 * featuring a RX hardware FIFO (although this driver doesn't
+	 * make use of it on some cores). Older cores, found on some
+	 * Coldfire derivatives, are not tested.
*/
reg = flexcan_read(&regs->mcr);
if (!(reg & FLEXCAN_MCR_FEN)) {
@@ -1208,6 +1244,9 @@ static int flexcan_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1223,14 +1262,41 @@ static int flexcan_probe(struct platform_device *pdev)
priv->regs = regs;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
- priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
- netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+ priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
+ } else {
+ priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+ priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
+ }
+ priv->tx_mb = &regs->mb[priv->tx_mb_idx];
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
+ priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+ priv->reg_imask2_default = 0;
+
+ priv->offload.mailbox_read = flexcan_mailbox_read;
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ u64 imask;
+
+ priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;
+
+ imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first);
+ priv->reg_imask1_default |= imask;
+ priv->reg_imask2_default |= imask >> 32;
+
+ err = can_rx_offload_add_timestamp(dev, &priv->offload);
+ } else {
+ priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+ err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT);
+ }
+ if (err)
+ goto failed_offload;
err = register_flexcandev(dev);
if (err) {
@@ -1245,6 +1311,7 @@ static int flexcan_probe(struct platform_device *pdev)
return 0;
+ failed_offload:
failed_register:
free_candev(dev);
return err;
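The imask computation a few lines above splits a 64-bit mailbox mask across the two 32-bit interrupt mask registers. With the timestamp layout in this patch (mb_first = 2, mb_last = 63), GENMASK_ULL(63, 2) is 0xfffffffffffffffc, so imask1 gains 0xfffffffc and imask2 gains 0xffffffff. A user-space sketch of the split, with GENMASK_ULL open-coded:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int mb_first = 2, mb_last = 63;
	/* bits mb_first..mb_last set, i.e. GENMASK_ULL(mb_last, mb_first) */
	uint64_t imask = (~0ULL >> (63 - mb_last)) & (~0ULL << mb_first);

	printf("imask1 |= 0x%08x\n", (uint32_t)imask);		/* 0xfffffffc */
	printf("imask2 |= 0x%08x\n", (uint32_t)(imask >> 32));	/* 0xffffffff */
	return 0;
}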
@@ -1256,7 +1323,7 @@ static int flexcan_remove(struct platform_device *pdev)
struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
- netif_napi_del(&priv->napi);
+ can_rx_offload_del(&priv->offload);
free_candev(dev);
return 0;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 368bb0710d8f..138f5ae75c0b 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ifi_canfd_irq_enable(ndev, 1);
}
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index f13bb8d9bb84..2ba1a81500c1 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* We have processed all packets that the adapter had, but it
* was less than our budget, stop polling */
if (received < budget)
- napi_complete(napi);
+ napi_complete_done(napi, received);
spin_lock_irqsave(&mod->lock, flags);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 195f15edb32e..7a6554efd42b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
work_done += m_can_do_rx_poll(dev, (quota - work_done));
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
m_can_enable_all_interrupts(priv);
}
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 788459f6bf5c..caed4e6960f8 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
}
/* All packets processed */
if (num_pkts < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, num_pkts);
priv->ier |= RCAR_CAN_IER_RXFIE;
writeb(priv->ier, &priv->regs->ier);
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 43cdd5544b0c..4ef07d97156d 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* All packets processed */
if (num_pkts < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, num_pkts);
/* Enable Rx FIFO interrupts */
rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
RCANFD_RFCC_RFIE);
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
new file mode 100644
index 000000000000..f394f77d7528
--- /dev/null
+++ b/drivers/net/can/rx-offload.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+
+struct can_rx_offload_cb {
+ u32 timestamp;
+};
+
+static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+
+ return (struct can_rx_offload_cb *)skb->cb;
+}
+
+static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+{
+ if (offload->inc)
+ return a <= b;
+ else
+ return a >= b;
+}
+
+static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+{
+ if (offload->inc)
+ return (*val)++;
+ else
+ return (*val)--;
+}
+
+static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+{
+ struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ int work_done = 0;
+
+ while ((work_done < quota) &&
+ (skb = skb_dequeue(&offload->skb_queue))) {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+ }
+
+ if (work_done < quota) {
+ napi_complete_done(napi, work_done);
+
+ /* Check if there was another interrupt */
+ if (!skb_queue_empty(&offload->skb_queue))
+ napi_reschedule(&offload->napi);
+ }
+
+ can_led_event(offload->dev, CAN_LED_EVENT_RX);
+
+ return work_done;
+}
+
+static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
+{
+ struct sk_buff *pos, *insert = (struct sk_buff *)head;
+
+ skb_queue_reverse_walk(head, pos) {
+ const struct can_rx_offload_cb *cb_pos, *cb_new;
+
+ cb_pos = can_rx_offload_get_cb(pos);
+ cb_new = can_rx_offload_get_cb(new);
+
+ netdev_dbg(new->dev,
+ "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+ __func__,
+ cb_pos->timestamp, cb_new->timestamp,
+ cb_new->timestamp - cb_pos->timestamp,
+ skb_queue_len(head));
+
+ if (compare(pos, new) < 0)
+ continue;
+ insert = pos;
+ break;
+ }
+
+ __skb_queue_after(head, insert, new);
+}
+
+static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+{
+ const struct can_rx_offload_cb *cb_a, *cb_b;
+
+ cb_a = can_rx_offload_get_cb(a);
+ cb_b = can_rx_offload_get_cb(b);
+
+	/* Subtract two u32 values and return the result as an int, to keep
+	 * the difference steady around the u32 overflow.
+ */
+ return cb_b->timestamp - cb_a->timestamp;
+}
+
+static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+{
+ struct sk_buff *skb = NULL;
+ struct can_rx_offload_cb *cb;
+ struct can_frame *cf;
+ int ret;
+
+ /* If queue is full or skb not available, read to discard mailbox */
+ if (likely(skb_queue_len(&offload->skb_queue) <=
+ offload->skb_queue_len_max))
+ skb = alloc_can_skb(offload->dev, &cf);
+
+ if (!skb) {
+ struct can_frame cf_overflow;
+ u32 timestamp;
+
+ ret = offload->mailbox_read(offload, &cf_overflow,
+ &timestamp, n);
+ if (ret)
+ offload->dev->stats.rx_dropped++;
+
+ return NULL;
+ }
+
+ cb = can_rx_offload_get_cb(skb);
+ ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+ if (!ret) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+{
+ struct sk_buff_head skb_queue;
+ unsigned int i;
+
+ __skb_queue_head_init(&skb_queue);
+
+ for (i = offload->mb_first;
+ can_rx_offload_le(offload, i, offload->mb_last);
+ can_rx_offload_inc(offload, &i)) {
+ struct sk_buff *skb;
+
+ if (!(pending & BIT_ULL(i)))
+ continue;
+
+ skb = can_rx_offload_offload_one(offload, i);
+ if (!skb)
+ break;
+
+ __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
+ }
+
+ if (!skb_queue_empty(&skb_queue)) {
+ unsigned long flags;
+ u32 queue_len;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ if ((queue_len = skb_queue_len(&offload->skb_queue)) >
+ (offload->skb_queue_len_max / 8))
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ can_rx_offload_schedule(offload);
+ }
+
+ return skb_queue_len(&skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
+
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+{
+ struct sk_buff *skb;
+ int received = 0;
+
+ while ((skb = can_rx_offload_offload_one(offload, 0))) {
+ skb_queue_tail(&offload->skb_queue, skb);
+ received++;
+ }
+
+ if (received)
+ can_rx_offload_schedule(offload);
+
+ return received;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+
+int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+{
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max)
+ return -ENOMEM;
+
+ skb_queue_tail(&offload->skb_queue, skb);
+ can_rx_offload_schedule(offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+
+static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+ offload->dev = dev;
+
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
+ offload->skb_queue_len_max = 2 << fls(weight);
+ offload->skb_queue_len_max *= 4;
+ skb_queue_head_init(&offload->skb_queue);
+
+ can_rx_offload_reset(offload);
+ netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+
+ dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+ __func__, offload->skb_queue_len_max);
+
+ return 0;
+}
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+{
+ unsigned int weight;
+
+ if (offload->mb_first > BITS_PER_LONG_LONG ||
+ offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
+ return -EINVAL;
+
+ if (offload->mb_first < offload->mb_last) {
+ offload->inc = true;
+ weight = offload->mb_last - offload->mb_first;
+ } else {
+ offload->inc = false;
+ weight = offload->mb_first - offload->mb_last;
+ }
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
+
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+ if (!offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+
+void can_rx_offload_enable(struct can_rx_offload *offload)
+{
+ can_rx_offload_reset(offload);
+ napi_enable(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+
+void can_rx_offload_del(struct can_rx_offload *offload)
+{
+ netif_napi_del(&offload->napi);
+ skb_queue_purge(&offload->skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_del);
+
+void can_rx_offload_reset(struct can_rx_offload *offload)
+{
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_reset);
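For orientation, a minimal sketch of how a controller driver might consume the FIFO flavour of the rx-offload helpers added above. The my_* names, the stubbed register handling and the exact mailbox_read prototype are assumptions for illustration only, not part of this patch:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>	/* header added by this series (assumed path) */

struct my_priv {
	struct can_rx_offload offload;	/* hypothetical driver private data */
};

/* Fill *cf / *timestamp from mailbox 'n'; return non-zero when a frame was
 * read, zero when the mailbox is empty (matches the usage in
 * can_rx_offload_offload_one() above). Stubbed out here.
 */
static unsigned int my_mailbox_read(struct can_rx_offload *offload,
				    struct can_frame *cf, u32 *timestamp,
				    unsigned int n)
{
	return 0;	/* stub: no frame available */
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	int err;

	priv->offload.mailbox_read = my_mailbox_read;
	err = can_rx_offload_add_fifo(dev, &priv->offload, 64);
	if (err)
		return err;

	can_rx_offload_enable(&priv->offload);
	return 0;
}

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

	/* Drain the hardware FIFO into the offload queue; the NAPI poll
	 * function added above delivers the skbs to the stack later.
	 */
	can_rx_offload_irq_offload_fifo(&priv->offload);

	return IRQ_HANDLED;
}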
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index cdc0c7433a4b..4d4492884e0b 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -310,7 +310,7 @@ pcmcia_bad:
pcmcia_failed:
pcmcia_disable_device(pcmcia);
pcmcia->priv = NULL;
- return ret ?: -ENODEV;
+ return ret;
}
static const struct pcmcia_device_id softingcs_ids[] = {
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff07a55..6749b1829469 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
+ int err;
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c71a03593595..89aec07c225f 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
can_led_event(ndev, CAN_LED_EVENT_RX);
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8346e4f9737a..a3c941632217 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
+bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-y += b53/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 947adda3397d..8cf4801994e8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -712,7 +712,7 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
return B53_MIBS_SIZE;
}
-static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
@@ -723,9 +723,9 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
memcpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
}
+EXPORT_SYMBOL(b53_get_strings);
-static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
@@ -756,13 +756,15 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
mutex_unlock(&dev->stats_mutex);
}
+EXPORT_SYMBOL(b53_get_ethtool_stats);
-static int b53_get_sset_count(struct dsa_switch *ds)
+int b53_get_sset_count(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
return b53_get_mib_size(dev);
}
+EXPORT_SYMBOL(b53_get_sset_count);
static int b53_setup(struct dsa_switch *ds)
{
@@ -921,15 +923,15 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
}
}
-static int b53_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
return 0;
}
+EXPORT_SYMBOL(b53_vlan_filtering);
-static int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
struct b53_device *dev = ds->priv;
@@ -943,10 +945,11 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
+EXPORT_SYMBOL(b53_vlan_prepare);
-static void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+void b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -977,9 +980,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
b53_fast_age_vlan(dev, vid);
}
}
+EXPORT_SYMBOL(b53_vlan_add);
-static int b53_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1015,10 +1019,11 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
+EXPORT_SYMBOL(b53_vlan_del);
-static int b53_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+int b53_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
{
struct b53_device *dev = ds->priv;
u16 vid, vid_start = 0, pvid;
@@ -1057,6 +1062,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port,
return err;
}
+EXPORT_SYMBOL(b53_vlan_dump);
/* Address Resolution Logic routines */
static int b53_arl_op_wait(struct b53_device *dev)
@@ -1137,7 +1143,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
int ret;
/* Convert the array into a 64-bit MAC */
- mac = b53_mac_to_u64(addr);
+ mac = ether_addr_to_u64(addr);
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
@@ -1175,9 +1181,9 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
return b53_arl_rw_op(dev, 0);
}
-static int b53_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+int b53_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
struct b53_device *priv = ds->priv;
@@ -1189,24 +1195,27 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
+EXPORT_SYMBOL(b53_fdb_prepare);
-static void b53_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+void b53_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
struct b53_device *priv = ds->priv;
if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
pr_err("%s: failed to add MAC address\n", __func__);
}
+EXPORT_SYMBOL(b53_fdb_add);
-static int b53_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb)
+int b53_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
{
struct b53_device *priv = ds->priv;
return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
+EXPORT_SYMBOL(b53_fdb_del);
static int b53_arl_search_wait(struct b53_device *dev)
{
@@ -1258,9 +1267,9 @@ static int b53_fdb_copy(struct net_device *dev, int port,
return cb(&fdb->obj);
}
-static int b53_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
{
struct b53_device *priv = ds->priv;
struct net_device *dev = ds->ports[port].netdev;
@@ -1297,9 +1306,9 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+EXPORT_SYMBOL(b53_fdb_dump);
-static int b53_br_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = ds->dst->cpu_port;
@@ -1317,11 +1326,10 @@ static int b53_br_join(struct dsa_switch *ds, int port,
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
}
- dev->ports[port].bridge_dev = bridge;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (dev->ports[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
/* Add this local port to the remote port VLAN control
@@ -1343,11 +1351,11 @@ static int b53_br_join(struct dsa_switch *ds, int port,
return 0;
}
+EXPORT_SYMBOL(b53_br_join);
-static void b53_br_leave(struct dsa_switch *ds, int port)
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- struct net_device *bridge = dev->ports[port].bridge_dev;
struct b53_vlan *vl = &dev->vlans[0];
s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
@@ -1357,7 +1365,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (dev->ports[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -1372,7 +1380,6 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
- dev->ports[port].bridge_dev = NULL;
if (is5325(dev) || is5365(dev))
pvid = 1;
@@ -1393,8 +1400,9 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
b53_set_vlan_entry(dev, pvid, vl);
}
}
+EXPORT_SYMBOL(b53_br_leave);
-static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
struct b53_device *dev = ds->priv;
u8 hw_state;
@@ -1426,21 +1434,88 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
reg |= hw_state;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
+EXPORT_SYMBOL(b53_br_set_stp_state);
-static void b53_br_fast_age(struct dsa_switch *ds, int port)
+void b53_br_fast_age(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
if (b53_fast_age_port(dev, port))
dev_err(ds->dev, "fast ageing failed\n");
}
+EXPORT_SYMBOL(b53_br_fast_age);
static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
{
return DSA_TAG_PROTO_NONE;
}
-static struct dsa_switch_ops b53_switch_ops = {
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg, loc;
+
+ if (ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg &= ~MIRROR_MASK;
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ reg &= ~CAP_PORT_MASK;
+ reg |= mirror->to_local_port;
+ reg |= MIRROR_EN;
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mirror_add);
+
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct b53_device *dev = ds->priv;
+ bool loc_disable = false, other_loc_disable = false;
+ u16 reg, loc;
+
+ if (mirror->ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ /* Update the desired ingress/egress register */
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg &= ~BIT(port);
+ if (!(reg & MIRROR_MASK))
+ loc_disable = true;
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ /* Now look at the other one to know if we can disable mirroring
+ * entirely
+ */
+ if (mirror->ingress)
+ b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
+ else
+ b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
+ if (!(reg & MIRROR_MASK))
+ other_loc_disable = true;
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ /* Both no longer have ports, let's disable mirroring */
+ if (loc_disable && other_loc_disable) {
+ reg &= ~MIRROR_EN;
+ reg &= ~mirror->to_local_port;
+ }
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+}
+EXPORT_SYMBOL(b53_mirror_del);
+
+static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
.get_strings = b53_get_strings,
@@ -1464,6 +1539,8 @@ static struct dsa_switch_ops b53_switch_ops = {
.port_fdb_dump = b53_fdb_dump,
.port_fdb_add = b53_fdb_add,
.port_fdb_del = b53_fdb_del,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
};
struct b53_chip_data {
@@ -1672,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ {
+ .chip_id = BCM7278_DEVICE_ID,
+ .dev_name = "BCM7278",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
};
static int b53_switch_init(struct b53_device *dev)
@@ -1765,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
- ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+ ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
if (!ds)
return NULL;
- dev = (struct b53_device *)(ds + 1);
+ dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
ds->priv = dev;
- ds->dev = base;
dev->dev = base;
dev->ds = ds;
@@ -1869,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev)
pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
- return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+ return dsa_register_switch(dev->ds, dev->ds->dev);
}
EXPORT_SYMBOL(b53_switch_register);
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 477a16b5660a..fa7556f5d4fb 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = {
.of_match_table = b53_of_match,
},
};
-
-static int __init b53_mdio_driver_register(void)
-{
- return mdio_driver_register(&b53_mdio_driver);
-}
-module_init(b53_mdio_driver_register);
-
-static void __exit b53_mdio_driver_unregister(void)
-{
- mdio_driver_unregister(&b53_mdio_driver);
-}
-module_exit(b53_mdio_driver_unregister);
+mdio_module_driver(b53_mdio_driver);
MODULE_DESCRIPTION("B53 MDIO access driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index f192a673caba..a9dc90a01438 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/phy.h>
+#include <linux/etherdevice.h>
#include <net/dsa.h>
#include "b53_regs.h"
@@ -61,6 +62,7 @@ enum {
BCM53019_DEVICE_ID = 0x53019,
BCM58XX_DEVICE_ID = 0x5800,
BCM7445_DEVICE_ID = 0x7445,
+ BCM7278_DEVICE_ID = 0x7278,
};
#define B53_N_PORTS 9
@@ -68,7 +70,6 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
- struct net_device *bridge_dev;
};
struct b53_vlan {
@@ -178,7 +179,8 @@ static inline int is5301x(struct b53_device *dev)
static inline int is58xx(struct b53_device *dev)
{
return dev->chip_id == BCM58XX_DEVICE_ID ||
- dev->chip_id == BCM7445_DEVICE_ID;
+ dev->chip_id == BCM7445_DEVICE_ID ||
+ dev->chip_id == BCM7278_DEVICE_ID;
}
#define B53_CPU_PORT_25 5
@@ -325,25 +327,6 @@ struct b53_arl_entry {
u8 is_static:1;
};
-static inline void b53_mac_from_u64(u64 src, u8 *dst)
-{
- unsigned int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
-}
-
-static inline u64 b53_mac_to_u64(const u8 *src)
-{
- unsigned int i;
- u64 dst = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
-
- return dst;
-}
-
static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
u64 mac_vid, u32 fwd_entry)
{
@@ -352,14 +335,14 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
ent->is_age = !!(fwd_entry & ARLTBL_AGE);
ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
- b53_mac_from_u64(mac_vid, ent->mac);
+ u64_to_ether_addr(mac_vid, ent->mac);
ent->vid = mac_vid >> ARLTBL_VID_S;
}
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
- *mac_vid = b53_mac_to_u64(ent->mac);
+ *mac_vid = ether_addr_to_u64(ent->mac);
*mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
*fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
if (ent->is_valid)
@@ -392,4 +375,41 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
return -ENOENT;
}
#endif
+
+/* Exported functions towards other drivers */
+void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_get_sset_count(struct dsa_switch *ds);
+int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
+void b53_br_fast_age(struct dsa_switch *ds, int port);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+void b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int b53_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj));
+int b53_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans);
+void b53_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans);
+int b53_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb);
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj));
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
#endif
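The removed b53_mac_to_u64()/b53_mac_from_u64() helpers are replaced by the generic converters from <linux/etherdevice.h>; a standalone sketch of the round trip they provide (names invented for illustration):

#include <linux/etherdevice.h>

/* Sketch only: shows the packing the generic helpers perform, matching
 * what the dropped driver-local helpers did.
 */
static void mac_u64_roundtrip_sketch(void)
{
	const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 out[ETH_ALEN];
	u64 packed;

	packed = ether_addr_to_u64(mac);	/* 0x001122334455 */
	u64_to_ether_addr(packed, out);		/* out[] == mac[] again */
}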
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index dac0af4e2cd0..9fd24c418fa4 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -206,6 +206,38 @@
#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+/* Mirror capture control register (16 bit) */
+#define B53_MIR_CAP_CTL 0x10
+#define CAP_PORT_MASK 0xf
+#define BLK_NOT_MIR BIT(14)
+#define MIRROR_EN BIT(15)
+
+/* Ingress mirror control register (16 bit) */
+#define B53_IG_MIR_CTL 0x12
+#define MIRROR_MASK 0x1ff
+#define DIV_EN BIT(13)
+#define MIRROR_FILTER_MASK 0x3
+#define MIRROR_FILTER_SHIFT 14
+#define MIRROR_ALL 0
+#define MIRROR_DA 1
+#define MIRROR_SA 2
+
+/* Ingress mirror divider register (16 bit) */
+#define B53_IG_MIR_DIV 0x14
+#define IN_MIRROR_DIV_MASK 0x3ff
+
+/* Ingress mirror MAC address register (48 bit) */
+#define B53_IG_MIR_MAC 0x16
+
+/* Egress mirror control register (16 bit) */
+#define B53_EG_MIR_CTL 0x1C
+
+/* Egress mirror divider register (16 bit) */
+#define B53_EG_MIR_DIV 0x1E
+
+/* Egress mirror MAC address register (48 bit) */
+#define B53_EG_MIR_MAC 0x20
+
/* Device ID register (8 or 32 bit) */
#define B53_DEVICE_ID 0x30
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2ce7ae97ac91..2be963252ca5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
}
}
-static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, val;
- /* Enable the port memories */
- reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
- reg &= ~P_TXQ_PSM_VDD(port);
- core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
-
- /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
- reg = core_readl(priv, CORE_IMP_CTL);
- reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
- reg &= ~(RX_DIS | TX_DIS);
- core_writel(priv, reg, CORE_IMP_CTL);
-
- /* Enable forwarding */
- core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
-
- /* Enable IMP port in dumb mode */
- reg = core_readl(priv, CORE_SWITCH_CTRL);
- reg |= MII_DUMB_FWDG_EN;
- core_writel(priv, reg, CORE_SWITCH_CTRL);
-
/* Resolve which bit controls the Broadcom tag */
switch (port) {
case 8:
@@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
reg &= ~(1 << port);
core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
+}
+
+static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg, offset;
+
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_IMP;
+ else
+ offset = CORE_STS_OVERRIDE_IMP2;
+
+ /* Enable the port memories */
+ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+ reg &= ~P_TXQ_PSM_VDD(port);
+ core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+ reg = core_readl(priv, CORE_IMP_CTL);
+ reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_IMP_CTL);
+
+ /* Enable forwarding */
+ core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+
+ /* Enable IMP port in dumb mode */
+ reg = core_readl(priv, CORE_SWITCH_CTRL);
+ reg |= MII_DUMB_FWDG_EN;
+ core_writel(priv, reg, CORE_SWITCH_CTRL);
+
+ bcm_sf2_brcm_hdr_setup(priv, port);
/* Force link status for IMP port */
- reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
+ reg = core_readl(priv, offset);
reg |= (MII_SW_OR | LINK_STS);
- core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
+ core_writel(priv, reg, offset);
}
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
@@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->dst[ds->index].cpu_port;
+ unsigned int i;
u32 reg;
/* Clear the memory power down */
@@ -224,6 +237,18 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ /* Enable Broadcom tags for that port if requested */
+ if (priv->brcm_tag_mask & BIT(port))
+ bcm_sf2_brcm_hdr_setup(priv, port);
+
+ /* Configure Traffic Class to QoS mapping, allow each priority to map
+ * to a different queue number
+ */
+ reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+ for (i = 0; i < 8; i++)
+ reg |= i << (PRT_TO_QID_SHIFT * i);
+ core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
/* Clear the Rx and Tx disable bits and set to no spanning tree */
core_writel(priv, 0, CORE_G_PCTL_PORT(port));
@@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
+
+ if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
+ priv->brcm_tag_mask |= 1 << port_num;
}
}
@@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct ethtool_eee *p = &priv->port_sts[port].eee;
u32 id_mode_dis = 0, port_mode;
const char *str = NULL;
- u32 reg;
+ u32 reg, offset;
+
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ else
+ offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -662,7 +695,7 @@ force_link:
if (phydev->duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ core_writel(priv, reg, offset);
if (!phydev->is_pseudo_fixed_link)
p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
@@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 duplex, pause;
+ u32 duplex, pause, offset;
u32 reg;
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ else
+ offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
@@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
status->duplex = !!(duplex & (1 << port));
}
- reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ reg = core_readl(priv, offset);
reg |= SW_OVERRIDE;
if (status->link)
reg |= LINK_STS;
else
reg &= ~LINK_STS;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ core_writel(priv, reg, offset);
if ((pause & (1 << port)) &&
(pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
@@ -977,10 +1015,110 @@ static struct b53_io_ops bcm_sf2_io_ops = {
.write64 = bcm_sf2_core_write64,
};
+static const struct dsa_switch_ops bcm_sf2_ops = {
+ .get_tag_protocol = bcm_sf2_sw_get_tag_protocol,
+ .setup = bcm_sf2_sw_setup,
+ .get_strings = b53_get_strings,
+ .get_ethtool_stats = b53_get_ethtool_stats,
+ .get_sset_count = b53_get_sset_count,
+ .get_phy_flags = bcm_sf2_sw_get_phy_flags,
+ .adjust_link = bcm_sf2_sw_adjust_link,
+ .fixed_link_update = bcm_sf2_sw_fixed_link_update,
+ .suspend = bcm_sf2_sw_suspend,
+ .resume = bcm_sf2_sw_resume,
+ .get_wol = bcm_sf2_sw_get_wol,
+ .set_wol = bcm_sf2_sw_set_wol,
+ .port_enable = bcm_sf2_port_setup,
+ .port_disable = bcm_sf2_port_disable,
+ .get_eee = bcm_sf2_sw_get_eee,
+ .set_eee = bcm_sf2_sw_set_eee,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_prepare = b53_vlan_prepare,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_vlan_dump = b53_vlan_dump,
+ .port_fdb_prepare = b53_fdb_prepare,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+ .get_rxnfc = bcm_sf2_get_rxnfc,
+ .set_rxnfc = bcm_sf2_set_rxnfc,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
+};
+
+struct bcm_sf2_of_data {
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+};
+
+/* Register offsets for the SWITCH_REG_* block */
+static const u16 bcm_sf2_7445_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0C,
+ [REG_SWITCH_REVISION] = 0x18,
+ [REG_PHY_REVISION] = 0x1C,
+ [REG_SPHY_CNTRL] = 0x2C,
+ [REG_RGMII_0_CNTRL] = 0x34,
+ [REG_RGMII_1_CNTRL] = 0x40,
+ [REG_RGMII_2_CNTRL] = 0x4c,
+ [REG_LED_0_CNTRL] = 0x90,
+ [REG_LED_1_CNTRL] = 0x94,
+ [REG_LED_2_CNTRL] = 0x98,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+ .type = BCM7445_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_7445_reg_offsets,
+};
+
+static const u16 bcm_sf2_7278_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+ .type = BCM7278_DEVICE_ID,
+ .core_reg_align = 1,
+ .reg_offsets = bcm_sf2_7278_reg_offsets,
+};
+
+static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm7445-switch-v4.0",
+ .data = &bcm_sf2_7445_data
+ },
+ { .compatible = "brcm,bcm7278-switch-v4.0",
+ .data = &bcm_sf2_7278_data
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
+
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id = NULL;
+ const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
struct bcm_sf2_priv *priv;
@@ -1008,42 +1146,38 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ of_id = of_match_node(bcm_sf2_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ data = of_id->data;
+
+ /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
+ priv->type = data->type;
+ priv->reg_offsets = data->reg_offsets;
+ priv->core_reg_align = data->core_reg_align;
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
*/
- pdata->chip_id = BCM7445_DEVICE_ID;
+ pdata->chip_id = priv->type;
dev->pdata = pdata;
priv->dev = dev;
ds = dev->ds;
-
- /* Override the parts that are non-standard wrt. normal b53 devices */
- memcpy(ops, ds->ops, sizeof(*ops));
- ds->ops = ops;
- ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
- ds->ops->setup = bcm_sf2_sw_setup;
- ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
- ds->ops->adjust_link = bcm_sf2_sw_adjust_link;
- ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update;
- ds->ops->suspend = bcm_sf2_sw_suspend;
- ds->ops->resume = bcm_sf2_sw_resume;
- ds->ops->get_wol = bcm_sf2_sw_get_wol;
- ds->ops->set_wol = bcm_sf2_sw_set_wol;
- ds->ops->port_enable = bcm_sf2_port_setup;
- ds->ops->port_disable = bcm_sf2_port_disable;
- ds->ops->get_eee = bcm_sf2_sw_get_eee;
- ds->ops->set_eee = bcm_sf2_sw_set_eee;
-
- /* Avoid having DSA free our slave MDIO bus (checking for
- * ds->slave_mii_bus and ds->ops->phy_read being non-NULL)
- */
- ds->ops->phy_read = NULL;
+ ds->ops = &bcm_sf2_ops;
dev_set_drvdata(&pdev->dev, priv);
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
+ mutex_init(&priv->cfp.lock);
+
+ /* CFP rule #0 cannot be used for specific classifications, flag it as
+ * permanently used
+ */
+ set_bit(0, priv->cfp.used);
bcm_sf2_identify_ports(priv, dn->child);
@@ -1073,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret) {
+ pr_err("failed to reset CFP\n");
+ goto out_mdio;
+ }
+
/* Disable all interrupts and request them */
bcm_sf2_intr_disable(priv);
@@ -1179,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
bcm_sf2_suspend, bcm_sf2_resume);
-static const struct of_device_id bcm_sf2_of_match[] = {
- { .compatible = "brcm,bcm7445-switch-v4.0" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 44692673e1d5..7d3030e04f11 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -52,6 +52,13 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
};
+struct bcm_sf2_cfp_priv {
+ /* Mutex protecting concurrent accesses to the CFP registers */
+ struct mutex lock;
+ DECLARE_BITMAP(used, CFP_NUM_RULES);
+ unsigned int rules_cnt;
+};
+
struct bcm_sf2_priv {
/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
void __iomem *core;
@@ -61,6 +68,11 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ /* Register offsets indirection tables */
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
@@ -95,6 +107,12 @@ struct bcm_sf2_priv {
struct device_node *master_mii_dn;
struct mii_bus *slave_mii_bus;
struct mii_bus *master_mii_bus;
+
+ /* Bitmask of ports needing BRCM tags */
+ unsigned int brcm_tag_mask;
+
+ /* CFP rules context */
+ struct bcm_sf2_cfp_priv cfp;
};
static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
@@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
return dev->priv;
}
+static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
+{
+ return off << priv->core_reg_align;
+}
+
#define SF2_IO_MACRO(name) \
static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
{ \
@@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
u32 indir, dir; \
spin_lock(&priv->indir_lock); \
- dir = __raw_readl(priv->name + off); \
+ dir = name##_readl(priv, off); \
indir = reg_readl(priv, REG_DIR_DATA_READ); \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
@@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
{ \
spin_lock(&priv->indir_lock); \
reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
- __raw_writel(lower_32_bits(val), priv->name + off); \
+ name##_writel(priv, lower_32_bits(val), off); \
spin_unlock(&priv->indir_lock); \
}
@@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
priv->irq##which##_mask |= (mask); \
} \
-SF2_IO_MACRO(core);
-SF2_IO_MACRO(reg);
+static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ return __raw_readl(priv->core + tmp);
+}
+
+static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ __raw_writel(val, priv->core + tmp);
+}
+
+static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
+{
+ return __raw_readl(priv->reg + priv->reg_offsets[off]);
+}
+
+static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
+{
+ __raw_writel(val, priv->reg + priv->reg_offsets[off]);
+}
+
SF2_IO64_MACRO(core);
SF2_IO_MACRO(intrl2_0);
SF2_IO_MACRO(intrl2_1);
@@ -164,4 +207,11 @@ SF2_IO_MACRO(acb);
SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
+/* RXNFC */
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc);
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+
#endif /* __BCM_SF2_H */
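A short sketch of what the reworked register accessors above resolve to, using the per-SoC values from the offset tables in bcm_sf2.c; the function itself is illustrative only and assumes a priv set up by probe:

/* SWITCH_REG accesses go through reg_offsets[]:
 *   BCM7445: REG_SPHY_CNTRL -> 0x2c	BCM7278: REG_SPHY_CNTRL -> 0x24
 * SWITCH_CORE accesses are shifted by core_reg_align:
 *   BCM7445: align 0 (offset as-is)	BCM7278: align 1 (offset doubled)
 */
static void reg_indirection_sketch(struct bcm_sf2_priv *priv)
{
	u32 sphy = reg_readl(priv, REG_SPHY_CNTRL);
	u32 imp = core_readl(priv, CORE_IMP_CTL);

	reg_writel(priv, sphy, REG_SPHY_CNTRL);
	core_writel(priv, imp, CORE_IMP_CTL);
}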
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
new file mode 100644
index 000000000000..346dd9a1232d
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -0,0 +1,613 @@
+/*
+ * Broadcom Starfighter 2 DSA switch CFP support
+ *
+ * Copyright (C) 2016, Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <net/dsa.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/bitmap.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+
+struct cfp_udf_layout {
+ u8 slices[UDF_NUM_SLICES];
+ u32 mask_value;
+
+};
+
+/* UDF slices layout for a TCPv4/UDPv4 specification */
+static const struct cfp_udf_layout udf_tcpip4_layout = {
+ .slices = {
+ /* End of L2, byte offset 12, src IP[0:15] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[16:31] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, dst IP[0:15] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, dst IP[16:31] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ 0, 0, 0
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+};
+
+static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < UDF_NUM_SLICES; i++) {
+ if (layout[i] != 0)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
+ unsigned int slice_num,
+ const u8 *layout)
+{
+ u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+ unsigned int i;
+
+ for (i = 0; i < UDF_NUM_SLICES; i++)
+ core_writel(priv, layout[i], offset + i * 4);
+}
+
+static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+ reg |= OP_STR_DONE | op;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & OP_STR_DONE))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+ unsigned int addr)
+{
+ u32 reg;
+
+ WARN_ON(addr >= CFP_NUM_RULES);
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+ reg |= addr << XCESS_ADDR_SHIFT;
+ core_writel(priv, reg, CORE_CFP_ACC);
+}
+
+static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+{
+ /* Entry #0 is reserved */
+ return CFP_NUM_RULES - 1;
+}
+
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_tcpip4_spec *v4_spec;
+ const struct cfp_udf_layout *layout;
+ unsigned int slice_num, rule_index;
+ unsigned int queue_num, port_num;
+ u8 ip_proto, ip_frag;
+ u8 num_udf;
+ u32 reg;
+ int ret;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+ /* We do not support discarding packets; check that the
+ * destination port is enabled and within the number of
+ * ports supported by the switch
+ */
+ port_num = fs->ring_cookie / 8;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
+ !(BIT(port_num) & ds->enabled_port_mask) ||
+ port_num >= priv->hw_params.num_ports)
+ return -EINVAL;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ip_proto = IPPROTO_TCP;
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ break;
+ case UDP_V4_FLOW:
+ ip_proto = IPPROTO_UDP;
+ v4_spec = &fs->h_u.udp_ip4_spec;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* We only use one UDF slice for now */
+ slice_num = 1;
+ layout = &udf_tcpip4_layout;
+ num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+
+ /* Apply to all packets received through this port */
+ core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+ /* S-Tag status [31:30]
+ * C-Tag status [29:28]
+ * L2 framing [27:26]
+ * L3 framing [25:24]
+ * IP ToS [23:16]
+ * IP proto [15:08]
+ * IP Fragm [7]
+ * Non 1st frag [6]
+ * IP Authen [5]
+ * TTL range [4:3]
+ * PPPoE session [2]
+ * Reserved [1]
+ * UDF_Valid[8] [0]
+ */
+ core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
+ CORE_CFP_DATA_PORT(6));
+
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+
+ /* C-Tag [31:24]
+ * UDF_n_A8 [23:8]
+ * UDF_n_A7 [7:0]
+ */
+ core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
+
+ /* UDF_n_A7 [31:24]
+ * UDF_n_A6 [23:8]
+ * UDF_n_A5 [7:0]
+ */
+ core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
+ CORE_CFP_DATA_PORT(3));
+
+ /* UDF_n_A5 [31:24]
+ * UDF_n_A4 [23:8]
+ * UDF_n_A3 [7:0]
+ */
+ reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
+ (u32)be16_to_cpu(v4_spec->psrc) << 8 |
+ (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
+
+ /* UDF_n_A3 [31:24]
+ * UDF_n_A2 [23:8]
+ * UDF_n_A1 [7:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
+ (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
+
+ /* UDF_n_A1 [31:24]
+ * UDF_n_A0 [23:8]
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+ /* Mask with the specific layout for IPv4 packets */
+ core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+
+ /* Mask all but valid UDFs */
+ core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+
+ /* Mask all */
+ core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
+
+ /* All other UDFs should be matched with the filter */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
+ core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
+ core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
+ core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
+
+ /* Locate the first rule available */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index = find_first_zero_bit(priv->cfp.used,
+ bcm_sf2_cfp_rule_size(priv));
+ else
+ rule_index = fs->location;
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Replace ARL derived destination with DST_MAP derived, define
+ * which port and queue this should be forwarded to.
+ *
+ * We have a small oddity where Port 6 just does not have a
+ * valid bit here (so we subtract one).
+ */
+ queue_num = fs->ring_cookie % 8;
+ if (port_num >= 7)
+ port_num -= 1;
+
+ reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
+ CHANGE_TC | queue_num << NEW_TC_SHIFT;
+
+ core_writel(priv, reg, CORE_ACT_POL_DATA0);
+
+ /* Set classification ID that needs to be put in Broadcom tag */
+ core_writel(priv, rule_index << CHAIN_ID_SHIFT,
+ CORE_ACT_POL_DATA1);
+
+ core_writel(priv, 0, CORE_ACT_POL_DATA2);
+
+ /* Configure policer RAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+ if (ret) {
+ pr_err("Policer entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Disable the policer */
+ core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+ /* Now the rate meter */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+ if (ret) {
+ pr_err("Meter entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Turn on CFP for this rule now */
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ /* Flag the rule as being used and return it */
+ set_bit(rule_index, priv->cfp.used);
+ fs->location = rule_index;
+
+ return 0;
+}
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
+{
+ int ret;
+ u32 reg;
+
+ /* Refuse deletion of unused rules, and the default reserved rule */
+ if (!test_bit(loc, priv->cfp.used) || loc == 0)
+ return -EINVAL;
+
+ /* Indicate which rule we want to read */
+ bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ /* Clear its valid bits */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+ reg &= ~SLICE_VALID;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+ /* Write back this entry into the TCAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ clear_bit(loc, priv->cfp.used);
+
+ return 0;
+}
+
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
+
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rxnfc *nfc, bool search)
+{
+ struct ethtool_tcpip4_spec *v4_spec;
+ unsigned int queue_num;
+ u16 src_dst_port;
+ u32 reg, ipv4;
+ int ret;
+
+ if (!search) {
+ bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
+ if (ret)
+ return ret;
+
+ reg = core_readl(priv, CORE_ACT_POL_DATA0);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+ } else {
+ reg = core_readl(priv, CORE_ACT_POL_DATA0);
+ }
+
+ /* Extract the destination port */
+ nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
+ DST_MAP_IB_MASK) - 1;
+
+ /* There is no Port 6, so we compensate for that here */
+ if (nfc->fs.ring_cookie >= 6)
+ nfc->fs.ring_cookie++;
+ nfc->fs.ring_cookie *= 8;
+
+ /* Extract the destination queue */
+ queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
+ nfc->fs.ring_cookie += queue_num;
+
+ /* Extract the IP protocol */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+ case IPPROTO_TCP:
+ nfc->fs.flow_type = TCP_V4_FLOW;
+ v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
+ break;
+ case IPPROTO_UDP:
+ nfc->fs.flow_type = UDP_V4_FLOW;
+ v4_spec = &nfc->fs.h_u.udp_ip4_spec;
+ break;
+ default:
+ /* Clear to exit the search process */
+ if (search)
+ core_readl(priv, CORE_CFP_DATA_PORT(7));
+ return -EINVAL;
+ }
+
+ v4_spec->tos = (reg >> 16) & 0xff;
+ nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
+ /* src port [15:8] */
+ src_dst_port = reg << 8;
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
+ /* src port [7:0] */
+ src_dst_port |= (reg >> 24);
+
+ v4_spec->pdst = cpu_to_be16(src_dst_port);
+ nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
+ nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ /* IPv4 dst [15:8] */
+ ipv4 = (reg & 0xff) << 8;
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
+ /* IPv4 dst [31:16] */
+ ipv4 |= ((reg >> 8) & 0xffff) << 16;
+ /* IPv4 dst [7:0] */
+ ipv4 |= (reg >> 24) & 0xff;
+ v4_spec->ip4dst = cpu_to_be32(ipv4);
+ nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ /* IPv4 src [15:8] */
+ ipv4 = (reg & 0xff) << 8;
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+
+ if (!(reg & SLICE_VALID))
+ return -EINVAL;
+
+ /* IPv4 src [7:0] */
+ ipv4 |= (reg >> 24) & 0xff;
+ /* IPv4 src [31:16] */
+ ipv4 |= ((reg >> 8) & 0xffff) << 16;
+ v4_spec->ip4src = cpu_to_be32(ipv4);
+ nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ /* Read this register last so that the next entry does not clobber the
+ * results during search operations
+ */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
+ if (!(reg & 1 << port))
+ return -EINVAL;
+
+ bcm_sf2_invert_masks(&nfc->fs);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+ return 0;
+}
+
+/* We implement the search doing a TCAM search operation */
+static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+ int port, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ unsigned int index = 1, rules_cnt = 0;
+ int ret;
+ u32 reg;
+
+ /* OP_STR_DONE is not self-clearing for search operations, so do not
+ * poll on it here; we cannot use bcm_sf2_cfp_op(), because that helper
+ * waits on OP_STR_DONE clearing, which only happens once the entire
+ * search operation is over.
+ */
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+ reg |= index << XCESS_ADDR_SHIFT;
+ reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+ reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ /* Wait for results to be ready */
+ reg = core_readl(priv, CORE_CFP_ACC);
+
+ /* Extract the address we are searching */
+ index = reg >> XCESS_ADDR_SHIFT;
+ index &= XCESS_ADDR_MASK;
+
+ /* We have a valid search result, so flag it accordingly */
+ if (reg & SEARCH_STS) {
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
+ if (ret)
+ continue;
+
+ rule_locs[rules_cnt] = index;
+ rules_cnt++;
+ }
+
+ /* Search is over, break out */
+ if (!(reg & OP_STR_DONE))
+ break;
+
+ } while (index < CFP_NUM_RULES);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+ nfc->rule_cnt = rules_cnt;
+
+ return 0;
+}
+
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ /* Subtract the default, unusable rule */
+ nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+ CFP_NUM_RULES) - 1;
+ /* We support specifying rule locations */
+ nfc->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ return ret;
+}
+
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ return ret;
+}
+
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg |= TCAM_RESET;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & TCAM_RESET))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
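As a usage note, bcm_sf2_cfp_rule_set()/rule_get() above treat ethtool's ring_cookie as a packed (port, queue) pair; a tiny sketch of that convention (the helper name is invented for illustration):

/* Sketch: ring_cookie = port * 8 + queue, as decoded by the CFP code
 * above (port_num = ring_cookie / 8, queue_num = ring_cookie % 8).
 * e.g. steering a flow to port 5, queue 2 uses ring_cookie 42.
 */
static u64 cfp_ring_cookie_sketch(unsigned int port, unsigned int queue)
{
	return (u64)port * 8 + queue;
}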
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 838fe373cd6f..26052450091e 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -12,22 +12,36 @@
#define __BCM_SF2_REGS_H
/* Register set relative to 'REG' */
-#define REG_SWITCH_CNTRL 0x00
-#define MDIO_MASTER_SEL (1 << 0)
-#define REG_SWITCH_STATUS 0x04
-#define REG_DIR_DATA_WRITE 0x08
-#define REG_DIR_DATA_READ 0x0C
+enum bcm_sf2_reg_offs {
+ REG_SWITCH_CNTRL = 0,
+ REG_SWITCH_STATUS,
+ REG_DIR_DATA_WRITE,
+ REG_DIR_DATA_READ,
+ REG_SWITCH_REVISION,
+ REG_PHY_REVISION,
+ REG_SPHY_CNTRL,
+ REG_RGMII_0_CNTRL,
+ REG_RGMII_1_CNTRL,
+ REG_RGMII_2_CNTRL,
+ REG_LED_0_CNTRL,
+ REG_LED_1_CNTRL,
+ REG_LED_2_CNTRL,
+ REG_SWITCH_REG_MAX,
+};
+
+/* Relative to REG_SWITCH_CNTRL */
+#define MDIO_MASTER_SEL (1 << 0)
-#define REG_SWITCH_REVISION 0x18
+/* Relative to REG_SWITCH_REVISION */
#define SF2_REV_MASK 0xffff
#define SWITCH_TOP_REV_SHIFT 16
#define SWITCH_TOP_REV_MASK 0xffff
-#define REG_PHY_REVISION 0x1C
+/* Relative to REG_PHY_REVISION */
#define PHY_REVISION_MASK 0xffff
-#define REG_SPHY_CNTRL 0x2C
+/* Relative to REG_SPHY_CNTRL */
#define IDDQ_BIAS (1 << 0)
#define EXT_PWR_DOWN (1 << 1)
#define FORCE_DLL_EN (1 << 2)
@@ -37,13 +51,8 @@
#define PHY_PHYAD_SHIFT 8
#define PHY_PHYAD_MASK 0x1F
-#define REG_RGMII_0_BASE 0x34
-#define REG_RGMII_CNTRL 0x00
-#define REG_RGMII_IB_STATUS 0x04
-#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08
-#define REG_RGMII_CNTRL_SIZE 0x0C
-#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \
- ((x) * REG_RGMII_CNTRL_SIZE))
+#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x))
+
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
#define ID_MODE_DIS (1 << 1)
@@ -61,8 +70,8 @@
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
-#define REG_LED_CNTRL_BASE 0x90
-#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4)
+#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x))
+
#define SPDLNK_SRC_SEL (1 << 24)
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
@@ -125,6 +134,9 @@
#define GMII_SPEED_UP_2G (1 << 6)
#define MII_SW_OR (1 << 7)
+/* Alternate layout for e.g: 7278 */
+#define CORE_STS_OVERRIDE_IMP2 0x39040
+
#define CORE_NEW_CTRL 0x00084
#define IP_MC (1 << 0)
#define OUTRANGEERR_DISCARD (1 << 1)
@@ -142,6 +154,7 @@
#define SW_LEARN_CNTL(x) (1 << (x))
#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4)
+#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8)
#define LINK_STS (1 << 0)
#define DUPLX_MODE (1 << 1)
#define SPEED_SHIFT 2
@@ -225,6 +238,10 @@
#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \
((x) * P_TXQ_PSM_VDD_SHIFT))
+#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10))
+#define PRT_TO_QID_MASK 0x3
+#define PRT_TO_QID_SHIFT 3
+
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
@@ -238,4 +255,150 @@
#define CORE_EEE_EN_CTRL 0x24800
#define CORE_EEE_LPI_INDICATE 0x24810
+#define CORE_CFP_ACC 0x28000
+#define OP_STR_DONE (1 << 0)
+#define OP_SEL_SHIFT 1
+#define OP_SEL_READ (1 << OP_SEL_SHIFT)
+#define OP_SEL_WRITE (2 << OP_SEL_SHIFT)
+#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT)
+#define OP_SEL_MASK (7 << OP_SEL_SHIFT)
+#define CFP_RAM_CLEAR (1 << 4)
+#define RAM_SEL_SHIFT 10
+#define TCAM_SEL (1 << RAM_SEL_SHIFT)
+#define ACT_POL_RAM (2 << RAM_SEL_SHIFT)
+#define RATE_METER_RAM (4 << RAM_SEL_SHIFT)
+#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT)
+#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT)
+#define RED_STAT_RAM (24 << RAM_SEL_SHIFT)
+#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT)
+#define TCAM_RESET (1 << 15)
+#define XCESS_ADDR_SHIFT 16
+#define XCESS_ADDR_MASK 0xff
+#define SEARCH_STS (1 << 27)
+#define RD_STS_SHIFT 28
+#define RD_STS_TCAM (1 << RD_STS_SHIFT)
+#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT)
+#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT)
+#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT)
+
+#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010
+
+#define CORE_CFP_DATA_PORT_0 0x28040
+#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \
+ (x) * 0x10)
+
+/* UDF_DATA7 */
+#define L3_FRAMING_SHIFT 24
+#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT)
+#define IPPROTO_SHIFT 8
+#define IPPROTO_MASK (0xff << IPPROTO_SHIFT)
+#define IP_FRAG (1 << 7)
+
+/* UDF_DATA0 */
+#define SLICE_VALID 3
+#define SLICE_NUM_SHIFT 2
+#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT)
+
+#define CORE_CFP_MASK_PORT_0 0x280c0
+
+#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \
+ (x) * 0x10)
+
+#define CORE_ACT_POL_DATA0 0x28140
+#define VLAN_BYP (1 << 0)
+#define EAP_BYP (1 << 1)
+#define STP_BYP (1 << 2)
+#define REASON_CODE_SHIFT 3
+#define REASON_CODE_MASK 0x3f
+#define LOOP_BK_EN (1 << 9)
+#define NEW_TC_SHIFT 10
+#define NEW_TC_MASK 0x7
+#define CHANGE_TC (1 << 13)
+#define DST_MAP_IB_SHIFT 14
+#define DST_MAP_IB_MASK 0x1ff
+#define CHANGE_FWRD_MAP_IB_SHIFT 24
+#define CHANGE_FWRD_MAP_IB_MASK 0x3
+#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define NEW_DSCP_IB_SHIFT 26
+#define NEW_DSCP_IB_MASK 0x3f
+
+#define CORE_ACT_POL_DATA1 0x28150
+#define CHANGE_DSCP_IB (1 << 0)
+#define DST_MAP_OB_SHIFT 1
+#define DST_MAP_OB_MASK 0x3ff
+#define CHANGE_FWRD_MAP_OB_SHIFT 11
+#define CHANGE_FWRD_MAP_OB_MASK 0x3
+#define NEW_DSCP_OB_SHIFT 13
+#define NEW_DSCP_OB_MASK 0x3f
+#define CHANGE_DSCP_OB (1 << 19)
+#define CHAIN_ID_SHIFT 20
+#define CHAIN_ID_MASK 0xff
+#define CHANGE_COLOR (1 << 28)
+#define NEW_COLOR_SHIFT 29
+#define NEW_COLOR_MASK 0x3
+#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT)
+#define RED_DEFAULT (1 << 31)
+
+#define CORE_ACT_POL_DATA2 0x28160
+#define MAC_LIMIT_BYPASS (1 << 0)
+#define CHANGE_TC_O (1 << 1)
+#define NEW_TC_O_SHIFT 2
+#define NEW_TC_O_MASK 0x7
+#define SPCP_RMK_DISABLE (1 << 5)
+#define CPCP_RMK_DISABLE (1 << 6)
+#define DEI_RMK_DISABLE (1 << 7)
+
+#define CORE_RATE_METER0 0x28180
+#define COLOR_MODE (1 << 0)
+#define POLICER_ACTION (1 << 1)
+#define COUPLING_FLAG (1 << 2)
+#define POLICER_MODE_SHIFT 3
+#define POLICER_MODE_MASK 0x3
+#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT)
+
+#define CORE_RATE_METER1 0x28190
+#define EIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER2 0x281a0
+#define EIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER3 0x281b0
+#define EIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_RATE_METER4 0x281c0
+#define CIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER5 0x281d0
+#define CIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER6 0x281e0
+#define CIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_CFP_CTL_REG 0x28400
+#define CFP_EN_MAP_MASK 0x1ff
+
+/* IPv4 slices, 3 of them */
+#define CORE_UDF_0_A_0_8_PORT_0 0x28440
+#define CFG_UDF_OFFSET_MASK 0x1f
+#define CFG_UDF_OFFSET_BASE_SHIFT 5
+#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT)
+
+/* Number of slices for IPv4, IPv6 and non-IP */
+#define UDF_NUM_SLICES 9
+
+/* Spacing between different slices */
+#define UDF_SLICE_OFFSET 0x40
+
+#define CFP_NUM_RULES 256
+
#endif /* __BCM_SF2_REGS_H */
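
The header now exposes symbolic register indices rather than fixed byte offsets, so the same driver code can serve SoCs whose 'REG' block is laid out differently (such as the 7278 variant referenced above). Below is a minimal sketch of one way such indices could be resolved; the offset values are simply the pre-existing 7445-style offsets removed by this patch, and the table/helper names are illustrative, not the driver's actual ones.

/*
 * Illustrative only: resolving the symbolic indices from bcm_sf2_regs.h
 * to SoC-specific byte offsets through a per-chip table.
 */
#include <linux/types.h>
#include <linux/io.h>
#include "bcm_sf2_regs.h"

static const u16 example_7445_reg_offsets[REG_SWITCH_REG_MAX] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1c,
	[REG_SPHY_CNTRL]	= 0x2c,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static inline u32 example_reg_readl(void __iomem *base, u16 idx)
{
	return readl(base + example_7445_reg_offsets[idx]);
}
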
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 7ce36dbd9b62..5934b7a4c448 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -252,7 +252,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(ds, addr, regnum, val);
}
-static struct dsa_switch_ops mv88e6060_switch_ops = {
+static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
.probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
@@ -261,16 +261,20 @@ static struct dsa_switch_ops mv88e6060_switch_ops = {
.phy_write = mv88e6060_phy_write,
};
+static struct dsa_switch_driver mv88e6060_switch_drv = {
+ .ops = &mv88e6060_switch_ops,
+};
+
static int __init mv88e6060_init(void)
{
- register_switch_driver(&mv88e6060_switch_ops);
+ register_switch_driver(&mv88e6060_switch_drv);
return 0;
}
module_init(mv88e6060_init);
static void __exit mv88e6060_cleanup(void)
{
- unregister_switch_driver(&mv88e6060_switch_ops);
+ unregister_switch_driver(&mv88e6060_switch_drv);
}
module_exit(mv88e6060_cleanup);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f7222dc6581d..03dc886ed3d6 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -222,26 +222,62 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+
+ mdio_bus = list_first_entry_or_null(&chip->mdios, struct mv88e6xxx_mdio_bus,
+ list);
+ if (!mdio_bus)
+ return NULL;
+
+ return mdio_bus->bus;
+}
+
static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val)
{
int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- return chip->info->ops->phy_read(chip, addr, reg, val);
+ return chip->info->ops->phy_read(chip, bus, addr, reg, val);
}
static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val)
{
int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- return chip->info->ops->phy_write(chip, addr, reg, val);
+ return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
@@ -611,8 +647,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
del_timer_sync(&chip->ppu_timer);
}
-static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 *val)
+static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
int err;
@@ -625,8 +662,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
return err;
}
-static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val)
+static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
{
int err;
@@ -639,11 +677,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
return err;
}
-static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6095;
-}
-
static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6097;
@@ -654,14 +687,14 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6165;
}
-static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
{
- return chip->info->family == MV88E6XXX_FAMILY_6185;
+ return chip->info->family == MV88E6XXX_FAMILY_6320;
}
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
+static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
{
- return chip->info->family == MV88E6XXX_FAMILY_6320;
+ return chip->info->family == MV88E6XXX_FAMILY_6341;
}
static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
@@ -706,6 +739,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
+ if (chip->info->ops->port_set_cmode) {
+ err = chip->info->ops->port_set_cmode(chip, port, mode);
+ if (err && err != -EOPNOTSUPP)
+ goto restore_link;
+ }
+
err = 0;
restore_link:
if (chip->info->ops->port_set_link(chip, port, link))
@@ -1209,8 +1248,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
- struct net_device *bridge = chip->ports[port].bridge_dev;
struct dsa_switch *ds = chip->ds;
+ struct net_device *bridge = ds->ports[port].bridge_dev;
u16 output_ports = 0;
int i;
@@ -1220,7 +1259,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
} else {
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
- if (bridge && chip->ports[i].bridge_dev == bridge)
+ if (bridge && ds->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
/* allow sending frames to CPU port and DSA link(s) */
@@ -1688,7 +1727,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6341_family(chip)) {
struct mv88e6xxx_vtu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1782,17 +1822,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (chip->ports[i].bridge_dev ==
- chip->ports[port].bridge_dev)
+ if (ds->ports[i].bridge_dev ==
+ ds->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
- if (!chip->ports[i].bridge_dev)
+ if (!ds->ports[i].bridge_dev)
continue;
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
- netdev_name(chip->ports[i].bridge_dev));
+ netdev_name(ds->ports[i].bridge_dev));
err = -EOPNOTSUPP;
goto unlock;
}
@@ -2023,7 +2063,8 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
struct mv88e6xxx_atu_entry next;
int err;
- eth_broadcast_addr(next.mac);
+ memcpy(next.mac, addr, ETH_ALEN);
+ eth_addr_dec(next.mac);
err = _mv88e6xxx_atu_mac_write(chip, next.mac);
if (err)
@@ -2041,7 +2082,7 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
*entry = next;
return 0;
}
- } while (!is_broadcast_ether_addr(next.mac));
+ } while (ether_addr_greater(addr, next.mac));
memset(entry, 0, sizeof(*entry));
entry->fid = fid;
@@ -2281,18 +2322,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
int i, err = 0;
mutex_lock(&chip->reg_lock);
- /* Assign the bridge and remap each port's VLANTable */
- chip->ports[port].bridge_dev = bridge;
-
+ /* Remap each port's VLANTable */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (chip->ports[i].bridge_dev == bridge) {
+ if (ds->ports[i].bridge_dev == br) {
err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
break;
@@ -2304,19 +2343,17 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct net_device *bridge = chip->ports[port].bridge_dev;
int i;
mutex_lock(&chip->reg_lock);
- /* Unassign the bridge and remap each port's VLANTable */
- chip->ports[port].bridge_dev = NULL;
-
+ /* Remap each port's VLANTable */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (i == port || chip->ports[i].bridge_dev == bridge)
+ if (i == port || ds->ports[i].bridge_dev == br)
if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
"failed to remap\n");
@@ -2538,31 +2575,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* received packets as usual, disable ARP mirroring and don't send a
* copy of all transmitted/received frames on this port to the CPU.
*/
- reg = 0;
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
- mv88e6xxx_6185_family(chip))
- reg = PORT_CONTROL_2_MAP_DA;
-
- if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
- /* Set the upstream port this port should use */
- reg |= dsa_upstream_port(ds);
- /* enable forwarding of unknown multicast addresses to
- * the upstream port
- */
- if (port == dsa_upstream_port(ds))
- reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
- }
-
- reg |= PORT_CONTROL_2_8021Q_DISABLED;
+ err = mv88e6xxx_port_set_map_da(chip, port);
+ if (err)
+ return err;
- if (reg) {
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ reg = 0;
+ if (chip->info->ops->port_set_upstream_port) {
+ err = chip->info->ops->port_set_upstream_port(
+ chip, port, dsa_upstream_port(ds));
if (err)
return err;
}
+ err = mv88e6xxx_port_set_8021q_mode(chip, port,
+ PORT_CONTROL_2_8021Q_DISABLED);
+ if (err)
+ return err;
+
if (chip->info->ops->port_jumbo_config) {
err = chip->info->ops->port_jumbo_config(chip, port);
if (err)
@@ -2596,7 +2625,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
+ mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
@@ -2820,7 +2849,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
int i;
chip->ds = ds;
- ds->slave_mii_bus = chip->mdio_bus;
+ ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
mutex_lock(&chip->reg_lock);
@@ -2877,50 +2906,64 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
- struct mv88e6xxx_chip *chip = bus->priv;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 val;
int err;
- if (phy >= mv88e6xxx_num_ports(chip))
- return 0xffff;
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_read(chip, phy, reg, &val);
+ err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mutex_unlock(&chip->reg_lock);
+ if (reg == MII_PHYSID2) {
+ /* Some internal PHYs don't have a model number. Use
+ * the mv88e6390 family model number instead.
+ */
+ if (!(val & 0x3f0))
+ val |= PORT_SWITCH_ID_PROD_NUM_6390;
+ }
+
return err ? err : val;
}
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
- struct mv88e6xxx_chip *chip = bus->priv;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
int err;
- if (phy >= mv88e6xxx_num_ports(chip))
- return 0xffff;
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_write(chip, phy, reg, val);
+ err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
mutex_unlock(&chip->reg_lock);
return err;
}
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
- struct device_node *np)
+ struct device_node *np,
+ bool external)
{
static int index;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
struct mii_bus *bus;
int err;
- if (np)
- chip->mdio_np = of_get_child_by_name(np, "mdio");
-
- bus = devm_mdiobus_alloc(chip->dev);
+ bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
- bus->priv = (void *)chip;
+ mdio_bus = bus->priv;
+ mdio_bus->bus = bus;
+ mdio_bus->chip = chip;
+ INIT_LIST_HEAD(&mdio_bus->list);
+ mdio_bus->external = external;
+
if (np) {
bus->name = np->full_name;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
@@ -2933,183 +2976,73 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
bus->write = mv88e6xxx_mdio_write;
bus->parent = chip->dev;
- if (chip->mdio_np)
- err = of_mdiobus_register(bus, chip->mdio_np);
+ if (np)
+ err = of_mdiobus_register(bus, np);
else
err = mdiobus_register(bus);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
- goto out;
+ return err;
}
- chip->mdio_bus = bus;
-
- return 0;
-
-out:
- if (chip->mdio_np)
- of_node_put(chip->mdio_np);
-
- return err;
-}
-
-static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
-
-{
- struct mii_bus *bus = chip->mdio_bus;
-
- mdiobus_unregister(bus);
-
- if (chip->mdio_np)
- of_node_put(chip->mdio_np);
-}
-
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- u16 val;
- int ret;
-
- *temp = 0;
-
- mutex_lock(&chip->reg_lock);
-
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6);
- if (ret < 0)
- goto error;
-
- /* Enable temperature sensor */
- ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5));
- if (ret < 0)
- goto error;
-
- /* Wait for temperature to stabilize */
- usleep_range(10000, 12000);
- ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
- if (ret < 0)
- goto error;
-
- /* Disable temperature sensor */
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5));
- if (ret < 0)
- goto error;
-
- *temp = ((val & 0x1f) - 5) * 5;
-
-error:
- mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0);
- mutex_unlock(&chip->reg_lock);
- return ret;
-}
-
-static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- *temp = 0;
-
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
-
- *temp = (val & 0xff) - 25;
+ if (external)
+ list_add_tail(&mdio_bus->list, &chip->mdios);
+ else
+ list_add(&mdio_bus->list, &chip->mdios);
return 0;
}
-static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
- return -EOPNOTSUPP;
-
- if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
- return mv88e63xx_get_temp(ds, temp);
-
- return mv88e61xx_get_temp(ds, temp);
-}
+static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
+ { .compatible = "marvell,mv88e6xxx-mdio-external",
+ .data = (void *)true },
+ { },
+};
-static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- *temp = 0;
+ const struct of_device_id *match;
+ struct device_node *child;
+ int err;
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
+ /* Always register one mdio bus for the internal/default mdio
+ * bus. This maybe represented in the device tree, but is
+ * optional.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ err = mv88e6xxx_mdio_register(chip, child, false);
+ if (err)
+ return err;
- *temp = (((val >> 8) & 0x1f) * 5) - 25;
+ /* Walk the device tree, and see if there are any other nodes
+ * which say they are compatible with the external mdio
+ * bus.
+ */
+ for_each_available_child_of_node(np, child) {
+ match = of_match_node(mv88e6xxx_mdio_external_match, child);
+ if (match) {
+ err = mv88e6xxx_mdio_register(chip, child, true);
+ if (err)
+ return err;
+ }
+ }
return 0;
}
-static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int err;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- if (err)
- goto unlock;
- temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- err = mv88e6xxx_phy_page_write(chip, phy, 6, 26,
- (val & 0xe0ff) | (temp << 8));
-unlock:
- mutex_unlock(&chip->reg_lock);
-
- return err;
-}
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- *alarm = false;
-
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
- *alarm = !!(val & 0x40);
+ list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ bus = mdio_bus->bus;
- return 0;
+ mdiobus_unregister(bus);
+ }
}
-#endif /* CONFIG_NET_DSA_HWMON */
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
@@ -3178,6 +3111,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3193,7 +3127,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3225,6 +3160,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3232,8 +3168,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3245,6 +3181,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3259,8 +3196,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
.port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
@@ -3270,6 +3208,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3279,8 +3218,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3297,6 +3236,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3304,8 +3244,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3315,6 +3255,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3341,6 +3282,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3369,6 +3311,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3395,6 +3338,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3423,6 +3367,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3436,14 +3381,16 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3452,6 +3399,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3471,12 +3420,15 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3496,12 +3448,15 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3521,6 +3476,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3549,12 +3505,15 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3567,6 +3526,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3574,6 +3534,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3653,6 +3614,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3679,6 +3641,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3707,12 +3670,73 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.stats_get_stats = mv88e6095_stats_get_stats,
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3727,6 +3751,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3734,12 +3759,15 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3761,12 +3789,15 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
static const struct mv88e6xxx_ops mv88e6391_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3786,6 +3817,7 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
.stats_get_stats = mv88e6390_stats_get_stats,
.g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
.g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
};
@@ -3996,7 +4028,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.tag_protocol = DSA_TAG_PROTO_DSA,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6190_ops,
@@ -4010,7 +4042,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.port_base_addr = 0x0,
.global1_addr = 0x1b,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4025,7 +4057,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.port_base_addr = 0x0,
.global1_addr = 0x1b,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4055,7 +4087,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.port_base_addr = 0x0,
.global1_addr = 0x1b,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4092,6 +4124,34 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6321_ops,
},
+ [MV88E6141] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 3750,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+ .ops = &mv88e6141_ops,
+ },
+
+ [MV88E6341] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 3750,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+ .ops = &mv88e6341_ops,
+ },
+
[MV88E6350] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
.family = MV88E6XXX_FAMILY_6351,
@@ -4144,7 +4204,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.port_base_addr = 0x0,
.global1_addr = 0x1b,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4158,7 +4218,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.port_base_addr = 0x0,
.global1_addr = 0x1b,
- .age_time_coeff = 15000,
+ .age_time_coeff = 3750,
.g1_irqs = 9,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4221,6 +4281,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
chip->dev = dev;
mutex_init(&chip->reg_lock);
+ INIT_LIST_HEAD(&chip->mdios);
return chip;
}
@@ -4240,10 +4301,6 @@ static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
- /* ADDR[0] pin is unavailable externally and considered zero */
- if (sw_addr & 0x1)
- return -EINVAL;
-
if (sw_addr == 0)
chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP))
@@ -4299,7 +4356,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
mv88e6xxx_phy_init(chip);
- err = mv88e6xxx_mdio_register(chip, NULL);
+ err = mv88e6xxx_mdios_register(chip, NULL);
if (err)
goto free;
@@ -4364,7 +4421,7 @@ static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port,
return err;
}
-static struct dsa_switch_ops mv88e6xxx_switch_ops = {
+static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.probe = mv88e6xxx_drv_probe,
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
@@ -4375,12 +4432,6 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
- .get_temp_limit = mv88e6xxx_get_temp_limit,
- .set_temp_limit = mv88e6xxx_set_temp_limit,
- .get_temp_alarm = mv88e6xxx_get_temp_alarm,
-#endif
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
.set_eeprom = mv88e6xxx_set_eeprom,
@@ -4406,23 +4457,25 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_dump = mv88e6xxx_port_mdb_dump,
};
-static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
- struct device_node *np)
+static struct dsa_switch_driver mv88e6xxx_switch_drv = {
+ .ops = &mv88e6xxx_switch_ops,
+};
+
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
{
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
if (!ds)
return -ENOMEM;
- ds->dev = dev;
ds->priv = chip;
ds->ops = &mv88e6xxx_switch_ops;
dev_set_drvdata(dev, ds);
- return dsa_register_switch(ds, np);
+ return dsa_register_switch(ds, dev);
}
static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
@@ -4502,18 +4555,18 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
}
}
- err = mv88e6xxx_mdio_register(chip, np);
+ err = mv88e6xxx_mdios_register(chip, np);
if (err)
goto out_g2_irq;
- err = mv88e6xxx_register_switch(chip, np);
+ err = mv88e6xxx_register_switch(chip);
if (err)
goto out_mdio;
return 0;
out_mdio:
- mv88e6xxx_mdio_unregister(chip);
+ mv88e6xxx_mdios_unregister(chip);
out_g2_irq:
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0)
mv88e6xxx_g2_irq_free(chip);
@@ -4534,7 +4587,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
- mv88e6xxx_mdio_unregister(chip);
+ mv88e6xxx_mdios_unregister(chip);
if (chip->irq > 0) {
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
@@ -4568,7 +4621,7 @@ static struct mdio_driver mv88e6xxx_driver = {
static int __init mv88e6xxx_init(void)
{
- register_switch_driver(&mv88e6xxx_switch_ops);
+ register_switch_driver(&mv88e6xxx_switch_drv);
return mdio_driver_register(&mv88e6xxx_driver);
}
module_init(mv88e6xxx_init);
@@ -4576,7 +4629,7 @@ module_init(mv88e6xxx_init);
static void __exit mv88e6xxx_cleanup(void)
{
mdio_driver_unregister(&mv88e6xxx_driver);
- unregister_switch_driver(&mv88e6xxx_switch_ops);
+ unregister_switch_driver(&mv88e6xxx_switch_drv);
}
module_exit(mv88e6xxx_cleanup);
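
The mv88e6xxx_atu_get() change above replaces the broadcast-to-broadcast walk with a targeted lookup: the driver writes the address just below the MAC being searched for, then lets the ATU Get Next operation walk upward until it reaches or passes that MAC. A hedged sketch of the address arithmetic, using the u64 helpers from <linux/etherdevice.h>; the example_ function is illustrative and not part of the driver.

/*
 * Illustrative only: compute the starting address for a Get Next based
 * lookup of @addr, as done by mv88e6xxx_atu_get() above.
 */
#include <linux/etherdevice.h>

static void example_atu_start_addr(const u8 *addr, u8 *start)
{
	u64 a = ether_addr_to_u64(addr);

	/*
	 * Get Next walks upward from the address written to the ATU, so
	 * starting one below the MAC of interest ensures an existing
	 * entry for that MAC is returned rather than skipped. (The
	 * all-zero address is not a valid entry, so the underflow case
	 * is ignored in this sketch.)
	 */
	u64_to_ether_addr(a - 1, start);
}
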
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 3e77071949ab..8f15bc7b1f5f 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
}
/* Offset 0x14: EEPROM Command
- * Offset 0x15: EEPROM Data
+ * Offset 0x15: EEPROM Data (for 16-bit data access)
+ * Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
@@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
return mv88e6xxx_g2_eeprom_wait(chip);
}
+static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 *data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd);
+ if (err)
+ return err;
+
+ *data = cmd & 0xff;
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data);
+}
+
static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
u8 addr, u16 *data)
{
@@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
}
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_read8(chip, offset, data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
@@ -410,12 +501,67 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
return mv88e6xxx_g2_smi_phy_wait(chip);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val)
+static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
+ int addr, int device, int reg,
+ bool external)
+{
+ int cmd = SMI_CMD_OP_45_WRITE_ADDR | (addr << 5) | device;
+ int err;
+
+ if (external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+}
+
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
+ int reg_c45, u16 *val, bool external)
+{
+ int device = (reg_c45 >> 16) & 0x1f;
+ int reg = reg_c45 & 0xffff;
+ int err;
+ u16 cmd;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
+ external);
+ if (err)
+ return err;
+
+ cmd = GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | (addr << 5) | device;
+
+ if (external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+ err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+ if (err)
+ return err;
+
+ err = *val;
+
+ return 0;
+}
+
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 *val, bool external)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
int err;
+ if (external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
@@ -427,12 +573,57 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val)
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ if (reg & MII_ADDR_C45)
+ return mv88e6xxx_g2_smi_phy_read_c45(chip, addr, reg, val,
+ external);
+ return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
+ int reg_c45, u16 val, bool external)
+{
+ int device = (reg_c45 >> 16) & 0x1f;
+ int reg = reg_c45 & 0xffff;
+ int err;
+ u16 cmd;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
+ external);
+ if (err)
+ return err;
+
+ cmd = GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA | (addr << 5) | device;
+
+ if (external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 val, bool external)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
int err;
+ if (external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
@@ -444,6 +635,153 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ if (reg & MII_ADDR_C45)
+ return mv88e6xxx_g2_smi_phy_write_c45(chip, addr, reg, val,
+ external);
+
+ return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external);
+}
+
+static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+ dev_info(chip->dev, "Watchdog event: 0x%04x", reg);
+
+ return IRQ_HANDLED;
+}
+
+static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+ reg &= ~(GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
+ GLOBAL2_WDOG_CONTROL_QC_ENABLE);
+
+ mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, reg);
+}
+
+static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL,
+ GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
+ GLOBAL2_WDOG_CONTROL_QC_ENABLE |
+ GLOBAL2_WDOG_CONTROL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
+ .irq_action = mv88e6097_watchdog_action,
+ .irq_setup = mv88e6097_watchdog_setup,
+ .irq_free = mv88e6097_watchdog_free,
+};
+
+static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
+ GLOBAL2_WDOG_INT_ENABLE |
+ GLOBAL2_WDOG_CUT_THROUGH |
+ GLOBAL2_WDOG_QUEUE_CONTROLLER |
+ GLOBAL2_WDOG_EGRESS |
+ GLOBAL2_WDOG_FORCE_IRQ);
+}
+
+static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ int err;
+ u16 reg;
+
+ mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_EVENT);
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+ dev_info(chip->dev, "Watchdog event: 0x%04x",
+ reg & GLOBAL2_WDOG_DATA_MASK);
+
+ mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_HISTORY);
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+ dev_info(chip->dev, "Watchdog history: 0x%04x",
+ reg & GLOBAL2_WDOG_DATA_MASK);
+
+ /* Trigger a software reset to try to recover the switch */
+ if (chip->info->ops->reset)
+ chip->info->ops->reset(chip);
+
+ mv88e6390_watchdog_setup(chip);
+
+ return IRQ_HANDLED;
+}
+
+static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
+ GLOBAL2_WDOG_INT_ENABLE);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
+ .irq_action = mv88e6390_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
+static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->watchdog_ops->irq_action)
+ ret = chip->info->ops->watchdog_ops->irq_action(chip, irq);
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->watchdog_ops->irq_free)
+ chip->info->ops->watchdog_ops->irq_free(chip);
+ mutex_unlock(&chip->reg_lock);
+
+ free_irq(chip->watchdog_irq, chip);
+ irq_dispose_mapping(chip->watchdog_irq);
+}
+
+static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
+ GLOBAL2_INT_SOURCE_WATCHDOG);
+ if (chip->watchdog_irq < 0)
+ return chip->watchdog_irq;
+
+ err = request_threaded_irq(chip->watchdog_irq, NULL,
+ mv88e6xxx_g2_watchdog_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ "mv88e6xxx-watchdog", chip);
+ if (err)
+ return err;
+
+ mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->watchdog_ops->irq_setup)
+ err = chip->info->ops->watchdog_ops->irq_setup(chip);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -532,6 +870,8 @@ void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
{
int irq, virq;
+ mv88e6xxx_g2_watchdog_free(chip);
+
free_irq(chip->device_irq, chip);
irq_dispose_mapping(chip->device_irq);
@@ -574,7 +914,7 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
goto out;
- return 0;
+ return mv88e6xxx_g2_watchdog_setup(chip);
out:
for (irq = 0; irq < 16; irq++) {
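
mv88e6xxx_g2_smi_phy_read()/write() above now split Clause 22 and Clause 45 paths, keyed off the MII_ADDR_C45 flag and the devad/regnum packing used by the MDIO core (devad in bits 20:16, register in bits 15:0, as decoded by the _c45 helpers). A short sketch of that encoding follows; the register chosen is only an example.

/*
 * Illustrative only: how a Clause 45 access is encoded in the single
 * "reg" argument decoded by mv88e6xxx_g2_smi_phy_read_c45() above.
 */
#include <linux/mdio.h>
#include <linux/phy.h>

static u32 example_c45_reg(int devad, int regnum)
{
	return MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
}

/*
 * e.g. reading PCS status register 1 in MMD 3 through the switch's SMI
 * engine (values illustrative):
 *
 *	val = mdiobus_read(bus, phy_addr,
 *			   example_c45_reg(MDIO_MMD_PCS, MDIO_STAT1));
 */
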
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 9aefb7d8b0ad..a8b2f9486a4a 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -23,20 +23,32 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val);
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
+
int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
@@ -50,12 +62,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
}
static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
int addr, int reg, u16 *val)
{
return -EOPNOTSUPP;
}
static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
int addr, int reg, u16 val)
{
return -EOPNOTSUPP;
@@ -67,6 +81,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom,
u8 *data)
@@ -100,6 +128,9 @@ static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
return -EOPNOTSUPP;
}
+static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
+static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index af54baea47cf..6033f2f6260a 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
+#include <linux/phy.h>
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
@@ -58,6 +59,9 @@
#define PORT_STATUS_CMODE_100BASE_X 0x8
#define PORT_STATUS_CMODE_1000BASE_X 0x9
#define PORT_STATUS_CMODE_SGMII 0xa
+#define PORT_STATUS_CMODE_2500BASEX 0xb
+#define PORT_STATUS_CMODE_XAUI 0xc
+#define PORT_STATUS_CMODE_RXAUI 0xd
#define PORT_PCS_CTRL 0x01
#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
@@ -87,6 +91,7 @@
#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
+#define PORT_SWITCH_ID_PROD_NUM_6141 0x340
#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
@@ -100,6 +105,7 @@
#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
#define PORT_SWITCH_ID_PROD_NUM_6290 0x290
#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
+#define PORT_SWITCH_ID_PROD_NUM_6341 0x341
#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
@@ -163,6 +169,7 @@
#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
+#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f
#define PORT_RATE_CONTROL 0x09
#define PORT_RATE_CONTROL_2 0x0a
#define PORT_ASSOC_VECTOR 0x0b
@@ -332,6 +339,7 @@
#define GLOBAL_STATS_COUNTER_01 0x1f
#define GLOBAL2_INT_SOURCE 0x00
+#define GLOBAL2_INT_SOURCE_WATCHDOG 15
#define GLOBAL2_INT_MASK 0x01
#define GLOBAL2_MGMT_EN_2X 0x02
#define GLOBAL2_MGMT_EN_0X 0x03
@@ -382,10 +390,12 @@
#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10)
#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff
#define GLOBAL2_EEPROM_DATA 0x15
+#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */
#define GLOBAL2_PTP_AVB_OP 0x16
#define GLOBAL2_PTP_AVB_DATA 0x17
#define GLOBAL2_SMI_PHY_CMD 0x18
#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15)
+#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13)
#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12)
#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \
GLOBAL2_SMI_PHY_CMD_MODE_22 | \
@@ -393,12 +403,38 @@
#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \
GLOBAL2_SMI_PHY_CMD_MODE_22 | \
GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR ((0x0 << 10) | \
+ GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA ((0x1 << 10) | \
+ GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA ((0x3 << 10) | \
+ GLOBAL2_SMI_PHY_CMD_BUSY)
+
#define GLOBAL2_SMI_PHY_DATA 0x19
#define GLOBAL2_SCRATCH_MISC 0x1a
#define GLOBAL2_SCRATCH_BUSY BIT(15)
#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
#define GLOBAL2_SCRATCH_VALUE_MASK 0xff
#define GLOBAL2_WDOG_CONTROL 0x1b
+#define GLOBAL2_WDOG_CONTROL_EGRESS_EVENT BIT(7)
+#define GLOBAL2_WDOG_CONTROL_RMU_TIMEOUT BIT(6)
+#define GLOBAL2_WDOG_CONTROL_QC_ENABLE BIT(5)
+#define GLOBAL2_WDOG_CONTROL_EGRESS_HISTORY BIT(4)
+#define GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE BIT(3)
+#define GLOBAL2_WDOG_CONTROL_FORCE_IRQ BIT(2)
+#define GLOBAL2_WDOG_CONTROL_HISTORY BIT(1)
+#define GLOBAL2_WDOG_CONTROL_SWRESET BIT(0)
+#define GLOBAL2_WDOG_UPDATE BIT(15)
+#define GLOBAL2_WDOG_INT_SOURCE (0x00 << 8)
+#define GLOBAL2_WDOG_INT_STATUS (0x10 << 8)
+#define GLOBAL2_WDOG_INT_ENABLE (0x11 << 8)
+#define GLOBAL2_WDOG_EVENT (0x12 << 8)
+#define GLOBAL2_WDOG_HISTORY (0x13 << 8)
+#define GLOBAL2_WDOG_DATA_MASK 0xff
+#define GLOBAL2_WDOG_CUT_THROUGH BIT(3)
+#define GLOBAL2_WDOG_QUEUE_CONTROLLER BIT(2)
+#define GLOBAL2_WDOG_EGRESS BIT(1)
+#define GLOBAL2_WDOG_FORCE_IRQ BIT(0)
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
@@ -418,6 +454,7 @@ enum mv88e6xxx_model {
MV88E6097,
MV88E6123,
MV88E6131,
+ MV88E6141,
MV88E6161,
MV88E6165,
MV88E6171,
@@ -432,6 +469,7 @@ enum mv88e6xxx_model {
MV88E6290,
MV88E6320,
MV88E6321,
+ MV88E6341,
MV88E6350,
MV88E6351,
MV88E6352,
@@ -447,6 +485,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
@@ -496,12 +535,6 @@ enum mv88e6xxx_cap {
*/
MV88E6XXX_CAP_STU,
- /* Internal temperature sensor.
- * Available from any enabled port's PHY register 26, page 6.
- */
- MV88E6XXX_CAP_TEMP,
- MV88E6XXX_CAP_TEMP_LIMIT,
-
/* VLAN Table Unit.
* The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
*/
@@ -532,8 +565,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
/* Ingress Rate Limit unit */
@@ -566,6 +597,7 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
@@ -584,7 +616,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -603,13 +634,25 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
+#define MV88E6XXX_FLAGS_FAMILY_6341 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
+ MV88E6XXX_FLAGS_PVT | \
+ MV88E6XXX_FLAGS_SERDES)
+
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
@@ -619,7 +662,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -635,27 +677,24 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT | \
MV88E6XXX_FLAGS_SERDES)
-struct mv88e6xxx_ops;
-
#define MV88E6XXX_FLAGS_FAMILY_6390 \
(MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
+struct mv88e6xxx_ops;
+
struct mv88e6xxx_info {
enum mv88e6xxx_family family;
u16 prod_num;
@@ -688,10 +727,7 @@ struct mv88e6xxx_vtu_entry {
};
struct mv88e6xxx_bus_ops;
-
-struct mv88e6xxx_priv_port {
- struct net_device *bridge_dev;
-};
+struct mv88e6xxx_irq_ops;
struct mv88e6xxx_irq {
u16 masked;
@@ -733,8 +769,6 @@ struct mv88e6xxx_chip {
*/
struct mutex stats_mutex;
- struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
-
/* A switch may have a GPIO line tied to its reset pin. Parse
* this from the device tree, and use it before performing
* switch soft reset.
@@ -744,11 +778,8 @@ struct mv88e6xxx_chip {
/* set to size of eeprom if supported by the switch */
int eeprom_len;
- /* Device node for the MDIO bus */
- struct device_node *mdio_np;
-
- /* And the MDIO bus itself */
- struct mii_bus *mdio_bus;
+ /* List of mdio busses */
+ struct list_head mdios;
/* There can be two interrupt controllers, which are chained
* off a GPIO as interrupt source
@@ -757,6 +788,7 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g2_irq;
int irq;
int device_irq;
+ int watchdog_irq;
};
struct mv88e6xxx_bus_ops {
@@ -764,6 +796,13 @@ struct mv88e6xxx_bus_ops {
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
};
+struct mv88e6xxx_mdio_bus {
+ struct mii_bus *bus;
+ struct mv88e6xxx_chip *chip;
+ struct list_head list;
+ bool external;
+};
+
struct mv88e6xxx_ops {
int (*get_eeprom)(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
@@ -772,10 +811,12 @@ struct mv88e6xxx_ops {
int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
- int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val);
- int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val);
+ int (*phy_read)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
/* PHY Polling Unit (PPU) operations */
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
@@ -832,6 +873,18 @@ struct mv88e6xxx_ops {
int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+ /* CMODE controls which PHY mode the MAC will use, e.g. SGMII, RGMII, etc.
+ * Some chips allow this to be configured on specific ports.
+ */
+ int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+ /* Some devices have a per-port register indicating which upstream
+ * port this port should forward to.
+ */
+ int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+
/* Snapshot the statistics for a port. The statistics can then
* be read back at leisure but still with a consistent view.
*/
@@ -849,11 +902,21 @@ struct mv88e6xxx_ops {
uint64_t *data);
int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+ const struct mv88e6xxx_irq_ops *watchdog_ops;
/* Can be either in g1 or g2, so don't use a prefix */
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
};
+struct mv88e6xxx_irq_ops {
+ /* Action to be performed when the interrupt happens */
+ int (*irq_action)(struct mv88e6xxx_chip *chip, int irq);
+ /* Setup the hardware to generate the interrupt */
+ int (*irq_setup)(struct mv88e6xxx_chip *chip);
+ /* Reset the hardware to stop generating the interrupt */
+ void (*irq_free)(struct mv88e6xxx_chip *chip);
+};
+
#define STATS_TYPE_PORT BIT(0)
#define STATS_TYPE_BANK0 BIT(1)
#define STATS_TYPE_BANK1 BIT(2)
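For context only, a hypothetical chip ops table showing how the bus-aware PHY accessors, the new per-port hooks and the watchdog helpers declared in this series are expected to be wired together; the real per-chip tables live in chip.c and pair these callbacks per family, so the mix below is purely illustrative:

static const struct mv88e6xxx_ops example_chip_ops = {
	/* SMI PHY access now takes the mii_bus, so internal and external
	 * MDIO busses can be told apart.
	 */
	.phy_read		= mv88e6xxx_g2_smi_phy_read,
	.phy_write		= mv88e6xxx_g2_smi_phy_write,
	/* Hooks introduced in this series */
	.port_set_cmode		= mv88e6390x_port_set_cmode,
	.port_set_upstream_port	= mv88e6095_port_set_upstream_port,
	.watchdog_ops		= &mv88e6390_watchdog_ops,
};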
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 0db7fa0373ae..8875784c4718 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -11,6 +11,7 @@
* (at your option) any later version.
*/
+#include <linux/phy.h>
#include "mv88e6xxx.h"
#include "port.h"
@@ -193,7 +194,7 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
ctrl = PORT_PCS_CTRL_SPEED_1000;
break;
case 2500:
- ctrl = PORT_PCS_CTRL_SPEED_1000 | PORT_PCS_CTRL_ALTSPEED;
+ ctrl = PORT_PCS_CTRL_SPEED_10000 | PORT_PCS_CTRL_ALTSPEED;
break;
case 10000:
/* all bits set, fall through... */
@@ -304,6 +305,69 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
}
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ u16 reg;
+ u16 cmode;
+ int err;
+
+ if (mode == PHY_INTERFACE_MODE_NA)
+ return 0;
+
+ if (port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ cmode = PORT_STATUS_CMODE_1000BASE_X;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cmode = PORT_STATUS_CMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cmode = PORT_STATUS_CMODE_2500BASEX;
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ cmode = PORT_STATUS_CMODE_XAUI;
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ cmode = PORT_STATUS_CMODE_RXAUI;
+ break;
+ default:
+ cmode = 0;
+ }
+
+ if (cmode) {
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_STATUS_CMODE_MASK;
+ reg |= cmode;
+
+ err = mv88e6xxx_port_write(chip, port, PORT_STATUS, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
+ return err;
+
+ *cmode = reg & PORT_STATUS_CMODE_MASK;
+
+ return 0;
+}
+
/* Offset 0x02: Pause Control
*
* Do not limit the period of time that this port can be paused for by
@@ -608,6 +672,40 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
+int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ else
+ reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~PORT_CONTROL_2_UPSTREAM_MASK;
+ reg |= upstream_port;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -631,6 +729,20 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
+ return err;
+
+ reg |= PORT_CONTROL_2_MAP_DA;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
{
u16 reg;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 7b3bacaacbfe..c83cbb3f4491 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -58,6 +58,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
bool on);
+int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+ bool on);
int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
bool on);
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
@@ -67,5 +69,10 @@ int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port);
-
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
#endif /* _MV88E6XXX_PORT_H */
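A short sketch, under assumed names, of how a common setup path would exercise the two new per-port hooks; the real callers are in chip.c, outside this excerpt, and upstream_port would normally come from the DSA core:

static int example_port_setup(struct mv88e6xxx_chip *chip, int port,
			      int upstream_port)
{
	int err;

	/* PHY_INTERFACE_MODE_NA is a no-op for mv88e6390x_port_set_cmode(),
	 * so the CMODE is left untouched unless a phy-mode was requested.
	 */
	if (chip->info->ops->port_set_cmode) {
		err = chip->info->ops->port_set_cmode(chip, port,
						      PHY_INTERFACE_MODE_NA);
		if (err)
			return err;
	}

	/* Point the port at the CPU/upstream port where supported */
	if (chip->info->ops->port_set_upstream_port)
		return chip->info->ops->port_set_upstream_port(chip, port,
							       upstream_port);

	return 0;
}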
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index b3df70d07ff6..a4fd4ccf7b67 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
}
static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask = BIT(QCA8K_CPU_PORT);
int i;
- priv->port_sts[port].bridge_dev = bridge;
-
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (priv->port_sts[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
@@ -775,14 +772,13 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port,
}
static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int i;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (priv->port_sts[i].bridge_dev !=
- priv->port_sts[port].bridge_dev)
+ if (ds->ports[i].bridge_dev != br)
continue;
/* Remove this port from the portvlan mask of the other ports
* in the bridge
@@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
}
- priv->port_sts[port].bridge_dev = NULL;
+
/* Set the cpu port to be the only one in the portvlan mask of
* this port
*/
@@ -911,7 +907,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds)
return DSA_TAG_PROTO_QCA;
}
-static struct dsa_switch_ops qca8k_switch_ops = {
+static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
@@ -954,17 +950,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
if (!priv->ds)
return -ENOMEM;
priv->ds->priv = priv;
- priv->ds->dev = &mdiodev->dev;
priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
- return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+ return dsa_register_switch(priv->ds, &mdiodev->dev);
}
static void
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 201464719531..1ed4fac6cd6d 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -157,7 +157,6 @@ enum qca8k_fdb_cmd {
struct ar8xxx_port_status {
struct ethtool_eee eee;
- struct net_device *bridge_dev;
int enabled;
};
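The qca8k conversion above reflects the DSA core now owning the bridge pointer: the driver drops its private bridge_dev copy and instead compares against ds->ports[i].bridge_dev, with the bridge also passed to the leave callback. A sketch of that membership test, with a hypothetical helper name:

/* True when both ports are members of the same (non-NULL) bridge */
static bool example_same_bridge(struct dsa_switch *ds, int a, int b)
{
	return ds->ports[a].bridge_dev &&
	       ds->ports[a].bridge_dev == ds->ports[b].bridge_dev;
}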
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 6421835f11b7..2c80611b94ae 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -41,7 +41,48 @@
#define DRV_NAME "dummy"
#define DRV_VERSION "1.0"
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
static int numdummies = 1;
+static int num_vfs;
+
+struct vf_data_storage {
+ u8 vf_mac[ETH_ALEN];
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ __be16 vlan_proto;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u8 spoofchk_enabled;
+ bool rss_query_enabled;
+ u8 trusted;
+ int link_state;
+};
+
+struct dummy_priv {
+ struct vf_data_storage *vfinfo;
+};
+
+static int dummy_num_vf(struct device *dev)
+{
+ return num_vfs;
+}
+
+static struct bus_type dummy_bus = {
+ .name = "dummy",
+ .num_vf = dummy_num_vf,
+};
+
+static void release_dummy_parent(struct device *dev)
+{
+}
+
+static struct device dummy_parent = {
+ .init_name = "dummy",
+ .bus = &dummy_bus,
+ .release = release_dummy_parent,
+};
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
@@ -54,8 +95,8 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void dummy_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
int i;
@@ -73,7 +114,6 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
stats->tx_bytes += tbytes;
stats->tx_packets += tpackets;
}
- return stats;
}
static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -91,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
+ struct dummy_priv *priv = netdev_priv(dev);
+
dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
+ priv->vfinfo = NULL;
+
+ if (!num_vfs)
+ return 0;
+
+ dev->dev.parent = &dummy_parent;
+ priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!priv->vfinfo) {
+ free_percpu(dev->dstats);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -112,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
return 0;
}
+static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int dummy_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+
+ priv->vfinfo[vf].pf_vlan = vlan;
+ priv->vfinfo[vf].pf_qos = qos;
+ priv->vfinfo[vf].vlan_proto = vlan_proto;
+
+ return 0;
+}
+
+static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].min_tx_rate = min;
+ priv->vfinfo[vf].max_tx_rate = max;
+
+ return 0;
+}
+
+static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].spoofchk_enabled = val;
+
+ return 0;
+}
+
+static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].rss_query_enabled = val;
+
+ return 0;
+}
+
+static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].trusted = val;
+
+ return 0;
+}
+
+static int dummy_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ ivi->vf = vf;
+ memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
+ ivi->vlan = priv->vfinfo[vf].pf_vlan;
+ ivi->qos = priv->vfinfo[vf].pf_qos;
+ ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
+ ivi->linkstate = priv->vfinfo[vf].link_state;
+ ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
+ ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
+ ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
+ ivi->trusted = priv->vfinfo[vf].trusted;
+ ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
+
+ return 0;
+}
+
+static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].link_state = state;
+
+ return 0;
+}
+
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -121,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
.ndo_change_carrier = dummy_change_carrier,
+ .ndo_set_vf_mac = dummy_set_vf_mac,
+ .ndo_set_vf_vlan = dummy_set_vf_vlan,
+ .ndo_set_vf_rate = dummy_set_vf_rate,
+ .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk,
+ .ndo_set_vf_trust = dummy_set_vf_trust,
+ .ndo_get_vf_config = dummy_get_vf_config,
+ .ndo_set_vf_link_state = dummy_set_vf_link_state,
+ .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
};
static void dummy_get_drvinfo(struct net_device *dev,
@@ -134,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = {
.get_drvinfo = dummy_get_drvinfo,
};
+static void dummy_free_netdev(struct net_device *dev)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ kfree(priv->vfinfo);
+ free_netdev(dev);
+}
+
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -141,7 +323,7 @@ static void dummy_setup(struct net_device *dev)
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->destructor = dummy_free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
@@ -172,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
.kind = DRV_NAME,
+ .priv_size = sizeof(struct dummy_priv),
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -180,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
+module_param(num_vfs, int, 0);
+MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
+
static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
+ dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
+ "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
if (!dev_dummy)
return -ENOMEM;
@@ -204,6 +391,21 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
+ if (num_vfs) {
+ err = bus_register(&dummy_bus);
+ if (err < 0) {
+ pr_err("registering dummy bus failed\n");
+ return err;
+ }
+
+ err = device_register(&dummy_parent);
+ if (err < 0) {
+ pr_err("registering dummy parent device failed\n");
+ bus_unregister(&dummy_bus);
+ return err;
+ }
+ }
+
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
if (err < 0)
@@ -219,12 +421,22 @@ static int __init dummy_init_module(void)
out:
rtnl_unlock();
+ if (err && num_vfs) {
+ device_unregister(&dummy_parent);
+ bus_unregister(&dummy_bus);
+ }
+
return err;
}
static void __exit dummy_cleanup_module(void)
{
rtnl_link_unregister(&dummy_link_ops);
+
+ if (num_vfs) {
+ device_unregister(&dummy_parent);
+ bus_unregister(&dummy_bus);
+ }
}
module_init(dummy_init_module);
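With the dummy changes above, VF emulation is opt-in through the new num_vfs module parameter: loading the module with it set (for instance modprobe dummy num_vfs=2) registers the fake "dummy" bus and parent device, and the emulated VFs can then be exercised through the usual ip link VF sub-commands (for instance ip link set dummy0 vf 0 mac 02:00:00:00:00:01), which end up in the ndo_set_vf_* handlers added here. The command lines are illustrative usage, not taken from the patch.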
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 9fe3990319ec..084a6d58543a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
iowrite32(TYPHOON_INTR_NONE,
tp->ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(tp->ioaddr);
@@ -2370,9 +2370,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* 4) Get the hardware address.
* 5) Put the card to sleep.
*/
- if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+ err = typhoon_reset(ioaddr, WaitSleep);
+ if (err < 0) {
err_msg = "could not reset 3XP";
- err = -EIO;
goto error_out_dma;
}
@@ -2386,24 +2386,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
typhoon_init_interface(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
+ if (err < 0) {
err_msg = "cannot boot 3XP sleep image";
- err = -EIO;
goto error_out_reset;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
+ if (err < 0) {
err_msg = "cannot read MAC address";
- err = -EIO;
goto error_out_reset;
}
*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
- if(!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
+ err = -EIO;
goto error_out_reset;
}
@@ -2411,7 +2412,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* later when we print out the version reported.
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
+ if (err < 0) {
err_msg = "Could not get Sleep Image version";
goto error_out_reset;
}
@@ -2428,9 +2430,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if(xp_resp[0].numDesc != 0)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+ err = typhoon_sleep(tp, PCI_D3hot, 0);
+ if (err < 0) {
err_msg = "cannot put adapter to sleep";
- err = -EIO;
goto error_out_reset;
}
@@ -2453,7 +2455,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features = dev->hw_features |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
- if(register_netdev(dev) < 0) {
+ err = register_netdev(dev);
+ if (err < 0) {
err_msg = "unable to register netdev";
goto error_out_reset;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e4c28fed61d5..8c08f9deef92 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
@@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
-source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 24330f4885a9..26dce5bf2c18 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
@@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
-obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[i].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[i].skb = NULL;
+ break;
+ }
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
+ unsigned int prev_tx;
u32 status;
- int i;
+ int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+ prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->tx_info[entry].mapping)) {
+ dev->stats.tx_dropped++;
+ goto err_out;
+ }
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
return NETDEV_TX_OK;
-}
+err_out:
+ entry = prev_tx % TX_RING_SIZE;
+ np->tx_info[entry].skb = NULL;
+ if (i > 0) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ for (j = 1; j < i; j++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_frag_size(
+ &skb_shinfo(skb)->frags[j-1]),
+ PCI_DMA_TODEVICE);
+ entry++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ np->cur_tx = prev_tx;
+ return NETDEV_TX_OK;
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[entry].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[entry].skb = NULL;
+ break;
+ }
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 88164529b52a..a81731303730 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget)
}
if (i < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, i);
if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
enable_irq(IRQ_MAC_RX);
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 93def92f9997..9f7422ada704 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1008,7 +1008,7 @@ restart_txrx_poll:
spin_unlock_irqrestore(&greth->devlock, flags);
goto restart_txrx_poll;
} else {
- __napi_complete(napi);
+ napi_complete_done(napi, work_done);
spin_unlock_irqrestore(&greth->devlock, flags);
}
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 831bab352f8e..87a11b9f0ea5 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget)
et131x_handle_send_pkts(adapter);
if (work_done < budget) {
- napi_complete(&adapter->napi);
+ napi_complete_done(&adapter->napi, work_done);
et131x_enable_interrupts(adapter);
}
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index b21d8aa8d653..15a8096c60df 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1471,8 +1471,8 @@ drop_skb:
return NETDEV_TX_OK;
}
-static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *lst)
+static void slic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *lst)
{
struct slic_device *sdev = netdev_priv(dev);
struct slic_stats *stats = &sdev->stats;
@@ -1489,8 +1489,6 @@ static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
-
- return lst;
}
static int slic_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 25864bff25ee..527908c7e384 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxcomplete);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index a46e749bf226..5b6509d59716 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -631,22 +631,22 @@ enum ena_admin_flow_hash_proto {
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = 0,
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = 1,
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = 2,
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = 5,
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = 6,
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = 7,
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
struct ena_admin_proto_input {
@@ -873,6 +873,14 @@ struct ena_admin_aenq_link_change_desc {
u32 flags;
};
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ u32 rx_drops_low;
+
+ u32 rx_drops_high;
+};
+
struct ena_admin_ena_mmio_req_read_less_resp {
u16 req_id;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 3066d9c99984..08d11cede9c9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -36,9 +36,9 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
@@ -784,7 +784,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- pr_info("Feature %d isn't supported\n", feature_id);
+ pr_debug("Feature %d isn't supported\n", feature_id);
return -EPERM;
}
@@ -1126,7 +1126,13 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
if (unlikely(IS_ERR(comp_ctx))) {
- pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
+ if (comp_ctx == ERR_PTR(-ENODEV))
+ pr_debug("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ pr_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
return PTR_ERR(comp_ctx);
}
@@ -1895,7 +1901,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EPERM;
}
@@ -1948,8 +1954,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- pr_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
+ pr_debug("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
return -EPERM;
}
@@ -2112,7 +2118,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
+ pr_debug("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
return -EPERM;
}
@@ -2184,7 +2191,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2270,8 +2277,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(
ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- pr_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ pr_debug("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
return -EPERM;
}
@@ -2444,11 +2451,9 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_HOST_ATTR_CONFIG)) {
- pr_warn("Set host attribute isn't supported\n");
- return -EPERM;
- }
+ /* Host attribute config is called before ena_com_get_dev_attr_feat
+ * so ena_com can't check if the feature is supported.
+ */
memset(&cmd, 0x0, sizeof(cmd));
admin_queue = &ena_dev->admin_queue;
@@ -2542,8 +2547,8 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EPERM) {
- pr_info("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ pr_debug("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 509d7b8e15ab..c9b33ee5f258 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -33,6 +33,7 @@
#ifndef ENA_COM
#define ENA_COM
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 539c536464a5..f999305e1363 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -45,7 +45,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -141,7 +141,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -489,13 +489,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
* expected, it means that the device still didn't update
* this completion.
*/
- cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
if (cdesc_phase != expected_phase)
return -EAGAIN;
ena_com_cq_inc_head(io_cq);
- *req_id = cdesc->req_id;
+ *req_id = READ_ONCE(cdesc->req_id);
return 0;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index cc8b13ebfa75..35f19430c84a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -80,14 +80,18 @@ static void ena_tx_timeout(struct net_device *dev)
{
struct ena_adapter *adapter = netdev_priv(dev);
+ /* Change the state of the device to trigger a reset.
+ * Check that a reset is not already in progress.
+ */
+
+ if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+ return;
+
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.tx_timeout++;
u64_stats_update_end(&adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
-
- /* Change the state of the device to trigger reset */
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -559,6 +563,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
*/
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
+ bool print_once = true;
u32 i;
for (i = 0; i < tx_ring->ring_size; i++) {
@@ -570,9 +575,16 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
if (!tx_info->skb)
continue;
- netdev_notice(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ if (print_once) {
+ netdev_notice(tx_ring->netdev,
+ "free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
+ print_once = false;
+ } else {
+ netdev_dbg(tx_ring->netdev,
+ "free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
+ }
ena_buf = tx_info->bufs;
dma_unmap_single(tx_ring->dev,
@@ -1109,7 +1121,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
- if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
napi_complete_done(napi, 0);
return 0;
}
@@ -1117,26 +1130,40 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
- if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
- napi_complete_done(napi, rx_work_done);
+ /* If the device is about to reset or is down, avoid unmasking
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
napi_comp_call = 1;
- /* Tx and Rx share the same interrupt vector */
- if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
- ena_adjust_intr_moderation(rx_ring, tx_ring);
- /* Update intr register: rx intr delay, tx intr delay and
- * interrupt unmask
+ /* Update NUMA and unmask the interrupt only when scheduled
+ * from interrupt context (vs. from sk_busy_loop)
*/
- ena_com_update_intr_reg(&intr_reg,
- rx_ring->smoothed_interval,
- tx_ring->smoothed_interval,
- true);
+ if (napi_complete_done(napi, rx_work_done)) {
+ /* Tx and Rx share the same interrupt vector */
+ if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+ ena_adjust_intr_moderation(rx_ring, tx_ring);
+
+ /* Update intr register: rx intr delay,
+ * tx intr delay and interrupt unmask
+ */
+ ena_com_update_intr_reg(&intr_reg,
+ rx_ring->smoothed_interval,
+ tx_ring->smoothed_interval,
+ true);
+
+ /* It is a shared MSI-X vector.
+ * Both the Tx and Rx CQs have a pointer to it,
+ * so we use one of them to reach the interrupt register.
+ */
+ ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+ }
- /* It is a shared MSI-X. Tx and Rx CQ have pointer to it.
- * So we use one of them to reach the intr reg
- */
- ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
ena_update_ring_numa_node(tx_ring, rx_ring);
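This hunk is the driver-side half of the busy-poll aware NAPI API: napi_complete_done() now returns whether NAPI actually completed, and only in that case may interrupt moderation be updated and the IRQ unmasked. The simpler napi_complete() to napi_complete_done() conversions elsewhere in this diff follow the same rule. A generic sketch of the pattern, where struct example_ring and the example_* helpers are hypothetical stand-ins for driver specifics:

#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	/* device specific state */
};

static int example_clean_rx(struct example_ring *ring, int budget);
static void example_unmask_irq(struct example_ring *ring);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring, napi);
	int work_done = example_clean_rx(ring, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_irq(ring);	/* not busy-polling: re-arm the IRQ */

	return work_done;
}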
@@ -1698,12 +1725,22 @@ static void ena_down(struct ena_adapter *adapter)
adapter->dev_stats.interface_down++;
u64_stats_update_end(&adapter->syncp);
- /* After this point the napi handler won't enable the tx queue */
- ena_napi_disable_all(adapter);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
+ /* After this point the napi handler won't enable the tx queue */
+ ena_napi_disable_all(adapter);
+
/* After destroy the queue there won't be any new interrupts */
+
+ if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ int rc;
+
+ rc = ena_com_dev_reset(adapter->ena_dev);
+ if (rc)
+ dev_err(&adapter->pdev->dev, "Device reset failed\n");
+ }
+
ena_destroy_all_io_queues(adapter);
ena_disable_io_intr_sync(adapter);
@@ -2065,6 +2102,14 @@ static void ena_netpoll(struct net_device *netdev)
struct ena_adapter *adapter = netdev_priv(netdev);
int i;
+ /* Don't schedule NAPI if the driver is in the middle of a reset
+ * or netdev is down.
+ */
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+ return;
+
for (i = 0; i < adapter->num_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
}
@@ -2165,32 +2210,50 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void ena_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_admin_basic_stats ena_stats;
- int rc;
+ struct ena_ring *rx_ring, *tx_ring;
+ unsigned int start;
+ u64 rx_drops;
+ int i;
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
- return NULL;
+ return;
- rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
- if (rc)
- return NULL;
+ for (i = 0; i < adapter->num_queues; i++) {
+ u64 bytes, packets;
- stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
- ena_stats.tx_bytes_low;
- stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
- ena_stats.rx_bytes_low;
+ tx_ring = &adapter->tx_ring[i];
- stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
- ena_stats.rx_pkts_low;
- stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
- ena_stats.tx_pkts_low;
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ packets = tx_ring->tx_stats.cnt;
+ bytes = tx_ring->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
- stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
- ena_stats.rx_drops_low;
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+
+ rx_ring = &adapter->rx_ring[i];
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ packets = rx_ring->rx_stats.cnt;
+ bytes = rx_ring->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&adapter->syncp);
+ rx_drops = adapter->dev_stats.rx_drops;
+ } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
+
+ stats->rx_dropped = rx_drops;
stats->multicast = 0;
stats->collisions = 0;
@@ -2204,8 +2267,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
stats->rx_errors = 0;
stats->tx_errors = 0;
-
- return stats;
}
static const struct net_device_ops ena_netdev_ops = {
@@ -2353,6 +2414,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*/
ena_com_set_admin_polling_mode(ena_dev, true);
+ ena_config_host_info(ena_dev);
+
/* Get Device Attributes*/
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
if (rc) {
@@ -2377,11 +2440,10 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
- ena_config_host_info(ena_dev);
-
return 0;
err_admin_init:
+ ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
ena_com_mmio_reg_read_request_destroy(ena_dev);
@@ -2433,6 +2495,14 @@ static void ena_fw_reset_device(struct work_struct *work)
bool dev_up, wd_state;
int rc;
+ if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ dev_err(&pdev->dev,
+ "device reset schedule while reset bit is off\n");
+ return;
+ }
+
+ netif_carrier_off(netdev);
+
del_timer_sync(&adapter->timer_service);
rtnl_lock();
@@ -2446,12 +2516,6 @@ static void ena_fw_reset_device(struct work_struct *work)
*/
ena_close(netdev);
- rc = ena_com_dev_reset(ena_dev);
- if (rc) {
- dev_err(&pdev->dev, "Device reset failed\n");
- goto err;
- }
-
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
@@ -2464,6 +2528,8 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
/* Finish with the destroy part. Start the init part */
rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
@@ -2509,6 +2575,8 @@ err_device_destroy:
err:
rtnl_unlock();
+ clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
dev_err(&pdev->dev,
"Reset attempt failed. Can not reset the device\n");
}
@@ -2527,6 +2595,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
+ if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+ return;
+
budget = ENA_MONITORED_TX_QUEUES;
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
@@ -2626,7 +2697,7 @@ static void ena_timer_service(unsigned long data)
if (host_info)
ena_update_host_info(host_info, adapter->netdev);
- if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
netif_err(adapter, drv, adapter->netdev,
"Trigger reset is on\n");
ena_dump_stats_to_dmesg(adapter);
@@ -2660,7 +2731,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
io_sq_num = get_feat_ctx->max_queues.max_sq_num;
}
- io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
io_queue_num = min_t(int, io_queue_num, io_sq_num);
io_queue_num = min_t(int, io_queue_num,
get_feat_ctx->max_queues.max_cq_num);
@@ -2722,7 +2793,6 @@ static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
netdev->features =
dev_features |
NETIF_F_SG |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
NETIF_F_HIGHDMA;
@@ -3093,12 +3163,6 @@ static void ena_remove(struct pci_dev *pdev)
struct ena_com_dev *ena_dev;
struct net_device *netdev;
- if (!adapter)
- /* This device didn't load properly and it's resources
- * already released, nothing to do
- */
- return;
-
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
@@ -3118,7 +3182,9 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->resume_io_task);
- ena_com_dev_reset(ena_dev);
+ /* Reset the device only if the device is running. */
+ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
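
The ena_get_stats64() hunk above, like the xgbe and xgene hunks further down, follows the tree-wide conversion of .ndo_get_stats64 into a void callback: the driver now fills the caller-supplied structure and no longer returns it. A minimal sketch of the new shape, using a hypothetical foo_ driver prefix and the standard rtnl_link_stats64 fields:

#include <linux/netdevice.h>

static void foo_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	/* copy driver counters into the caller-provided struct;
	 * nothing is returned under the new prototype
	 */
	stats->rx_packets = netdev->stats.rx_packets;
	stats->tx_packets = netdev->stats.tx_packets;
	stats->rx_dropped = netdev->stats.rx_dropped;
	stats->tx_dropped = netdev->stats.tx_dropped;
}
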
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 69d7e9ed5bc8..ed62d8e231a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_MINOR 1
#define DRV_MODULE_VER_SUBMINOR 2
#define DRV_MODULE_NAME "ena"
@@ -100,7 +100,7 @@
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
/* Max timeout packets before device reset */
-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
@@ -116,9 +116,9 @@
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
/* ENA device should send keep alive msg every 1 sec.
- * We wait for 3 sec just to be on the safe side.
+ * We wait for 6 sec just to be on the safe side.
*/
-#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
+#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
@@ -241,6 +241,7 @@ struct ena_stats_dev {
u64 interface_up;
u64 interface_down;
u64 admin_q_pause;
+ u64 rx_drops;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9595f1bc535b..7b5df562f30f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
void __iomem *mmio = lp->mmio;
struct sk_buff *skb,*new_skb;
int min_pkt_len, status;
- unsigned int intr0;
int num_rx_pkt = 0;
short pkt_len;
#if AMD8111E_VLAN_TAG_USED
short vtag;
#endif
- int rx_pkt_limit = budget;
- unsigned long flags;
- if (rx_pkt_limit <= 0)
- goto rx_not_empty;
+ while (num_rx_pkt < budget) {
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if (status & OWN_BIT)
+ break;
- do{
- /* process receive packets until we use the quota.
- * If we own the next entry, it's a new packet. Send it up.
+ /* There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with
+ * full-sized * buffers it's possible for a
+ * jabber packet to use two buffers, with only
+ * the last correctly noting the error.
*/
- while(1) {
- status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
- if (status & OWN_BIT)
- break;
-
- /* There is a tricky error noted by John Murphy,
- * <murf@perftech.com> to Russ Nelson: Even with
- * full-sized * buffers it's possible for a
- * jabber packet to use two buffers, with only
- * the last correctly noting the error.
- */
- if(status & ERR_BIT) {
- /* resetting flags */
- lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
- goto err_next_pkt;
- }
- /* check for STP and ENP */
- if(!((status & STP_BIT) && (status & ENP_BIT))){
- /* resetting flags */
- lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
- goto err_next_pkt;
- }
- pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+ if (status & ERR_BIT) {
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if (!((status & STP_BIT) && (status & ENP_BIT))){
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
#if AMD8111E_VLAN_TAG_USED
- vtag = status & TT_MASK;
- /*MAC will strip vlan tag*/
- if (vtag != 0)
- min_pkt_len =MIN_PKT_LEN - 4;
+ vtag = status & TT_MASK;
+ /* MAC will strip vlan tag */
+ if (vtag != 0)
+ min_pkt_len = MIN_PKT_LEN - 4;
else
#endif
- min_pkt_len =MIN_PKT_LEN;
+ min_pkt_len = MIN_PKT_LEN;
- if (pkt_len < min_pkt_len) {
- lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
- lp->drv_rx_errors++;
- goto err_next_pkt;
- }
- if(--rx_pkt_limit < 0)
- goto rx_not_empty;
- new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
- if (!new_skb) {
- /* if allocation fail,
- * ignore that pkt and go to next one
- */
- lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
- lp->drv_rx_errors++;
- goto err_next_pkt;
- }
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!new_skb) {
+ /* if allocation fail,
+ * ignore that pkt and go to next one
+ */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
- skb_reserve(new_skb, 2);
- skb = lp->rx_skbuff[rx_index];
- pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
- lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
- skb_put(skb, pkt_len);
- lp->rx_skbuff[rx_index] = new_skb;
- lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
- new_skb->data,
- lp->rx_buff_len-2,
- PCI_DMA_FROMDEVICE);
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[rx_index] = new_skb;
+ lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+ new_skb->data,
+ lp->rx_buff_len-2,
+ PCI_DMA_FROMDEVICE);
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED
- if (vtag == TT_VLAN_TAGGED){
- u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- }
-#endif
- netif_receive_skb(skb);
- /*COAL update rx coalescing parameters*/
- lp->coal_conf.rx_packets++;
- lp->coal_conf.rx_bytes += pkt_len;
- num_rx_pkt++;
-
- err_next_pkt:
- lp->rx_ring[rx_index].buff_phy_addr
- = cpu_to_le32(lp->rx_dma_addr[rx_index]);
- lp->rx_ring[rx_index].buff_count =
- cpu_to_le16(lp->rx_buff_len-2);
- wmb();
- lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
- rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ if (vtag == TT_VLAN_TAGGED){
+ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
- /* Check the interrupt status register for more packets in the
- * mean time. Process them since we have not used up our quota.
- */
- intr0 = readl(mmio + INT0);
- /*Ack receive packets */
- writel(intr0 & RINT0,mmio + INT0);
+#endif
+ napi_gro_receive(napi, skb);
+ /* COAL update rx coalescing parameters */
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+ num_rx_pkt++;
+
+err_next_pkt:
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
- } while(intr0 & RINT0);
+ if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+ unsigned long flags;
- if (rx_pkt_limit > 0) {
/* Receive descriptor is empty now */
spin_lock_irqsave(&lp->lock, flags);
- __napi_complete(napi);
writel(VAL0|RINTEN0, mmio + INTEN0);
writel(VAL2 | RDMD0, mmio + CMD0);
spin_unlock_irqrestore(&lp->lock, flags);
}
-rx_not_empty:
return num_rx_pkt;
}
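
The reworked amd8111e_rx_poll() above follows the standard NAPI contract: process at most budget packets, then call napi_complete_done() and re-arm the RX interrupt only if it returns true. A condensed sketch of that pattern, assuming a hypothetical foo_priv that embeds the napi_struct and hypothetical foo_clean_rx()/foo_enable_rx_irq() helpers:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int done = foo_clean_rx(priv, budget);

	/* only re-enable interrupts once NAPI really completes */
	if (done < budget && napi_complete_done(napi, done))
		foo_enable_rx_irq(priv);

	return done;
}
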
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 41e58cca8fee..86369d7c9a0f 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -291,7 +291,10 @@ struct pcnet32_private {
int options;
unsigned int shared_irq:1, /* shared irq possible */
dxsuflo:1, /* disable transmit stop on uflo */
- mii:1; /* mii port available */
+ mii:1, /* mii port available */
+ autoneg:1, /* autoneg enabled */
+ port_tp:1, /* port set to TP */
+ fdx:1; /* full duplex enabled */
struct net_device *next;
struct mii_if_info mii_if;
struct timer_list watchdog_timer;
@@ -677,6 +680,52 @@ static void pcnet32_poll_controller(struct net_device *dev)
}
#endif
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
+{
+ int csr5 = lp->a->read_csr(ioaddr, CSR5);
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
+}
+
static int pcnet32_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -684,12 +733,29 @@ static int pcnet32_get_link_ksettings(struct net_device *dev,
unsigned long flags;
int r = -EOPNOTSUPP;
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ if (lp->autoneg) {
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
+ cmd->base.port = PORT_AUI;
+ else
+ cmd->base.port = PORT_TP;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
+ }
+ cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = SPEED_10;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ SUPPORTED_TP | SUPPORTED_AUI);
r = 0;
}
+ spin_unlock_irqrestore(&lp->lock, flags);
return r;
}
@@ -697,14 +763,46 @@ static int pcnet32_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
unsigned long flags;
int r = -EOPNOTSUPP;
+ int suspended, bcr2, bcr9, csr15;
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ if (!suspended)
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+ lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ bcr2 = lp->a->read_bcr(ioaddr, 2);
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
+ } else {
+ lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
+
+ lp->port_tp = cmd->base.port == PORT_TP;
+ csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
+ if (cmd->base.port == PORT_TP)
+ csr15 |= 0x0080;
+ lp->a->write_csr(ioaddr, CSR15, csr15);
+ lp->init_block->mode = cpu_to_le16(csr15);
+
+ lp->fdx = cmd->base.duplex == DUPLEX_FULL;
+ bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
+ if (cmd->base.duplex == DUPLEX_FULL)
+ bcr9 |= 0x0003;
+ lp->a->write_bcr(ioaddr, 9, bcr9);
+ }
+ if (suspended)
+ pcnet32_clr_suspend(lp, ioaddr);
+ else if (netif_running(dev))
+ pcnet32_restart(dev, CSR0_NORMAL);
+ r = 0;
}
+ spin_unlock_irqrestore(&lp->lock, flags);
return r;
}
@@ -732,7 +830,14 @@ static u32 pcnet32_get_link(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
r = mii_link_ok(&lp->mii_if);
- } else if (lp->chip_version >= PCNET32_79C970A) {
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ /* only read link if port is set to TP */
+ if (!lp->autoneg && lp->port_tp)
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ else /* link always up for AUI port or port auto select */
+ r = 1;
+ } else if (lp->chip_version > PCNET32_79C970A) {
ulong ioaddr = dev->base_addr; /* card base I/O address */
r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
} else { /* can not detect link on really old chips */
@@ -1070,45 +1175,6 @@ static int pcnet32_set_phys_id(struct net_device *dev,
}
/*
- * lp->lock must be held.
- */
-static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
- int can_sleep)
-{
- int csr5;
- struct pcnet32_private *lp = netdev_priv(dev);
- const struct pcnet32_access *a = lp->a;
- ulong ioaddr = dev->base_addr;
- int ticks;
-
- /* really old chips have to be stopped. */
- if (lp->chip_version < PCNET32_79C970A)
- return 0;
-
- /* set SUSPEND (SPND) - CSR5 bit 0 */
- csr5 = a->read_csr(ioaddr, CSR5);
- a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
-
- /* poll waiting for bit to be set */
- ticks = 0;
- while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
- spin_unlock_irqrestore(&lp->lock, *flags);
- if (can_sleep)
- msleep(1);
- else
- mdelay(1);
- spin_lock_irqsave(&lp->lock, *flags);
- ticks++;
- if (ticks > 200) {
- netif_printk(lp, hw, KERN_DEBUG, dev,
- "Error getting into suspend!\n");
- return 0;
- }
- }
- return 1;
-}
-
-/*
* process one receive descriptor entry
*/
@@ -1350,13 +1416,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
pcnet32_restart(dev, CSR0_START);
netif_wake_queue(dev);
}
- spin_unlock_irqrestore(&lp->lock, flags);
-
- if (work_done < budget) {
- spin_lock_irqsave(&lp->lock, flags);
-
- __napi_complete(napi);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
/* clear interrupt masks */
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
@@ -1364,9 +1425,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
/* Set interrupt enable. */
lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
-
- spin_unlock_irqrestore(&lp->lock, flags);
}
+
+ spin_unlock_irqrestore(&lp->lock, flags);
return work_done;
}
@@ -1430,13 +1491,8 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
}
- if (!(csr0 & CSR0_STOP)) { /* If not stopped */
- int csr5;
-
- /* clear SUSPEND (SPND) - CSR5 bit 0 */
- csr5 = a->read_csr(ioaddr, CSR5);
- a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
- }
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_clr_suspend(lp, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -1817,6 +1873,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->options = PCNET32_PORT_ASEL;
else
lp->options = options_mapping[options[cards_found]];
+ /* force default port to TP on 79C970A so link detection can work */
+ if (lp->chip_version == PCNET32_79C970A)
+ lp->options = PCNET32_PORT_10BT;
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = mdio_read;
lp->mii_if.mdio_write = mdio_write;
@@ -2068,6 +2127,10 @@ static int pcnet32_open(struct net_device *dev)
(u32) (lp->rx_ring_dma_addr),
(u32) (lp->init_dma_addr));
+ lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
+ lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
+ lp->fdx = !!(lp->options & PCNET32_PORT_FD);
+
/* set/reset autoselect bit */
val = lp->a->read_bcr(ioaddr, 2) & ~2;
if (lp->options & PCNET32_PORT_ASEL)
@@ -2680,10 +2743,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
}
if (suspended) {
- int csr5;
- /* clear SUSPEND (SPND) - CSR5 bit 0 */
- csr5 = lp->a->read_csr(ioaddr, CSR5);
- lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ pcnet32_clr_suspend(lp, ioaddr);
} else {
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
pcnet32_restart(dev, CSR0_NORMAL);
@@ -2794,6 +2854,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (lp->mii) {
curr_link = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version == PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ /* only read link if port is set to TP */
+ if (!lp->autoneg && lp->port_tp)
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ else /* link always up for AUI port or port auto select */
+ curr_link = 1;
} else {
ulong ioaddr = dev->base_addr; /* card base I/O address */
curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
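
The new pcnet32_clr_suspend() helper replaces the open-coded CSR5 writes in get_regs and set_multicast_list. The quiesce/reprogram/resume sequence used by the pcnet32_set_link_ksettings() hunk above is, roughly:

	spin_lock_irqsave(&lp->lock, flags);
	suspended = pcnet32_suspend(dev, &flags, 0);
	if (!suspended)
		lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);

	/* ... reprogram BCR2 / CSR15 / BCR9 while the MAC is quiet ... */

	if (suspended)
		pcnet32_clr_suspend(lp, ioaddr);
	else if (netif_running(dev))
		pcnet32_restart(dev, CSR0_NORMAL);
	spin_unlock_irqrestore(&lp->lock, flags);
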
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25e0065..8a280e7d66bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_DEF 0x9060
#define PCS_V2_WINDOW_SELECT 0x9064
+#define PCS_V2_RV_WINDOW_DEF 0x1060
+#define PCS_V2_RV_WINDOW_SELECT 0x1064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350076a9..a7d16db5c4b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9943629fcbf9..3aa457c8ca21 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
xgbe_napi_enable(pdata, 1);
@@ -1759,8 +1761,8 @@ static void xgbe_tx_timeout(struct net_device *netdev)
schedule_work(&pdata->restart_work);
}
-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *s)
+static void xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
@@ -1786,8 +1788,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
s->tx_dropped = netdev->stats.tx_dropped;
DBGPR("<--%s\n", __func__);
-
- return s;
}
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..c2730f15bd8b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
+ struct pci_dev *rdev;
unsigned int ma_lo, ma_hi;
unsigned int reg;
int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+ /* Set the PCS indirect addressing definition registers */
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (rdev &&
+ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else {
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ }
+ pci_dev_put(rdev);
+
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..00108815b55e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
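
With the per-SoC window registers in place, every indirect XPCS access first selects the window through xpcs_window_sel_reg and then reads or writes within it, as in the xgbe_read_mmd_regs_v2() hunk above (condensed):

	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);	/* select window */
	mmd_data = XPCS16_IOREAD(pdata, offset);			/* access inside it */
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
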
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 523b8eff6d7b..d0d0d12b531f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
processed = xgene_enet_process_ring(ring, budget);
if (processed != budget) {
- napi_complete(napi);
+ napi_complete_done(napi, processed);
enable_irq(ring->irq);
}
@@ -1453,7 +1453,7 @@ err:
return ret;
}
-static struct rtnl_link_stats64 *xgene_enet_get_stats64(
+static void xgene_enet_get_stats64(
struct net_device *ndev,
struct rtnl_link_stats64 *storage)
{
@@ -1462,7 +1462,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
struct xgene_enet_desc_ring *ring;
int i;
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
for (i = 0; i < pdata->txq_cnt; i++) {
ring = pdata->tx_ring[i];
if (ring) {
@@ -1484,8 +1483,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
}
}
memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
-
- return storage;
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1967,6 +1964,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+ { "APMC0D05", XGENE_ENET1},
+ { "APMC0D30", XGENE_ENET1},
+ { "APMC0D31", XGENE_ENET1},
+ { "APMC0D3F", XGENE_ENET1},
+ { "APMC0D26", XGENE_ENET2},
+ { "APMC0D25", XGENE_ENET2},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
@@ -2113,32 +2134,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
xgene_enet_remove(pdev);
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_enet_acpi_match[] = {
- { "APMC0D05", XGENE_ENET1},
- { "APMC0D30", XGENE_ENET1},
- { "APMC0D31", XGENE_ENET1},
- { "APMC0D3F", XGENE_ENET1},
- { "APMC0D26", XGENE_ENET2},
- { "APMC0D25", XGENE_ENET2},
- { }
-};
-MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
- {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
- {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
-#endif
-
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
new file mode 100644
index 000000000000..cdf78e069a39
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -0,0 +1,24 @@
+#
+# aQuantia device configuration
+#
+
+config NET_VENDOR_AQUANTIA
+ bool "aQuantia devices"
+ default y
+ ---help---
+ Set this to y if you have an Ethernet network card that uses the aQuantia
+ AQC107/AQC108 chipset.
+
+ This option does not build any drivers; it causes the aQuantia
+ drivers that can be built to appear in the list of Ethernet drivers.
+
+
+if NET_VENDOR_AQUANTIA
+
+config AQTION
+ tristate "aQuantia AQtion(tm) Support"
+ depends on PCI && X86_64
+ ---help---
+ This enables support for the aQuantia AQtion(tm) Ethernet card.
+
+endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
new file mode 100644
index 000000000000..4f4897b689b2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the aQuantia device drivers.
+#
+
+obj-$(CONFIG_AQTION) += atlantic/
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
new file mode 100644
index 000000000000..e4ae696920ef
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information: <rdc-drv@aquantia.com>
+# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
+#
+################################################################################
+
+#
+# Makefile for the AQtion(tm) Ethernet driver
+#
+
+obj-$(CONFIG_AQTION) += atlantic.o
+
+atlantic-objs := aq_main.o \
+ aq_nic.o \
+ aq_pci_func.o \
+ aq_vec.o \
+ aq_ring.o \
+ aq_hw_utils.o \
+ aq_ethtool.o \
+ hw_atl/hw_atl_a0.o \
+ hw_atl/hw_atl_b0.o \
+ hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
new file mode 100644
index 000000000000..5f99237a9d52
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -0,0 +1,77 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_cfg.h: Definition of configuration parameters and constants. */
+
+#ifndef AQ_CFG_H
+#define AQ_CFG_H
+
+#define AQ_CFG_VECS_DEF 4U
+#define AQ_CFG_TCS_DEF 1U
+
+#define AQ_CFG_TXDS_DEF 4096U
+#define AQ_CFG_RXDS_DEF 1024U
+
+#define AQ_CFG_IS_POLLING_DEF 0U
+
+#define AQ_CFG_FORCE_LEGACY_INT 0U
+
+#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
+#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_IRQ_MASK 0x1FFU
+
+#define AQ_CFG_VECS_MAX 8U
+#define AQ_CFG_TCS_MAX 8U
+
+#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+
+/* LRO */
+#define AQ_CFG_IS_LRO_DEF 1U
+
+/* RSS */
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
+#define AQ_CFG_RSS_HASHKEY_SIZE 320U
+
+#define AQ_CFG_IS_RSS_DEF 1U
+#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
+#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
+
+#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
+#define AQ_CFG_PCI_FUNC_PORTS 2U
+
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
+
+#define AQ_CFG_SKB_FRAGS_MAX 32U
+
+#define AQ_CFG_NAPI_WEIGHT 64U
+
+#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
+
+/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+#define AQ_CFG_FC_MODE 3U
+
+#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
+
+#define AQ_CFG_IS_AUTONEG_DEF 1U
+#define AQ_CFG_MTU_DEF 1514U
+
+#define AQ_CFG_LOCK_TRYS 100U
+
+#define AQ_CFG_DRV_AUTHOR "aQuantia"
+#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_NAME "aquantia"
+#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
+ __stringify(NIC_MINOR_DRIVER_VERSION)"."\
+ __stringify(NIC_BUILD_DRIVER_VERSION)"."\
+ __stringify(NIC_REVISION_DRIVER_VERSION)
+
+#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
new file mode 100644
index 000000000000..9eb5e222a234
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -0,0 +1,23 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_common.h: Basic includes for all files in project. */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "ver.h"
+#include "aq_nic.h"
+#include "aq_cfg.h"
+#include "aq_utils.h"
+
+#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
new file mode 100644
index 000000000000..a761e91471df
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -0,0 +1,262 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.c: Definition of ethtool related functions. */
+
+#include "aq_ethtool.h"
+#include "aq_nic.h"
+
+static void aq_ethtool_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ memset(p, 0, regs_count * sizeof(u32));
+ aq_nic_get_regs(aq_nic, regs, p);
+}
+
+static int aq_ethtool_get_regs_len(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ return regs_count * sizeof(u32);
+}
+
+static u32 aq_ethtool_get_link(struct net_device *ndev)
+{
+ return ethtool_op_get_link(ndev);
+}
+
+static int aq_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic_get_link_ksettings(aq_nic, cmd);
+ cmd->base.speed = netif_carrier_ok(ndev) ?
+ aq_nic_get_link_speed(aq_nic) : 0U;
+
+ return 0;
+}
+
+static int
+aq_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic_set_link_ksettings(aq_nic, cmd);
+}
+
+/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
+static const unsigned int aq_ethtool_stat_queue_lines = 5U;
+static const unsigned int aq_ethtool_stat_queue_chars =
+ 5U * ETH_GSTRING_LEN;
+static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
+ "InPackets",
+ "InUCast",
+ "InMCast",
+ "InBCast",
+ "InErrors",
+ "OutPackets",
+ "OutUCast",
+ "OutMCast",
+ "OutBCast",
+ "InUCastOctects",
+ "OutUCastOctects",
+ "InMCastOctects",
+ "OutMCastOctects",
+ "InBCastOctects",
+ "OutBCastOctects",
+ "InOctects",
+ "OutOctects",
+ "InPacketsDma",
+ "OutPacketsDma",
+ "InOctetsDma",
+ "OutOctetsDma",
+ "InDroppedDma",
+ "Queue[0] InPackets",
+ "Queue[0] OutPackets",
+ "Queue[0] InJumboPackets",
+ "Queue[0] InLroPackets",
+ "Queue[0] InErrors",
+ "Queue[1] InPackets",
+ "Queue[1] OutPackets",
+ "Queue[1] InJumboPackets",
+ "Queue[1] InLroPackets",
+ "Queue[1] InErrors",
+ "Queue[2] InPackets",
+ "Queue[2] OutPackets",
+ "Queue[2] InJumboPackets",
+ "Queue[2] InLroPackets",
+ "Queue[2] InErrors",
+ "Queue[3] InPackets",
+ "Queue[3] OutPackets",
+ "Queue[3] InJumboPackets",
+ "Queue[3] InLroPackets",
+ "Queue[3] InErrors",
+ "Queue[4] InPackets",
+ "Queue[4] OutPackets",
+ "Queue[4] InJumboPackets",
+ "Queue[4] InLroPackets",
+ "Queue[4] InErrors",
+ "Queue[5] InPackets",
+ "Queue[5] OutPackets",
+ "Queue[5] InJumboPackets",
+ "Queue[5] InLroPackets",
+ "Queue[5] InErrors",
+ "Queue[6] InPackets",
+ "Queue[6] OutPackets",
+ "Queue[6] InJumboPackets",
+ "Queue[6] InLroPackets",
+ "Queue[6] InErrors",
+ "Queue[7] InPackets",
+ "Queue[7] OutPackets",
+ "Queue[7] InJumboPackets",
+ "Queue[7] InLroPackets",
+ "Queue[7] InErrors",
+};
+
+static void aq_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+/* ASSERT: Need to add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
+ BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
+ memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+ aq_nic_get_stats(aq_nic, data);
+}
+
+static void aq_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+ u32 firmware_version = aq_nic_get_fw_version(aq_nic);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
+ strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u.%u", firmware_version >> 24,
+ (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
+
+ strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = regs_count;
+ drvinfo->eedump_len = 0;
+}
+
+static void aq_ethtool_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, *aq_ethtool_stat_names,
+ sizeof(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) *
+ aq_ethtool_stat_queue_chars);
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = ARRAY_SIZE(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) *
+ aq_ethtool_stat_queue_lines;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
+{
+ return AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+}
+
+static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ return sizeof(cfg->aq_rss.hash_secret_key);
+}
+
+static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ unsigned int i = 0U;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (indir) {
+ for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
+ indir[i] = cfg->aq_rss.indirection_table[i];
+ }
+ if (key)
+ memcpy(key, cfg->aq_rss.hash_secret_key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+ return 0;
+}
+
+static int aq_ethtool_get_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = cfg->vecs;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+const struct ethtool_ops aq_ethtool_ops = {
+ .get_link = aq_ethtool_get_link,
+ .get_regs_len = aq_ethtool_get_regs_len,
+ .get_regs = aq_ethtool_get_regs,
+ .get_drvinfo = aq_ethtool_get_drvinfo,
+ .get_strings = aq_ethtool_get_strings,
+ .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .get_rxfh_key_size = aq_ethtool_get_rss_key_size,
+ .get_rxfh = aq_ethtool_get_rss,
+ .get_rxnfc = aq_ethtool_get_rxnfc,
+ .get_sset_count = aq_ethtool_get_sset_count,
+ .get_ethtool_stats = aq_ethtool_stats,
+ .get_link_ksettings = aq_ethtool_get_link_ksettings,
+ .set_link_ksettings = aq_ethtool_set_link_ksettings,
+};
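
Concretely, the string table above holds 22 device-wide counters plus 5 per-queue lines for each of the AQ_CFG_VECS_MAX = 8 queues, i.e. 62 entries; a configuration running with 4 vectors therefore exposes 62 - (8 - 4) * 5 = 42 statistics, which is exactly what aq_ethtool_get_sset_count() and aq_ethtool_get_strings() compute.
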
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
new file mode 100644
index 000000000000..21c126eeb5eb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -0,0 +1,19 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.h: Declaration of ethtool related functions. */
+
+#ifndef AQ_ETHTOOL_H
+#define AQ_ETHTOOL_H
+
+#include "aq_common.h"
+
+extern const struct ethtool_ops aq_ethtool_ops;
+
+#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
new file mode 100644
index 000000000000..fce0fd3f23ff
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
+ * functions.
+ */
+
+#ifndef AQ_HW_H
+#define AQ_HW_H
+
+#include "aq_common.h"
+
+/* NIC H/W capabilities */
+struct aq_hw_caps_s {
+ u64 hw_features;
+ u64 link_speed_msk;
+ unsigned int hw_priv_flags;
+ u32 rxds;
+ u32 txds;
+ u32 txhwb_alignment;
+ u32 irq_mask;
+ u32 vecs;
+ u32 mtu;
+ u32 mac_regs_count;
+ u8 ports;
+ u8 msix_irqs;
+ u8 tcs;
+ u8 rxd_alignment;
+ u8 rxd_size;
+ u8 txd_alignment;
+ u8 txd_size;
+ u8 tx_rings;
+ u8 rx_rings;
+ bool flow_control;
+ bool is_64_dma;
+ u32 fw_ver_expected;
+};
+
+struct aq_hw_link_status_s {
+ unsigned int mbps;
+};
+
+#define AQ_HW_IRQ_INVALID 0U
+#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_MSI 2U
+#define AQ_HW_IRQ_MSIX 3U
+
+#define AQ_HW_POWER_STATE_D0 0U
+#define AQ_HW_POWER_STATE_D3 3U
+
+#define AQ_HW_FLAG_STARTED 0x00000004U
+#define AQ_HW_FLAG_STOPPING 0x00000008U
+#define AQ_HW_FLAG_RESETTING 0x00000010U
+#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_LINK_DOWN 0x04000000U
+#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_HW_FLAG_ERR_HW 0x80000000U
+
+#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+
+struct aq_hw_s {
+ struct aq_obj_s header;
+ struct aq_nic_cfg_s *aq_nic_cfg;
+ struct aq_pci_func_s *aq_pci_func;
+ void __iomem *mmio;
+ unsigned int not_ff_addr;
+ struct aq_hw_link_status_s aq_link_status;
+};
+
+struct aq_ring_s;
+struct aq_ring_param_s;
+struct aq_nic_cfg_s;
+struct sk_buff;
+
+struct aq_hw_ops {
+ struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port, struct aq_hw_ops *ops);
+
+ void (*destroy)(struct aq_hw_s *self);
+
+ int (*get_hw_caps)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps);
+
+ int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int frags);
+
+ int (*hw_ring_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int sw_tail_old);
+
+ int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_get_mac_permanent)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac);
+
+ int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+
+ int (*hw_get_link_status)(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status);
+
+ int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*hw_reset)(struct aq_hw_s *self);
+
+ int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr);
+
+ int (*hw_start)(struct aq_hw_s *self);
+
+ int (*hw_stop)(struct aq_hw_s *self);
+
+ int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_tx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_tx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_init)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_rx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask);
+
+ int (*hw_packet_filter_set)(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
+ int (*hw_multicast_list_set)(struct aq_hw_s *self,
+ u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count);
+
+ int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
+ bool itr_enabled);
+
+ int (*hw_rss_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_rss_hash_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_get_regs)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+
+ int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
+ unsigned int *p_count);
+
+ int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+
+ int (*hw_deinit)(struct aq_hw_s *self);
+
+ int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
+};
+
+#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
new file mode 100644
index 000000000000..5f13465995f6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -0,0 +1,68 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.c: Definitions of helper functions used across
+ * hardware layer.
+ */
+
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val)
+{
+ if (msk ^ ~0) {
+ u32 reg_old, reg_new;
+
+ reg_old = aq_hw_read_reg(aq_hw, addr);
+ reg_new = (reg_old & (~msk)) | (val << shift);
+
+ if (reg_old != reg_new)
+ aq_hw_write_reg(aq_hw, addr, reg_new);
+ } else {
+ aq_hw_write_reg(aq_hw, addr, val);
+ }
+}
+
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
+{
+ return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
+}
+
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
+{
+ u32 value = readl(hw->mmio + reg);
+
+ if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
+ aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+
+ return value;
+}
+
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
+{
+ writel(value, hw->mmio + reg);
+}
+
+int aq_hw_err_from_flags(struct aq_hw_s *hw)
+{
+ int err = 0;
+
+ if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
new file mode 100644
index 000000000000..03b72ddbffb9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -0,0 +1,47 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.h: Declaration of helper functions used across hardware
+ * layer.
+ */
+
+#ifndef AQ_HW_UTILS_H
+#define AQ_HW_UTILS_H
+
+#include "aq_common.h"
+
+#ifndef HIDWORD
+#define LODWORD(_qw) ((u32)(_qw))
+#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff))
+#endif
+
+#define AQ_HW_SLEEP(_US_) mdelay(_US_)
+
+#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
+do { \
+ unsigned int AQ_HW_WAIT_FOR_i; \
+ for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
+ --AQ_HW_WAIT_FOR_i) {\
+ udelay(_US_); \
+ } \
+ if (!AQ_HW_WAIT_FOR_i) {\
+ err = -ETIME; \
+ } \
+} while (0)
+
+struct aq_hw_s;
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val);
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+int aq_hw_err_from_flags(struct aq_hw_s *hw);
+
+#endif /* AQ_HW_UTILS_H */
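
AQ_HW_WAIT_FOR() busy-waits for up to _N_ iterations of _US_ microseconds until the condition _B_ becomes true and, on timeout, assigns -ETIME to a local variable named err that the caller must declare itself. A usage sketch with a hypothetical status register:

	int err = 0;

	/* poll a (hypothetical) ready bit for at most 100 * 10us */
	AQ_HW_WAIT_FOR(aq_hw_read_reg(self, FOO_STATUS_REG) & 0x1U, 10U, 100U);
	if (err < 0)
		return err;
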
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
new file mode 100644
index 000000000000..dad63623be6a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -0,0 +1,239 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.c: Main file for aQuantia Linux driver. */
+
+#include "aq_main.h"
+#include "aq_nic.h"
+#include "aq_pci_func.h"
+#include "aq_ethtool.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(AQ_CFG_DRV_VERSION);
+MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
+MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+
+static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+{
+ struct aq_hw_ops *ops = NULL;
+
+ ops = hw_atl_a0_get_ops_by_id(pdev);
+ if (!ops)
+ ops = hw_atl_b0_get_ops_by_id(pdev);
+
+ return ops;
+}
+
+static int aq_ndev_open(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = NULL;
+ int err = 0;
+
+ aq_nic = aq_nic_alloc_hot(ndev);
+ if (!aq_nic) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ err = aq_nic_init(aq_nic);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic_start(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ if (err < 0)
+ aq_nic_deinit(aq_nic);
+ return err;
+}
+
+static int aq_ndev_close(struct net_device *ndev)
+{
+ int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ err = aq_nic_stop(aq_nic);
+ if (err < 0)
+ goto err_exit;
+ aq_nic_deinit(aq_nic);
+ aq_nic_free_hot_resources(aq_nic);
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic_xmit(aq_nic, skb);
+}
+
+static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+
+ if (err < 0)
+ goto err_exit;
+
+ if (netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
+ bool is_lro = false;
+
+ if (aq_cfg->hw_features & NETIF_F_LRO) {
+ is_lro = features & NETIF_F_LRO;
+
+ if (aq_cfg->is_lro != is_lro) {
+ aq_cfg->is_lro = is_lro;
+
+ if (netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = eth_mac_addr(ndev, addr);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic_set_mac(aq_nic, ndev);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
+ if (err < 0)
+ goto err_exit;
+
+ if (netdev_mc_count(ndev)) {
+ err = aq_nic_set_multicast_list(aq_nic, ndev);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:;
+}
+
+static const struct net_device_ops aq_ndev_ops = {
+ .ndo_open = aq_ndev_open,
+ .ndo_stop = aq_ndev_close,
+ .ndo_start_xmit = aq_ndev_start_xmit,
+ .ndo_set_rx_mode = aq_ndev_set_multicast_settings,
+ .ndo_change_mtu = aq_ndev_change_mtu,
+ .ndo_set_mac_address = aq_ndev_set_mac_address,
+ .ndo_set_features = aq_ndev_set_features
+};
+
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct aq_hw_ops *aq_hw_ops = NULL;
+ struct aq_pci_func_s *aq_pci_func = NULL;
+ int err = 0;
+
+ err = pci_enable_device(pdev);
+ if (err < 0)
+ goto err_exit;
+ aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
+ aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
+ &aq_ndev_ops, &aq_ethtool_ops);
+ if (!aq_pci_func) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ err = aq_pci_func_init(aq_pci_func);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ if (err < 0) {
+ if (aq_pci_func)
+ aq_pci_func_free(aq_pci_func);
+ }
+ return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ aq_pci_func_deinit(aq_pci_func);
+ aq_pci_func_free(aq_pci_func);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
new file mode 100644
index 000000000000..9748e7e575e0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -0,0 +1,17 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.h: Main file for aQuantia Linux driver. */
+
+#ifndef AQ_MAIN_H
+#define AQ_MAIN_H
+
+#include "aq_common.h"
+
+#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
new file mode 100644
index 000000000000..ee78444bfb88
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -0,0 +1,990 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.c: Definition of common code for NIC. */
+
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include "aq_pci_func.h"
+#include "aq_nic_internal.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+
+static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params = &cfg->aq_rss;
+ int i = 0;
+
+ static u8 rss_key[40] = {
+ 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+ 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+ 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+ 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+ 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+ };
+
+ rss_params->hash_secret_key_size = sizeof(rss_key);
+ memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
+ rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+
+ for (i = rss_params->indirection_table_size; i--;)
+ rss_params->indirection_table[i] = i & (num_rss_queues - 1);
+}
+
+/* Fills aq_nic_cfg with valid defaults */
+static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->aq_hw_caps = &self->aq_hw_caps;
+
+ cfg->vecs = AQ_CFG_VECS_DEF;
+ cfg->tcs = AQ_CFG_TCS_DEF;
+
+ cfg->rxds = AQ_CFG_RXDS_DEF;
+ cfg->txds = AQ_CFG_TXDS_DEF;
+
+ cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
+
+ cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
+ cfg->itr = cfg->is_interrupt_moderation ?
+ AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+
+ cfg->is_rss = AQ_CFG_IS_RSS_DEF;
+ cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
+ cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
+ cfg->flow_control = AQ_CFG_FC_MODE;
+
+ cfg->mtu = AQ_CFG_MTU_DEF;
+ cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
+ cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
+
+ cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+
+ cfg->vlan_id = 0U;
+
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+int aq_nic_cfg_start(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ /*descriptors */
+ cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
+ cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+
+ /*rss rings */
+ cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ /* cfg->vecs should be power of 2 for RSS */
+ if (cfg->vecs >= 8U)
+ cfg->vecs = 8U;
+ else if (cfg->vecs >= 4U)
+ cfg->vecs = 4U;
+ else if (cfg->vecs >= 2U)
+ cfg->vecs = 2U;
+ else
+ cfg->vecs = 1U;
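+
+	/* The if/else ladder above rounds cfg->vecs down to a power of two,
+	 * capped at 8. An equivalent sketch (assuming the <linux/log2.h>
+	 * helpers were used instead) would be:
+	 *
+	 *	cfg->vecs = min_t(u32, rounddown_pow_of_two(cfg->vecs), 8U);
+	 *
+	 * except that the explicit final 'else' above also covers a zero
+	 * cfg->vecs, which rounddown_pow_of_two() does not.
+	 */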
+
+ cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+
+ if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ (self->aq_hw_caps.vecs == 1U) ||
+ (cfg->vecs == 1U)) {
+ cfg->is_rss = 0U;
+ cfg->vecs = 1U;
+ }
+
+ cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
+ cfg->hw_features = self->aq_hw_caps.hw_features;
+ return 0;
+}
+
+static void aq_nic_service_timer_cb(unsigned long param)
+{
+ struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct net_device *ndev = aq_nic_get_ndev(self);
+ int err = 0;
+ unsigned int i = 0U;
+ struct aq_hw_link_status_s link_status;
+ struct aq_ring_stats_rx_s stats_rx;
+ struct aq_ring_stats_tx_s stats_tx;
+
+ if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+
+ if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
+ if (link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ } else {
+ netif_carrier_off(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ }
+
+ self->link_status = link_status;
+ }
+
+ memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+ memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+ for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ if (self->aq_vec[i])
+ aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
+ }
+
+ ndev->stats.rx_packets = stats_rx.packets;
+ ndev->stats.rx_bytes = stats_rx.bytes;
+ ndev->stats.rx_errors = stats_rx.errors;
+ ndev->stats.tx_packets = stats_tx.packets;
+ ndev->stats.tx_bytes = stats_tx.bytes;
+ ndev->stats.tx_errors = stats_tx.errors;
+
+err_exit:
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+}
+
+static void aq_nic_polling_timer_cb(unsigned long param)
+{
+ struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_isr(i, (void *)aq_vec);
+
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+}
+
+static struct net_device *aq_nic_ndev_alloc(void)
+{
+ return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+}
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *et_ops,
+ struct device *dev,
+ struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ const struct aq_hw_ops *aq_hw_ops)
+{
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+
+ ndev = aq_nic_ndev_alloc();
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ self = netdev_priv(ndev);
+
+ ndev->netdev_ops = ndev_ops;
+ ndev->ethtool_ops = et_ops;
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->if_port = port;
+ ndev->min_mtu = ETH_MIN_MTU;
+ self->ndev = ndev;
+
+ self->aq_pci_func = aq_pci_func;
+
+ self->aq_hw_ops = *aq_hw_ops;
+ self->port = (u8)port;
+
+ self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
+ &self->aq_hw_ops);
+ err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_cfg_init_defaults(self);
+
+err_exit:
+ if (err < 0) {
+ aq_nic_free_hot_resources(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_nic_ndev_register(struct aq_nic_s *self)
+{
+ int err = 0;
+ unsigned int i = 0U;
+
+ if (!self->ndev) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ self->ndev->dev_addr);
+ if (err < 0)
+ goto err_exit;
+
+#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
+ {
+ static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
+
+ ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ }
+#endif
+
+ netif_carrier_off(self->ndev);
+
+ for (i = AQ_CFG_VECS_MAX; i--;)
+ aq_nic_ndev_queue_stop(self, i);
+
+ err = register_netdev(self->ndev);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_ndev_init(struct aq_nic_s *self)
+{
+ struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
+
+ self->ndev->hw_features |= aq_hw_caps->hw_features;
+ self->ndev->features = aq_hw_caps->hw_features;
+ self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+ self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+
+ return 0;
+}
+
+void aq_nic_ndev_free(struct aq_nic_s *self)
+{
+ if (!self->ndev)
+ goto err_exit;
+
+ if (self->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(self->ndev);
+
+ if (self->aq_hw)
+ self->aq_hw_ops.destroy(self->aq_hw);
+
+ free_netdev(self->ndev);
+
+err_exit:;
+}
+
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
+{
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+
+ if (!ndev) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ self = netdev_priv(ndev);
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ if (netif_running(ndev)) {
+ unsigned int i;
+
+ for (i = AQ_CFG_VECS_MAX; i--;)
+ netif_stop_subqueue(ndev, i);
+ }
+
+ for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_nic_free_hot_resources(self);
+ self = NULL;
+ }
+ return self;
+}
+
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring)
+{
+ self->aq_ring_tx[idx] = ring;
+}
+
+struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
+{
+ return self->ndev;
+}
+
+int aq_nic_init(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+
+ self->power_state = AQ_HW_POWER_STATE_D0;
+ err = self->aq_hw_ops.hw_reset(self->aq_hw);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
+ aq_nic_get_ndev(self)->dev_addr);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+
+err_exit:
+ return err;
+}
+
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
+{
+ netif_start_subqueue(self->ndev, idx);
+}
+
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
+{
+ netif_stop_subqueue(self->ndev, idx);
+}
+
+int aq_nic_start(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+
+ err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ err = aq_vec_start(aq_vec);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = self->aq_hw_ops.hw_start(self->aq_hw);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+ if (err < 0)
+ goto err_exit;
+ setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
+ (unsigned long)self);
+ mod_timer(&self->service_timer, jiffies +
+ AQ_CFG_SERVICE_TIMER_INTERVAL);
+
+ if (self->aq_nic_cfg.is_polling) {
+ setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
+ (unsigned long)self);
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+ } else {
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+ self->ndev->name, aq_vec,
+ aq_vec_get_affinity_mask(aq_vec));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+ AQ_CFG_IRQ_MASK);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_nic_ndev_queue_start(self, i);
+
+ err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+ if (err < 0)
+ goto err_exit;
+
+ err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+ struct sk_buff *skb,
+ struct aq_ring_s *ring)
+{
+ unsigned int ret = 0U;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int frag_count = 0U;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+
+ if (unlikely(skb_is_gso(skb))) {
+ dx_buff->flags = 0U;
+ dx_buff->len_pkt = skb->len;
+ dx_buff->len_l2 = ETH_HLEN;
+ dx_buff->len_l3 = ip_hdrlen(skb);
+ dx_buff->len_l4 = tcp_hdrlen(skb);
+ dx_buff->mss = skb_shinfo(skb)->gso_size;
+ dx_buff->is_txc = 1U;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+ ++ret;
+ }
+
+ dx_buff->flags = 0U;
+ dx_buff->len = skb_headlen(skb);
+ dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+ skb->data,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+ goto exit;
+
+ dx_buff->len_pkt = skb->len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
+ 1U : 0U;
+ dx_buff->is_tcp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
+ dx_buff->is_udp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+ }
+
+ for (; nr_frags--; ++frag_count) {
+ unsigned int frag_len = 0U;
+ dma_addr_t frag_pa;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
+
+ frag_len = skb_frag_size(frag);
+ frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
+ goto mapping_error;
+
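+		/* Fragments larger than AQ_CFG_TX_FRAME_MAX are split below
+		 * into several descriptors sharing one DMA mapping.
+		 * Illustrative example, assuming AQ_CFG_TX_FRAME_MAX were 16K:
+		 * a 40K fragment becomes three descriptors of 16K, 16K and 8K,
+		 * with frag_pa advanced by 16K for each full chunk.
+		 */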
+ while (frag_len > AQ_CFG_TX_FRAME_MAX) {
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+
+ frag_len -= AQ_CFG_TX_FRAME_MAX;
+ frag_pa += AQ_CFG_TX_FRAME_MAX;
+ ++ret;
+ }
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = frag_len;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+ }
+
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = skb;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->is_txc && dx_buff->pa) {
+ if (unlikely(dx_buff->is_sop)) {
+ dma_unmap_single(aq_nic_get_dev(self),
+ dx_buff->pa,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(aq_nic_get_dev(self),
+ dx_buff->pa,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+__releases(&ring->lock)
+__acquires(&ring->lock)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int frags = 0U;
+ unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
+ unsigned int tc = 0U;
+ unsigned int trys = AQ_CFG_LOCK_TRYS;
+ int err = NETDEV_TX_OK;
+ bool is_nic_in_bad_state;
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
+ AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
+ (aq_ring_avail_dx(ring) <
+ AQ_CFG_SKB_FRAGS_MAX);
+
+ if (is_nic_in_bad_state) {
+ aq_nic_ndev_queue_stop(self, ring->idx);
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
+ do {
+ if (spin_trylock(&ring->header.lock)) {
+ frags = aq_nic_map_skb(self, skb, ring);
+
+ if (likely(frags)) {
+ err = self->aq_hw_ops.hw_ring_tx_xmit(
+ self->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ if (aq_ring_avail_dx(ring) <
+ AQ_CFG_SKB_FRAGS_MAX + 1)
+ aq_nic_ndev_queue_stop(
+ self,
+ ring->idx);
+
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+
+ spin_unlock(&ring->header.lock);
+ break;
+ }
+ } while (--trys);
+
+ if (!trys) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+ if (err < 0)
+ goto err_exit;
+
+ self->packet_filter = flags;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+{
+ struct netdev_hw_addr *ha = NULL;
+ unsigned int i = 0U;
+
+ self->mc_list.count = 0U;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+ ++self->mc_list.count;
+ }
+
+ return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+}
+
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+{
+ int err = 0;
+
+ if (new_mtu > self->aq_hw_caps.mtu) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ self->aq_nic_cfg.mtu = new_mtu;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
+{
+ return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+}
+
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
+{
+ return self->link_status.mbps;
+}
+
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
+{
+ u32 *regs_buff = p;
+ int err = 0;
+
+ regs->version = 1;
+
+ err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
+ &self->aq_hw_caps, regs_buff);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_get_regs_count(struct aq_nic_s *self)
+{
+ return self->aq_hw_caps.mac_regs_count;
+}
+
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+ unsigned int count = 0U;
+ int err = 0;
+
+ err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
+ if (err < 0)
+ goto err_exit;
+
+ data += count;
+ count = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ data += count;
+ aq_vec_get_sw_stats(aq_vec, data, &count);
+ }
+
+err_exit:;
+ (void)err;
+}
+
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd)
+{
+ cmd->base.port = PORT_TP;
+ /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+
+ if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+
+ if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 5000baseT_Full);
+
+ if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 2500baseT_Full);
+
+ if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+
+ if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+
+ if (self->aq_hw_caps.flow_control)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Pause);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ if (self->aq_nic_cfg.is_autoneg)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 5000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 2500baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+
+ if (self->aq_nic_cfg.flow_control)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+}
+
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u32 speed = 0U;
+ u32 rate = 0U;
+ int err = 0;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ rate = self->aq_hw_caps.link_speed_msk;
+ self->aq_nic_cfg.is_autoneg = true;
+ } else {
+ speed = cmd->base.speed;
+
+ switch (speed) {
+ case SPEED_100:
+ rate = AQ_NIC_RATE_100M;
+ break;
+
+ case SPEED_1000:
+ rate = AQ_NIC_RATE_1G;
+ break;
+
+ case SPEED_2500:
+ rate = AQ_NIC_RATE_2GS;
+ break;
+
+ case SPEED_5000:
+ rate = AQ_NIC_RATE_5G;
+ break;
+
+ case SPEED_10000:
+ rate = AQ_NIC_RATE_10G;
+ break;
+
+ default:
+ err = -1;
+ goto err_exit;
+ }
+ if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+ err = -1;
+ goto err_exit;
+ }
+
+ self->aq_nic_cfg.is_autoneg = false;
+ }
+
+ err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_nic_cfg.link_speed_msk = rate;
+
+err_exit:
+ return err;
+}
+
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
+{
+ return &self->aq_nic_cfg;
+}
+
+u32 aq_nic_get_fw_version(struct aq_nic_s *self)
+{
+ u32 fw_version = 0U;
+
+ self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+
+ return fw_version;
+}
+
+int aq_nic_stop(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_nic_ndev_queue_stop(self, i);
+
+ del_timer_sync(&self->service_timer);
+
+ self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+
+ if (self->aq_nic_cfg.is_polling)
+ del_timer_sync(&self->polling_timer);
+ else
+ aq_pci_func_free_irqs(self->aq_pci_func);
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_stop(aq_vec);
+
+ return self->aq_hw_ops.hw_stop(self->aq_hw);
+}
+
+void aq_nic_deinit(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_deinit(aq_vec);
+
+ if (self->power_state == AQ_HW_POWER_STATE_D0) {
+ (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+ } else {
+ (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+ self->power_state);
+ }
+
+err_exit:;
+}
+
+void aq_nic_free_hot_resources(struct aq_nic_s *self)
+{
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ if (self->aq_vec[i])
+ aq_vec_free(self->aq_vec[i]);
+ }
+
+err_exit:;
+}
+
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
+{
+ int err = 0;
+
+ if (!netif_running(self->ndev)) {
+ err = 0;
+ goto out;
+ }
+ rtnl_lock();
+ if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
+ self->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(self->ndev);
+ netif_tx_stop_all_queues(self->ndev);
+
+ err = aq_nic_stop(self);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_deinit(self);
+ } else {
+ err = aq_nic_init(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_start(self);
+ if (err < 0)
+ goto err_exit;
+
+ netif_device_attach(self->ndev);
+ netif_tx_start_all_queues(self->ndev);
+ }
+
+err_exit:
+ rtnl_unlock();
+out:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
new file mode 100644
index 000000000000..7fc2a5ecb2b7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -0,0 +1,110 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.h: Declaration of common code for NIC. */
+
+#ifndef AQ_NIC_H
+#define AQ_NIC_H
+
+#include "aq_common.h"
+#include "aq_rss.h"
+
+struct aq_ring_s;
+struct aq_pci_func_s;
+struct aq_hw_ops;
+
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
+struct aq_nic_cfg_s {
+ struct aq_hw_caps_s *aq_hw_caps;
+ u64 hw_features;
+ u32 rxds; /* rx ring size, descriptors # */
+ u32 txds; /* tx ring size, descriptors # */
+ u32 vecs; /* vecs==allocated irqs */
+ u32 irq_type;
+ u32 itr;
+ u32 num_rss_queues;
+ u32 mtu;
+ u32 ucp_0x364;
+ u32 flow_control;
+ u32 link_speed_msk;
+ u32 vlan_id;
+ u16 is_mc_list_enabled;
+ u16 mc_list_count;
+ bool is_autoneg;
+ bool is_interrupt_moderation;
+ bool is_polling;
+ bool is_rss;
+ bool is_lro;
+ u8 tcs;
+ struct aq_rss_parameters aq_rss;
+};
+
+#define AQ_NIC_FLAG_STARTED 0x00000004U
+#define AQ_NIC_FLAG_STOPPING 0x00000008U
+#define AQ_NIC_FLAG_RESETTING 0x00000010U
+#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_LINK_DOWN 0x04000000U
+#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+
+#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
+ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
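+
+/* Example (illustrative, assuming AQ_CFG_TCS_MAX were 8): for traffic class 0
+ * the ring index equals the vector index, so AQ_NIC_TCVEC2RING(nic, 0, 3)
+ * selects aq_ring_tx[3]; a second traffic class would start at index 8.
+ * Note that the _NIC_ argument is not used by the expansion.
+ */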
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *et_ops,
+ struct device *dev,
+ struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ const struct aq_hw_ops *aq_hw_ops);
+int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring);
+struct device *aq_nic_get_dev(struct aq_nic_s *self);
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
+int aq_nic_init(struct aq_nic_s *self);
+int aq_nic_cfg_start(struct aq_nic_s *self);
+int aq_nic_ndev_register(struct aq_nic_s *self);
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_free(struct aq_nic_s *self);
+int aq_nic_start(struct aq_nic_s *self);
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
+int aq_nic_get_regs_count(struct aq_nic_s *self);
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+int aq_nic_stop(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_free_hot_resources(struct aq_nic_s *self);
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev);
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self);
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd);
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd);
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
+u32 aq_nic_get_fw_version(struct aq_nic_s *self);
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+
+#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
new file mode 100644
index 000000000000..e7d2711dc165
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
@@ -0,0 +1,45 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic_internal.h: Definition of private object structure. */
+
+#ifndef AQ_NIC_INTERNAL_H
+#define AQ_NIC_INTERNAL_H
+
+struct aq_nic_s {
+ struct aq_obj_s header;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ struct aq_pci_func_s *aq_pci_func;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ struct aq_hw_ops aq_hw_ops;
+ struct aq_hw_caps_s aq_hw_caps;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+};
+
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
new file mode 100644
index 000000000000..581de71a958a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -0,0 +1,292 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.c: Definition of PCI functions. */
+
+#include "aq_pci_func.h"
+#include "aq_nic.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include <linux/interrupt.h>
+
+struct aq_pci_func_s {
+ struct pci_dev *pdev;
+ struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
+ void __iomem *mmio;
+ void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
+ resource_size_t mmio_pa;
+ unsigned int msix_entry_mask;
+ unsigned int ports;
+ bool is_pci_enabled;
+ bool is_regions;
+ bool is_pci_using_dac;
+ struct aq_hw_caps_s aq_hw_caps;
+};
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
+ struct pci_dev *pdev,
+ const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *eth_ops)
+{
+ struct aq_pci_func_s *self = NULL;
+ int err = 0;
+ unsigned int port = 0U;
+
+ if (!aq_hw_ops) {
+ err = -EFAULT;
+ goto err_exit;
+ }
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ pci_set_drvdata(pdev, self);
+ self->pdev = pdev;
+
+ err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ self->ports = self->aq_hw_caps.ports;
+
+ for (port = 0; port < self->ports; ++port) {
+ struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
+ &pdev->dev, self,
+ port, aq_hw_ops);
+
+ if (!aq_nic) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ self->port[port] = aq_nic;
+ }
+
+err_exit:
+ if (err < 0) {
+ if (self)
+ aq_pci_func_free(self);
+ self = NULL;
+ }
+
+ (void)err;
+ return self;
+}
+
+int aq_pci_func_init(struct aq_pci_func_s *self)
+{
+ int err = 0;
+ unsigned int bar = 0U;
+ unsigned int port = 0U;
+
+ err = pci_enable_device(self->pdev);
+ if (err < 0)
+ goto err_exit;
+
+ self->is_pci_enabled = true;
+
+ err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ self->is_pci_using_dac = 1;
+ }
+ if (err) {
+ err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(self->pdev,
+ DMA_BIT_MASK(32));
+ self->is_pci_using_dac = 0;
+ }
+ if (err != 0) {
+ err = -ENOSR;
+ goto err_exit;
+ }
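+
+	/* The block above prefers a 64-bit DMA mask and falls back to 32-bit.
+	 * A compact sketch of the same policy (assuming the consolidated
+	 * dma_set_mask_and_coherent() helper were used, and leaving out the
+	 * is_pci_using_dac bookkeeping) would be:
+	 *
+	 *	if (dma_set_mask_and_coherent(&self->pdev->dev, DMA_BIT_MASK(64)))
+	 *		err = dma_set_mask_and_coherent(&self->pdev->dev,
+	 *						DMA_BIT_MASK(32));
+	 */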
+
+ err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+ if (err < 0)
+ goto err_exit;
+
+ self->is_regions = true;
+
+ pci_set_master(self->pdev);
+
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
+ resource_size_t reg_sz;
+
+ self->mmio_pa = pci_resource_start(self->pdev, bar);
+ if (self->mmio_pa == 0U) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ reg_sz = pci_resource_len(self->pdev, bar);
+ if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
+ if (!self->mmio) {
+ err = -EIO;
+ goto err_exit;
+ }
+ break;
+ }
+ }
+
+	/* enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+ err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
+ self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (err < 0)
+ goto err_exit;
+ }
+#endif
+
+ /* net device init */
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ err = aq_nic_cfg_start(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_ndev_init(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_ndev_register(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0)
+ aq_pci_func_deinit(self);
+ return err;
+}
+
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+ char *name, void *aq_vec, cpumask_t *affinity_mask)
+{
+ struct pci_dev *pdev = self->pdev;
+ int err = 0;
+
+ if (pdev->msix_enabled || pdev->msi_enabled)
+ err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
+ name, aq_vec);
+ else
+ err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
+ IRQF_SHARED, name, aq_vec);
+
+ if (err >= 0) {
+ self->msix_entry_mask |= (1 << i);
+ self->aq_vec[i] = aq_vec;
+
+ if (pdev->msix_enabled)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
+ }
+
+ return err;
+}
+
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+{
+ struct pci_dev *pdev = self->pdev;
+ unsigned int i = 0U;
+
+ for (i = 32U; i--;) {
+ if (!((1U << i) & self->msix_entry_mask))
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+ if (pdev->msix_enabled)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ self->msix_entry_mask &= ~(1U << i);
+ }
+}
+
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
+{
+ return self->mmio;
+}
+
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+{
+ if (self->pdev->msix_enabled)
+ return AQ_HW_IRQ_MSIX;
+ if (self->pdev->msi_enabled)
+		return AQ_HW_IRQ_MSI;
+ return AQ_HW_IRQ_LEGACY;
+}
+
+void aq_pci_func_deinit(struct aq_pci_func_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ aq_pci_func_free_irqs(self);
+ pci_free_irq_vectors(self->pdev);
+
+ if (self->is_regions)
+ pci_release_regions(self->pdev);
+
+ if (self->is_pci_enabled)
+ pci_disable_device(self->pdev);
+
+err_exit:;
+}
+
+void aq_pci_func_free(struct aq_pci_func_s *self)
+{
+ unsigned int port = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ aq_nic_ndev_free(self->port[port]);
+ }
+
+ kfree(self);
+
+err_exit:;
+}
+
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+ pm_message_t *pm_msg)
+{
+ int err = 0;
+ unsigned int port = 0U;
+
+ if (!self) {
+ err = -EFAULT;
+ goto err_exit;
+ }
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
new file mode 100644
index 000000000000..ecb033791203
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.h: Declaration of PCI functions. */
+
+#ifndef AQ_PCI_FUNC_H
+#define AQ_PCI_FUNC_H
+
+#include "aq_common.h"
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
+ struct pci_dev *pdev,
+ const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *eth_ops);
+int aq_pci_func_init(struct aq_pci_func_s *self);
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+ char *name, void *aq_vec,
+ cpumask_t *affinity_mask);
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
+int aq_pci_func_start(struct aq_pci_func_s *self);
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
+void aq_pci_func_deinit(struct aq_pci_func_s *self);
+void aq_pci_func_free(struct aq_pci_func_s *self);
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+ pm_message_t *pm_msg);
+
+#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
new file mode 100644
index 000000000000..0358e6072d45
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -0,0 +1,326 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
+
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic)
+{
+ int err = 0;
+
+ self->buff_ring =
+ kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
+
+ if (!self->buff_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
+ self->size * self->dx_size,
+ &self->dx_ring_pa, GFP_KERNEL);
+ if (!self->dx_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->txds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->rxds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_ring_init(struct aq_ring_s *self)
+{
+ self->hw_head = 0;
+ self->sw_head = 0;
+ self->sw_tail = 0;
+ return 0;
+}
+
+void aq_ring_tx_clean(struct aq_ring_s *self)
+{
+ struct device *dev = aq_nic_get_dev(self->aq_nic);
+
+ for (; self->sw_head != self->hw_head;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ if (likely(buff->is_mapped)) {
+ if (unlikely(buff->is_sop))
+ dma_unmap_single(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ }
+
+ if (unlikely(buff->is_eop))
+ dev_kfree_skb_any(buff->skb);
+ }
+}
+
+static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
+ unsigned int t)
+{
+ return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
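+
+/* aq_ring_dx_in_range() checks whether index i lies strictly between h and t
+ * on the circular ring, handling wrap-around. Examples (illustrative):
+ * h = 2, i = 5, t = 7 -> true; h = 6, t = 2, i = 0 -> true (wrapped);
+ * h = 6, t = 2, i = 4 -> false.
+ */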
+
+#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
+{
+ struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+ int err = 0;
+ bool is_rsc_completed = true;
+
+ for (; (self->sw_head != self->hw_head) && budget;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ unsigned int i = 0U;
+ struct aq_ring_buff_s *buff_ = NULL;
+
+ if (buff->is_error) {
+ __free_pages(buff->page, 0);
+ continue;
+ }
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ for (next_ = buff->next,
+ buff_ = &self->buff_ring[next_]; true;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_]) {
+ is_rsc_completed =
+ aq_ring_dx_in_range(self->sw_head,
+ next_,
+ self->hw_head);
+
+ if (unlikely(!is_rsc_completed)) {
+ is_rsc_completed = false;
+ break;
+ }
+
+ if (buff_->is_eop)
+ break;
+ }
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ }
+
+ /* for single fragment packets use build_skb() */
+ if (buff->is_eop) {
+ skb = build_skb(page_address(buff->page),
+ buff->len + AQ_SKB_ALIGN);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ skb_put(skb, buff->len);
+ } else {
+ skb = netdev_alloc_skb(ndev, ETH_HLEN);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ skb_put(skb, ETH_HLEN);
+ memcpy(skb->data, page_address(buff->page), ETH_HLEN);
+
+ skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
+ buff->len - ETH_HLEN,
+ SKB_TRUESIZE(buff->len - ETH_HLEN));
+
+ for (i = 1U, next_ = buff->next,
+ buff_ = &self->buff_ring[next_]; true;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_], ++i) {
+ skb_add_rx_frag(skb, i, buff_->page, 0,
+ buff_->len,
+ SKB_TRUESIZE(buff->len -
+ ETH_HLEN));
+ buff_->is_cleaned = 1;
+
+ if (buff_->is_eop)
+ break;
+ }
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (unlikely(buff->is_cso_err)) {
+ ++self->stats.rx.errors;
+ __skb_mark_checksum_bad(skb);
+ } else {
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+
+ skb_record_rx_queue(skb, self->idx);
+
+ netif_receive_skb(skb);
+
+ ++self->stats.rx.packets;
+ self->stats.rx.bytes += skb->len;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_fill(struct aq_ring_s *self)
+{
+ unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+ (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
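+	/* pages_order above is the allocation order needed for one RX frame.
+	 * Worked example (illustrative): if AQ_CFG_RX_FRAME_MAX were 2K on 4K
+	 * pages, 2048 / 4096 == 0 plus 1 for the remainder gives fls(1) - 1 ==
+	 * 0, i.e. a single page; a 16K frame on 4K pages gives fls(4) - 1 == 2,
+	 * i.e. a four-page allocation.
+	 */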
+ struct aq_ring_buff_s *buff = NULL;
+ int err = 0;
+ int i = 0;
+
+ for (i = aq_ring_avail_dx(self); i--;
+ self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
+ buff = &self->buff_ring[self->sw_tail];
+
+ buff->flags = 0U;
+ buff->len = AQ_CFG_RX_FRAME_MAX;
+
+ buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
+ __GFP_COMP, pages_order);
+ if (!buff->page) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
+ buff->page, 0,
+ AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ buff = NULL;
+ }
+
+err_exit:
+ if (err < 0) {
+ if (buff && buff->page)
+ __free_pages(buff->page, 0);
+ }
+
+ return err;
+}
+
+void aq_ring_rx_deinit(struct aq_ring_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ for (; self->sw_head != self->sw_tail;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
+ AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+ __free_pages(buff->page, 0);
+ }
+
+err_exit:;
+}
+
+void aq_ring_free(struct aq_ring_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ kfree(self->buff_ring);
+
+ if (self->dx_ring)
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size, self->dx_ring,
+ self->dx_ring_pa);
+
+err_exit:;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
new file mode 100644
index 000000000000..257254645068
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -0,0 +1,153 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
+
+#ifndef AQ_RING_H
+#define AQ_RING_H
+
+#include "aq_common.h"
+
+struct page;
+
+/* TxC SOP DX EOP
+ * +----------+----------+----------+-----------
+ * 8bytes|len l3,l4 | pa | pa | pa
+ * +----------+----------+----------+-----------
+ * 4/8bytes|len pkt |len pkt | | skb
+ * +----------+----------+----------+-----------
+ * 4/8bytes|is_txc |len,flags |len |len,is_eop
+ * +----------+----------+----------+-----------
+ *
+ * This aq_ring_buff_s has no endianness dependency.
+ * It is __packed for cache line optimizations.
+ */
+struct __packed aq_ring_buff_s {
+ union {
+ /* RX */
+ struct {
+ u32 rss_hash;
+ u16 next;
+ u8 is_hash_l4;
+ u8 rsvd1;
+ struct page *page;
+ };
+ /* EOP */
+ struct {
+ dma_addr_t pa_eop;
+ struct sk_buff *skb;
+ };
+ /* DX */
+ struct {
+ dma_addr_t pa;
+ };
+ /* SOP */
+ struct {
+ dma_addr_t pa_sop;
+ u32 len_pkt_sop;
+ };
+ /* TxC */
+ struct {
+ u32 mss;
+ u8 len_l2;
+ u8 len_l3;
+ u8 len_l4;
+ u8 rsvd2;
+ u32 len_pkt;
+ };
+ };
+ union {
+ struct {
+ u32 len:16;
+ u32 is_ip_cso:1;
+ u32 is_udp_cso:1;
+ u32 is_tcp_cso:1;
+ u32 is_cso_err:1;
+ u32 is_sop:1;
+ u32 is_eop:1;
+ u32 is_txc:1;
+ u32 is_mapped:1;
+ u32 is_cleaned:1;
+ u32 is_error:1;
+ u32 rsvd3:6;
+ };
+ u32 flags;
+ };
+};
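+
+/* Usage note: aq_nic_map_skb() fills one TxC entry per GSO skb (is_txc, mss,
+ * len_l2/l3/l4), then an SOP entry (is_sop, pa, len) for the linear part and
+ * one DX entry per mapped chunk, with is_eop and the skb pointer set on the
+ * last descriptor so that aq_ring_tx_clean() can free it.
+ */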
+
+struct aq_ring_stats_rx_s {
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+ u64 lro_packets;
+ u64 jumbo_packets;
+};
+
+struct aq_ring_stats_tx_s {
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+};
+
+union aq_ring_stats_s {
+ struct aq_ring_stats_rx_s rx;
+ struct aq_ring_stats_tx_s tx;
+};
+
+struct aq_ring_s {
+ struct aq_obj_s header;
+ struct aq_ring_buff_s *buff_ring;
+ u8 *dx_ring; /* descriptors ring, dma shared mem */
+ struct aq_nic_s *aq_nic;
+	unsigned int idx; /* for HW layer register operations */
+ unsigned int hw_head;
+ unsigned int sw_head;
+ unsigned int sw_tail;
+	unsigned int size; /* number of descriptors */
+	unsigned int dx_size; /* TX or RX descriptor size, */
+				/* stored here for faster math */
+ union aq_ring_stats_s stats;
+ dma_addr_t dx_ring_pa;
+};
+
+struct aq_ring_param_s {
+ unsigned int vec_idx;
+ unsigned int cpu;
+ cpumask_t affinity_mask;
+};
+
+static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
+ unsigned int dx)
+{
+ return (++dx >= self->size) ? 0U : dx;
+}
+
+static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+{
+ return (((self->sw_tail >= self->sw_head)) ?
+ (self->size - 1) - self->sw_tail + self->sw_head :
+ self->sw_head - self->sw_tail - 1);
+}
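+
+/* Worked example (illustrative): with size == 8, sw_head == 2 and
+ * sw_tail == 6, aq_ring_avail_dx() returns (8 - 1) - 6 + 2 == 3 free
+ * descriptors, and aq_ring_next_dx(ring, 7) wraps back to 0. One slot is
+ * always kept unused so that sw_head == sw_tail unambiguously means the
+ * ring is empty.
+ */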
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_init(struct aq_ring_s *self);
+void aq_ring_rx_deinit(struct aq_ring_s *self);
+void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
+int aq_ring_rx_fill(struct aq_ring_s *self);
+
+#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
new file mode 100644
index 000000000000..1db6eb20a8f2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
@@ -0,0 +1,26 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_rss.h: Receive Side Scaling definitions. */
+
+#ifndef AQ_RSS_H
+#define AQ_RSS_H
+
+#include "aq_common.h"
+#include "aq_cfg.h"
+
+struct aq_rss_parameters {
+ u16 base_cpu_number;
+ u16 indirection_table_size;
+ u16 hash_secret_key_size;
+ u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)];
+ u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
+};
+
+#endif /* AQ_RSS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
new file mode 100644
index 000000000000..f6012b34abe6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -0,0 +1,49 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_utils.h: Useful macros and structures used in all layers of the driver. */
+
+#ifndef AQ_UTILS_H
+#define AQ_UTILS_H
+
+#include "aq_common.h"
+
+#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
+
+struct aq_obj_s {
+ spinlock_t lock; /* spinlock for nic/rings processing */
+ atomic_t flags;
+};
+
+static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old | (mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old & ~(mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask)
+{
+ return atomic_read(flags) & mask;
+}
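+
+/* These helpers implement lock-free set/clear/test of flag bits with an
+ * atomic compare-and-swap retry loop, so several contexts (e.g. the service
+ * timer and the transmit path) can update the same flags word without a lock.
+ * Typical usage, as in aq_nic.c:
+ *
+ *	aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ *	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ *		return;
+ */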
+
+#endif /* AQ_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
new file mode 100644
index 000000000000..ad5b4d4dac7f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -0,0 +1,396 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
+ * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
+ */
+
+#include "aq_vec.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+
+struct aq_vec_s {
+ struct aq_obj_s header;
+ struct aq_hw_ops *aq_hw_ops;
+ struct aq_hw_s *aq_hw;
+ struct aq_nic_s *aq_nic;
+ unsigned int tx_rings;
+ unsigned int rx_rings;
+ struct aq_ring_param_s aq_ring_param;
+ struct napi_struct napi;
+ struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
+};
+
+#define AQ_VEC_TX_ID 0
+#define AQ_VEC_RX_ID 1
+
+static int aq_vec_poll(struct napi_struct *napi, int budget)
+__releases(&self->lock)
+__acquires(&self->lock)
+{
+ struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+ struct aq_ring_s *ring = NULL;
+ int work_done = 0;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int sw_tail_old = 0U;
+ bool was_tx_cleaned = false;
+
+ if (!self) {
+ err = -EINVAL;
+ } else if (spin_trylock(&self->header.lock)) {
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ if (self->aq_hw_ops->hw_ring_tx_head_update) {
+ err = self->aq_hw_ops->hw_ring_tx_head_update(
+ self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (ring[AQ_VEC_TX_ID].sw_head !=
+ ring[AQ_VEC_TX_ID].hw_head) {
+ aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+
+ if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
+ AQ_CFG_SKB_FRAGS_MAX) {
+ aq_nic_ndev_queue_start(self->aq_nic,
+ ring[AQ_VEC_TX_ID].idx);
+ }
+ was_tx_cleaned = true;
+ }
+
+ err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ if (ring[AQ_VEC_RX_ID].sw_head !=
+ ring[AQ_VEC_RX_ID].hw_head) {
+ err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
+ &work_done,
+ budget - work_done);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(
+ self->aq_hw,
+ &ring[AQ_VEC_RX_ID], sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+ }
+
+ if (was_tx_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ }
+
+err_exit:
+ spin_unlock(&self->header.lock);
+ }
+
+ return work_done;
+}
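+
+/* NAPI contract as implemented above: when the vector consumed less than its
+ * budget, the poll routine calls napi_complete_done() and re-arms only this
+ * vector's interrupt (1U << vec_idx); when any TX work was completed it
+ * reports the full budget, so the kernel schedules another poll instead of
+ * re-enabling the interrupt.
+ */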
+
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_vec_s *self = NULL;
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ self->aq_nic = aq_nic;
+ self->aq_ring_param.vec_idx = idx;
+ self->aq_ring_param.cpu =
+ idx + aq_nic_cfg->aq_rss.base_cpu_number;
+
+ cpumask_set_cpu(self->aq_ring_param.cpu,
+ &self->aq_ring_param.affinity_mask);
+
+ self->tx_rings = 0;
+ self->rx_rings = 0;
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
+ aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+
+ for (i = 0; i < aq_nic_cfg->tcs; ++i) {
+		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
+ self->tx_rings,
+ self->aq_ring_param.vec_idx);
+
+ ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->tx_rings;
+
+ aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+
+ ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->rx_rings;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_vec_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self->aq_hw_ops = aq_hw_ops;
+ self->aq_hw = aq_hw;
+
+ spin_lock_init(&self->header.lock);
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
+ &ring[AQ_VEC_TX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
+ &ring[AQ_VEC_RX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
+ &ring[AQ_VEC_RX_ID], 0U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_vec_start(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ napi_enable(&self->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_vec_stop(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+
+ self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ }
+
+ napi_disable(&self->napi);
+}
+
+void aq_vec_deinit(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
+ }
+err_exit:;
+}
+
+void aq_vec_free(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ aq_ring_free(&ring[AQ_VEC_TX_ID]);
+ aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
+
+ netif_napi_del(&self->napi);
+
+ kfree(self);
+
+err_exit:;
+}
+
+irqreturn_t aq_vec_isr(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ int err = 0;
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&self->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ u64 irq_mask = 0U;
+ irqreturn_t err = 0;
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
+ if (err < 0)
+ goto err_exit;
+
+ if (irq_mask) {
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ napi_schedule(&self->napi);
+ } else {
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
+ err = IRQ_NONE;
+ }
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
+{
+ return &self->aq_ring_param.affinity_mask;
+}
+
+void aq_vec_add_stats(struct aq_vec_s *self,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int r = 0U;
+
+ for (r = 0U, ring = self->ring[0];
+ self->tx_rings > r; ++r, ring = self->ring[r]) {
+ struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
+ struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
+
+ stats_rx->packets += rx->packets;
+ stats_rx->bytes += rx->bytes;
+ stats_rx->errors += rx->errors;
+ stats_rx->jumbo_packets += rx->jumbo_packets;
+ stats_rx->lro_packets += rx->lro_packets;
+
+ stats_tx->packets += tx->packets;
+ stats_tx->bytes += tx->bytes;
+ stats_tx->errors += tx->errors;
+ }
+}
+
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
+{
+ unsigned int count = 0U;
+ struct aq_ring_stats_rx_s stats_rx;
+ struct aq_ring_stats_tx_s stats_tx;
+
+ memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+ memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+ aq_vec_add_stats(self, &stats_rx, &stats_tx);
+
+ data[count] += stats_rx.packets;
+ data[++count] += stats_tx.packets;
+ data[++count] += stats_rx.jumbo_packets;
+ data[++count] += stats_rx.lro_packets;
+ data[++count] += stats_rx.errors;
+
+ if (p_count)
+ *p_count = ++count;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
new file mode 100644
index 000000000000..6c68b184236c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -0,0 +1,42 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings.
+ * Declaration of functions for Rx and Tx rings.
+ */
+
+#ifndef AQ_VEC_H
+#define AQ_VEC_H
+
+#include "aq_common.h"
+#include <linux/irqreturn.h>
+
+struct aq_hw_s;
+struct aq_hw_ops;
+struct aq_ring_stats_rx_s;
+struct aq_ring_stats_tx_s;
+
+irqreturn_t aq_vec_isr(int irq, void *private);
+irqreturn_t aq_vec_isr_legacy(int irq, void *private);
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw);
+void aq_vec_deinit(struct aq_vec_s *self);
+void aq_vec_free(struct aq_vec_s *self);
+int aq_vec_start(struct aq_vec_s *self);
+void aq_vec_stop(struct aq_vec_s *self);
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
+ unsigned int *p_count);
+void aq_vec_add_stats(struct aq_vec_s *self,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx);
+
+#endif /* AQ_VEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
new file mode 100644
index 000000000000..a2b746a2dd50
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -0,0 +1,905 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_a0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_a0_internal.h"
+
+static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+ return 0;
+}
+
+static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ struct aq_hw_ops *ops)
+{
+ struct hw_atl_s *self = NULL;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self)
+ goto err_exit;
+
+ self->base.aq_pci_func = aq_pci_func;
+
+ self->base.not_ff_addr = 0x10U;
+
+err_exit:
+ return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_a0_destroy(struct aq_hw_s *self)
+{
+ kfree(self);
+}
+
+static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ glb_glb_reg_res_dis_set(self, 1U);
+ pci_pci_reg_res_dis_set(self, 0U);
+ rx_rx_reg_res_dis_set(self, 0U);
+ tx_tx_reg_res_dis_set(self, 0U);
+
+ HW_ATL_FLUSH();
+ glb_soft_res_set(self, 1);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ itr_irq_reg_res_dis_set(self, 0U);
+ itr_res_irq_set(self, 1U);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
+{
+ u32 tc = 0U;
+ u32 buff_size = 0U;
+ unsigned int i_priority = 0U;
+ bool is_rx_flow_control = false;
+
+ /* TPS Descriptor rate init */
+ tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_A0_TXBUF_MAX;
+
+ tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 66U) /
+ 100U, tc);
+ tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ buff_size = HW_ATL_A0_RXBUF_MAX;
+
+ rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int addr = 0U;
+
+ cfg = self->aq_nic_cfg;
+
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ rpf_rss_key_wr_data_set(self, key_data);
+ rpf_rss_key_addr_set(self, addr);
+ rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+ u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
+ HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
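+ /* Packing sketch (inferred from the arithmetic below, not a datasheet):
+ * entry i is a 3-bit queue index placed at bit offset i * 3 of a flat
+ * bitstream, so the u16 word is (i * 3) / 16 and the in-word shift is
+ * (i * 3) & 0xF. Writing through a u32 pointer lets an entry that
+ * straddles a 16-bit boundary (e.g. i = 5, bit offset 15) spill into
+ * the next word.
+ */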
+ for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = AQ_DIMOF(bitary); i--;) {
+ rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ rpf_rss_redir_tbl_addr_set(self, i);
+ rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ /* TX checksum offloads */
+ tpo_ipv4header_crc_offload_en_set(self, 1);
+ tpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* RX checksum offloads */
+ rpo_ipv4header_crc_offload_en_set(self, 1);
+ rpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* LSO offload */
+ tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U);
+ tdm_tx_dca_en_set(self, 0U);
+ tdm_tx_dca_mode_set(self, 0U);
+
+ tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_A0_MAC_MAX; i--;) {
+ rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ rpf_vlan_outer_etht_set(self, 0x88A8U);
+ rpf_vlan_inner_etht_set(self, 0x8100U);
+ rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Rx Interrupts */
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ rpfl2broadcast_flr_act_set(self, 1U);
+ rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ rdm_rx_dca_en_set(self, 0U);
+ rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
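+ /* Register packing used below: for an example address 00:17:B6:01:02:03
+ * (made-up value, only the byte order is taken from this code) the MSW
+ * register receives 0x0017 (bytes 0..1) and the LSW register receives
+ * 0xB6010203 (bytes 2..5).
+ */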
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_init(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+ { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+ { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ };
+
+ int err = 0;
+
+ self->aq_nic_cfg = aq_nic_cfg;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_A0->chip_features);
+
+ hw_atl_a0_hw_init_tx_path(self);
+ hw_atl_a0_hw_init_rx_path(self);
+
+ hw_atl_a0_hw_mac_addr_set(self, mac_addr);
+
+ hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+ reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+
+ hw_atl_a0_hw_qos_set(self);
+ hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
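+ /* Assumed meaning of the value built below: the map register holds four
+ * byte-wide lanes, each with a vector index in its low bits and an
+ * enable flag in the byte's top bit (1U << 0x1F, 0x17, 0xF, 0x7), so all
+ * four lanes of map register 0 point at HW_ATL_A0_ERR_INT. Inferred
+ * from the shifts, not from hardware documentation.
+ */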
+ reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+
+ hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_start(struct aq_hw_s *self)
+{
+ tpb_tx_buff_en_set(self, 1);
+ rpb_rx_buff_en_set(self, 1);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ return 0;
+}
+
+static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int pkt_len = 0U;
+ unsigned int frag_count = 0U;
+ bool is_gso = false;
+
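+ /* pkt_len sketch (inferred from the buff_ring usage, not a datasheet):
+ * a packet that is SOP and EOP in one buffer uses buff->len directly,
+ * otherwise len_pkt is taken as the total packet length so the PAY_LEN
+ * field written further down covers all fragments; the TSO context
+ * branch then subtracts the L2/L3/L4 header lengths from it.
+ */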
+ buff = &ring->buff_ring[ring->sw_tail];
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_A0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_txc) {
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24) |
+ HW_ATL_A0_TXD_CTL_CMD_TCP |
+ HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl2 |= (buff->mss << 16) |
+ (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ is_gso = true;
+ } else {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
+ }
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+ }
+ }
+
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
+
+ rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->idx);
+
+ rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_A0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ int err = 0;
+ unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
+
+ unsigned int is_err = 1U;
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+
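+ /* Hedged reading of the A0 workaround below: status bit 0 looks like
+ * the descriptor-done flag (the 0x5 test also checks bit 2). If the
+ * head descriptor never completes, the ring is kicked by toggling the
+ * DMA reset bits, and a completion that landed on descriptor 1 before
+ * descriptor 0 is forged into slot 0 (pkt_len 1514, status 3) so
+ * processing can continue. Bit meanings are inferred from this code
+ * path only.
+ */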
+ if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
+ if ((1U << 4) &
+ reg_rx_dma_desc_status_get(self, ring->idx)) {
+ rdm_rx_desc_en_set(self, false, ring->idx);
+ rdm_rx_desc_res_set(self, true, ring->idx);
+ rdm_rx_desc_res_set(self, false, ring->idx);
+ rdm_rx_desc_en_set(self, true, ring->idx);
+ }
+
+ if (ring->hw_head ||
+ (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ break;
+ } else if (!(rxd_wb->status & 0x1U)) {
+ struct hw_atl_rxd_wb_s *rxd_wb1 =
+ (struct hw_atl_rxd_wb_s *)
+ (&ring->dx_ring[(1U) *
+ HW_ATL_A0_RXD_SIZE]);
+
+ if ((rxd_wb1->status & 0x1U)) {
+ rxd_wb->pkt_len = 1514U;
+ rxd_wb->status = 3U;
+ } else {
+ break;
+ }
+ }
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
+ if (0x3U != (rxd_wb->status & 0x3U))
+ rxd_wb->status |= 4;
+
+ is_err = (0x0000001CU & rxd_wb->status);
+ is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+ if (is_rx_check_sum_enabled) {
+ if (0x0U == (pkt_type & 0x3U))
+ buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;
+
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+ }
+
+ is_err &= ~0x18U;
+ is_err &= ~0x04U;
+
+ dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+ } else {
+ if (self->aq_nic_cfg->is_rss) {
+ /* low 4 bits of the type field */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+ buff->len = rxd_wb->pkt_len %
+ AQ_CFG_RX_FRAME_MAX;
+ buff->len = buff->len ?
+ buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ /* jumbo */
+ buff->next = aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ (1U << HW_ATL_A0_ERR_INT));
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ if ((1U << 16) & reg_gen_irq_status_get(self))
+ atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = itr_irq_statuslsw_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ unsigned int i = 0U;
+
+ rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+ self->aq_nic_cfg->is_mc_list_enabled =
+ IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ int err = 0;
+
+ if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (self->aq_nic_cfg->mc_list_count = 0U;
+ self->aq_nic_cfg->mc_list_count < count;
+ ++self->aq_nic_cfg->mc_list_count) {
+ u32 i = self->aq_nic_cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+ bool itr_enabled)
+{
+ unsigned int i = 0U;
+
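+ /* Assumed layout of the per-ring throttle value built below: bit 31
+ * enables throttling and the timer sits at bit 16 and up, so
+ * 0x80000000U | (itr_ << 0x10) means "enabled, timer = itr_". The
+ * 0x00002A00U register writes further down are not documented here.
+ */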
+ if (itr_enabled && self->aq_nic_cfg->itr) {
+ if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ u32 itr_ = (self->aq_nic_cfg->itr >> 1);
+
+ itr_ = min(AQ_CFG_IRQ_MASK, itr_);
+
+ PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
+ (itr_ << 0x10);
+ } else {
+ u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
+
+ if (n < self->aq_link_status.mbps) {
+ PHAL_ATLANTIC_A0->itr_rx = 0U;
+ } else {
+ static unsigned int hw_timers_tbl_[] = {
+ 0x01CU, /* 10Gbit */
+ 0x039U, /* 5Gbit */
+ 0x039U, /* 5Gbit 5GS */
+ 0x073U, /* 2.5Gbit */
+ 0x120U, /* 1Gbit */
+ 0x1FFU, /* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ PHAL_ATLANTIC_A0->itr_rx =
+ 0x80000000U |
+ (hw_timers_tbl_[speed_index] << 0x10U);
+ }
+
+ aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
+ aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
+ }
+ } else {
+ PHAL_ATLANTIC_A0->itr_rx = 0U;
+ }
+
+ for (i = HW_ATL_A0_RINGS_MAX; i--;)
+ reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+ int err = 0;
+
+ err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+ .create = hw_atl_a0_create,
+ .destroy = hw_atl_a0_destroy,
+ .get_hw_caps = hw_atl_a0_get_hw_caps,
+
+ .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
+ .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
+ .hw_set_link_speed = hw_atl_a0_hw_set_speed,
+ .hw_init = hw_atl_a0_hw_init,
+ .hw_deinit = hw_atl_utils_hw_deinit,
+ .hw_set_power = hw_atl_utils_hw_set_power,
+ .hw_reset = hw_atl_a0_hw_reset,
+ .hw_start = hw_atl_a0_hw_start,
+ .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_a0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_a0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_a0_hw_irq_disable,
+ .hw_irq_read = hw_atl_a0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
+ .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_a0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
+{
+ bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+ bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D109));
+
+ bool is_rev_ok = (pdev->revision == 1U);
+
+ return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
new file mode 100644
index 000000000000..6e1d527954c9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_A0_H
+#define HW_ATL_A0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+#define HW_ATL_DEVICE_ID_0001 0x0001
+#define HW_ATL_DEVICE_ID_D100 0xD100
+#define HW_ATL_DEVICE_ID_D107 0xD107
+#define HW_ATL_DEVICE_ID_D108 0xD108
+#define HW_ATL_DEVICE_ID_D109 0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
new file mode 100644
index 000000000000..1093ea18823a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -0,0 +1,155 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_A0_INTERNAL_H
+#define HW_ATL_A0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_A0_MTU_JUMBO 9014U
+
+#define HW_ATL_A0_TX_RINGS 4U
+#define HW_ATL_A0_RX_RINGS 4U
+
+#define HW_ATL_A0_RINGS_MAX 32U
+#define HW_ATL_A0_TXD_SIZE 16U
+#define HW_ATL_A0_RXD_SIZE 16U
+
+#define HW_ATL_A0_MAC 0U
+#define HW_ATL_A0_MAC_MIN 1U
+#define HW_ATL_A0_MAC_MAX 33U
+
+/* interrupts */
+#define HW_ATL_A0_ERR_INT 8U
+#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU
+
+#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U
+#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U
+#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U
+
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U
+#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U
+#define HW_ATL_A0_TXD_CTL_DD 0x00100000U
+#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_A0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_A0_RATE_10G BIT(0)
+#define HW_ATL_A0_RATE_5G BIT(1)
+#define HW_ATL_A0_RATE_2G5 BIT(3)
+#define HW_ATL_A0_RATE_1G BIT(4)
+#define HW_ATL_A0_RATE_100M BIT(5)
+
+#define HW_ATL_A0_TXBUF_MAX 160U
+#define HW_ATL_A0_RXBUF_MAX 320U
+
+#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U
+
+#define HW_ATL_A0_TC_MAX 1U
+#define HW_ATL_A0_RSS_MAX 8U
+
+#define HW_ATL_A0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_A0_RXD_DD 0x1U
+#define HW_ATL_A0_RXD_NCEA0 0x1U
+
+#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U
+
+#define HW_ATL_A0_UCP_0X370_REG 0x370U
+
+#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
+ .ports = 1U,
+ .is_64_dma = true,
+ .msix_irqs = 4U,
+ .irq_mask = ~0U,
+ .vecs = HW_ATL_A0_RSS_MAX,
+ .tcs = HW_ATL_A0_TC_MAX,
+ .rxd_alignment = 1U,
+ .rxd_size = HW_ATL_A0_RXD_SIZE,
+ .rxds = 248U,
+ .txd_alignment = 1U,
+ .txd_size = HW_ATL_A0_TXD_SIZE,
+ .txds = 8U * 1024U,
+ .txhwb_alignment = 4096U,
+ .tx_rings = HW_ATL_A0_TX_RINGS,
+ .rx_rings = HW_ATL_A0_RX_RINGS,
+ .hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_TSO,
+ .hw_priv_flags = IFF_UNICAST_FLT,
+ .link_speed_msk = (HW_ATL_A0_RATE_10G |
+ HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M),
+ .flow_control = true,
+ .mtu = HW_ATL_A0_MTU_JUMBO,
+ .mac_regs_count = 88,
+ .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
new file mode 100644
index 000000000000..cab2931dab9a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -0,0 +1,958 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_b0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_b0_internal.h"
+
+static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+ return 0;
+}
+
+static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ struct aq_hw_ops *ops)
+{
+ struct hw_atl_s *self = NULL;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self)
+ goto err_exit;
+
+ self->base.aq_pci_func = aq_pci_func;
+
+ self->base.not_ff_addr = 0x10U;
+
+err_exit:
+ return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_b0_destroy(struct aq_hw_s *self)
+{
+ kfree(self);
+}
+
+static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ glb_glb_reg_res_dis_set(self, 1U);
+ pci_pci_reg_res_dis_set(self, 0U);
+ rx_rx_reg_res_dis_set(self, 0U);
+ tx_tx_reg_res_dis_set(self, 0U);
+
+ HW_ATL_FLUSH();
+ glb_soft_res_set(self, 1);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ itr_irq_reg_res_dis_set(self, 0U);
+ itr_res_irq_set(self, 1U);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
+{
+ u32 tc = 0U;
+ u32 buff_size = 0U;
+ unsigned int i_priority = 0U;
+ bool is_rx_flow_control = false;
+
+ /* TPS Descriptor rate init */
+ tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_B0_TXBUF_MAX;
+
+ tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 66U) /
+ 100U, tc);
+ tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ buff_size = HW_ATL_B0_RXBUF_MAX;
+
+ rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int addr = 0U;
+
+ cfg = self->aq_nic_cfg;
+
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ rpf_rss_key_wr_data_set(self, key_data);
+ rpf_rss_key_addr_set(self, addr);
+ rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+ u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
+ HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
+ for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = AQ_DIMOF(bitary); i--;) {
+ rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ rpf_rss_redir_tbl_addr_set(self, i);
+ rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+ unsigned int i;
+
+ /* TX checksum offloads */
+ tpo_ipv4header_crc_offload_en_set(self, 1);
+ tpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* RX checksum offloads */
+ rpo_ipv4header_crc_offload_en_set(self, 1);
+ rpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* LSO offload */
+ tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ if (err < 0)
+ goto err_exit;
+
+ /* LRO offload */
+ {
+ unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+ ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+ ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+
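+ /* The 2-bit code computed above appears to select the per-session LRO
+ * descriptor budget: 0x1 for up to 4 descriptors, 0x2 for up to 8,
+ * 0x3 beyond that, driven by HW_ATL_B0_LRO_RXD_MAX. Inferred from the
+ * ternary chain, not from hardware documentation.
+ */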
+ for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+ rpo_lro_max_num_of_descriptors_set(self, val, i);
+
+ rpo_lro_time_base_divider_set(self, 0x61AU);
+ rpo_lro_inactive_interval_set(self, 0);
+ rpo_lro_max_coalescing_interval_set(self, 2);
+
+ rpo_lro_qsessions_lim_set(self, 1U);
+
+ rpo_lro_total_desc_lim_set(self, 2U);
+
+ rpo_lro_patch_optimization_en_set(self, 0U);
+
+ rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+
+ rpo_lro_pkt_lim_set(self, 1U);
+
+ rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ }
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U);
+ tdm_tx_dca_en_set(self, 0U);
+ tdm_tx_dca_mode_set(self, 0U);
+
+ tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_B0_MAC_MAX; i--;) {
+ rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ rpf_vlan_outer_etht_set(self, 0x88A8U);
+ rpf_vlan_inner_etht_set(self, 0x8100U);
+
+ if (cfg->vlan_id) {
+ rpf_vlan_flr_act_set(self, 1U, 0U);
+ rpf_vlan_id_flr_set(self, 0U, 0U);
+ rpf_vlan_flr_en_set(self, 0U, 0U);
+
+ rpf_vlan_accept_untagged_packets_set(self, 1U);
+ rpf_vlan_untagged_act_set(self, 1U);
+
+ rpf_vlan_flr_act_set(self, 1U, 1U);
+ rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+ rpf_vlan_flr_en_set(self, 1U, 1U);
+ } else {
+ rpf_vlan_prom_mode_en_set(self, 1);
+ }
+
+ /* Rx Interrupts */
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00005040U,
+ IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+
+ rpfl2broadcast_flr_act_set(self, 1U);
+ rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ rdm_rx_dca_en_set(self, 0U);
+ rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_init(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+ { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+ { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ };
+
+ int err = 0;
+
+ self->aq_nic_cfg = aq_nic_cfg;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_B0->chip_features);
+
+ hw_atl_b0_hw_init_tx_path(self);
+ hw_atl_b0_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+ hw_atl_b0_hw_qos_set(self);
+ hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+{
+ tpb_tx_buff_en_set(self, 1);
+ rpb_rx_buff_en_set(self, 1);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ return 0;
+}
+
+static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int pkt_len = 0U;
+ unsigned int frag_count = 0U;
+ bool is_gso = false;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_B0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_txc) {
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24) |
+ HW_ATL_B0_TXD_CTL_CMD_TCP |
+ HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl2 |= (buff->mss << 16) |
+ (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ is_gso = true;
+ } else {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
+ }
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+ }
+ }
+
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
+
+ rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->idx);
+
+ rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_B0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ int err = 0;
+ unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
+
+ unsigned int is_err = 1U;
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+
+ if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
+ break;
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
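+ /* Assumed meaning of the status bits masked below: bits [5:2] carry
+ * error/validity flags; bit 5 (0x20) is the checksum validity
+ * indication dropped from is_err, bits 3 and 4 look like the IPv4 and
+ * TCP/UDP checksum errors and are cleared once the cso flags are set,
+ * leaving bit 2 to mark a MAC-level error. Inferred from this code
+ * path only.
+ */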
+ is_err = (0x0000003CU & rxd_wb->status);
+
+ is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ is_err &= ~0x20U; /* exclude validity bit */
+
+ pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+ if (is_rx_check_sum_enabled) {
+ if (0x0U == (pkt_type & 0x3U))
+ buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+ }
+
+ is_err &= ~0x18U;
+
+ dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+ } else {
+ if (self->aq_nic_cfg->is_rss) {
+ /* low 4 bits of the type field */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+ buff->len = rxd_wb->pkt_len %
+ AQ_CFG_RX_FRAME_MAX;
+ buff->len = buff->len ?
+ buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+ rxd_wb->status) {
+ /* LRO */
+ buff->next = rxd_wb->next_desc_ptr;
+ ++ring->stats.rx.lro_packets;
+ } else {
+ /* jumbo */
+ buff->next =
+ aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = itr_irq_statuslsw_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ unsigned int i = 0U;
+
+ rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+
+ rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
+
+ rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+ self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ int err = 0;
+
+ if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (self->aq_nic_cfg->mc_list_count = 0U;
+ self->aq_nic_cfg->mc_list_count < count;
+ ++self->aq_nic_cfg->mc_list_count) {
+ u32 i = self->aq_nic_cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+ bool itr_enabled)
+{
+ unsigned int i = 0U;
+
+ if (itr_enabled && self->aq_nic_cfg->itr) {
+ tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ tdm_tdm_intr_moder_en_set(self, 1U);
+ rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ rdm_rdm_intr_moder_en_set(self, 1U);
+
+ PHAL_ATLANTIC_B0->itr_tx = 2U;
+ PHAL_ATLANTIC_B0->itr_rx = 2U;
+
+ if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
+ unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+
+ max_timer = min(0x1FFU, max_timer);
+ min_timer = min(0xFFU, min_timer);
+
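+ /* The moderation control word appears to keep the minimum timer from
+ * bit 8 up and the maximum timer from bit 16 up, on top of the enable
+ * value (2U) set above; the same packing is used for the Tx and Rx
+ * registers. Field positions are inferred from the shifts below.
+ */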
+ PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
+ PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
+ PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
+ PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
+ {0xffU, 0xffU}, /* 10Gbit */
+ {0xffU, 0x1ffU}, /* 5Gbit */
+ {0xffU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xffU, 0x1ffU}, /* 2.5Gbit */
+ {0xffU, 0x1ffU}, /* 1Gbit */
+ {0xffU, 0x1ffU}, /* 100Mbit */
+ };
+
+ static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ PHAL_ATLANTIC_B0->itr_tx |=
+ hw_atl_b0_timers_table_tx_[speed_index]
+ [0] << 0x8U; /* set min timer value */
+ PHAL_ATLANTIC_B0->itr_tx |=
+ hw_atl_b0_timers_table_tx_[speed_index]
+ [1] << 0x10U; /* set max timer value */
+
+ PHAL_ATLANTIC_B0->itr_rx |=
+ hw_atl_b0_timers_table_rx_[speed_index]
+ [0] << 0x8U; /* set min timer value */
+ PHAL_ATLANTIC_B0->itr_rx |=
+ hw_atl_b0_timers_table_rx_[speed_index]
+ [1] << 0x10U; /* set max timer value */
+ }
+ } else {
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ tdm_tdm_intr_moder_en_set(self, 0U);
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ rdm_rdm_intr_moder_en_set(self, 0U);
+ PHAL_ATLANTIC_B0->itr_tx = 0U;
+ PHAL_ATLANTIC_B0->itr_rx = 0U;
+ }
+
+ for (i = HW_ATL_B0_RINGS_MAX; i--;) {
+ reg_tx_intr_moder_ctrl_set(self,
+ PHAL_ATLANTIC_B0->itr_tx, i);
+ reg_rx_intr_moder_ctrl_set(self,
+ PHAL_ATLANTIC_B0->itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+ int err = 0;
+
+ err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+ .create = hw_atl_b0_create,
+ .destroy = hw_atl_b0_destroy,
+ .get_hw_caps = hw_atl_b0_get_hw_caps,
+
+ .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
+ .hw_set_link_speed = hw_atl_b0_hw_set_speed,
+ .hw_init = hw_atl_b0_hw_init,
+ .hw_deinit = hw_atl_utils_hw_deinit,
+ .hw_set_power = hw_atl_utils_hw_set_power,
+ .hw_reset = hw_atl_b0_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_b0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_b0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
+{
+ bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+ bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D109));
+
+ bool is_rev_ok = (pdev->revision == 2U);
+
+ return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
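A caller such as the PCI probe path would use this lookup to bind the hardware ops before touching any register; the sketch below is a hypothetical usage example, not the actual aq_pci probe code.

/* Hypothetical usage sketch: bind B0 ops or bail out for foreign devices. */
static int example_bind_hw_ops(struct pci_dev *pdev)
{
	struct aq_hw_ops *ops = hw_atl_b0_get_ops_by_id(pdev);

	if (!ops)
		return -ENODEV;	/* vendor, device id or revision mismatch */

	/* ops->create(), ops->hw_init() etc. may be called from here on */
	return 0;
}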
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
new file mode 100644
index 000000000000..a1e1bce6c1f3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_B0_H
+#define HW_ATL_B0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+#define HW_ATL_DEVICE_ID_0001 0x0001
+#define HW_ATL_DEVICE_ID_D100 0xD100
+#define HW_ATL_DEVICE_ID_D107 0xD107
+#define HW_ATL_DEVICE_ID_D108 0xD108
+#define HW_ATL_DEVICE_ID_D109 0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
new file mode 100644
index 000000000000..8bdee3ddd5a0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -0,0 +1,207 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_B0_INTERNAL_H
+#define HW_ATL_B0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU 1514U
+
+#define HW_ATL_B0_TX_RINGS 4U
+#define HW_ATL_B0_RX_RINGS 4U
+
+#define HW_ATL_B0_RINGS_MAX 32U
+#define HW_ATL_B0_TXD_SIZE (16U)
+#define HW_ATL_B0_RXD_SIZE (16U)
+
+#define HW_ATL_B0_MAC 0U
+#define HW_ATL_B0_MAC_MIN 1U
+#define HW_ATL_B0_MAC_MAX 33U
+
+/* UCAST/MCAST filters */
+#define HW_ATL_B0_UCAST_FILTERS_MAX 38
+#define HW_ATL_B0_MCAST_FILTERS_MAX 8
+
+/* interrupts */
+#define HW_ATL_B0_ERR_INT 8U
+#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000)
+#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000)
+#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000)
+
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001)
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002)
+#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0)
+#define HW_ATL_B0_TXD_CTL_DD (0x00100000)
+#define HW_ATL_B0_TXD_CTL_EOP (0x00200000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_B0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_B0_RATE_10G BIT(0)
+#define HW_ATL_B0_RATE_5G BIT(1)
+#define HW_ATL_B0_RATE_2G5 BIT(3)
+#define HW_ATL_B0_RATE_1G BIT(4)
+#define HW_ATL_B0_RATE_100M BIT(5)
+
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_RXBUF_MAX 320U
+
+#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
+#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
+
+#define HW_ATL_B0_TCRSS_4_8 1
+#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_RSS_MAX 8U
+
+#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_RS_SLIP_ENABLED 0U
+
+/* (256k - 1 (max pay_len) - 54 (header)) */
+#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U
+
+/* (256k - 1 (max pay_len) - 74 (header)) */
+#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U
+
+#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U
+#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU
+
+#define HW_ATL_B0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000)
+
+#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
+#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008)
+#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0)
+#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000)
+#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000)
+
+#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */
+#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */
+#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000)
+
+#define HW_ATL_B0_RXD_DD (0x1)
+#define HW_ATL_B0_RXD_NCEA0 (0x1)
+
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000)
+
+#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001)
+#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002)
+#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C)
+#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004)
+#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008)
+#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010)
+#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
+#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000)
+
+#define L2_FILTER_ACTION_DISCARD (0x0)
+#define L2_FILTER_ACTION_HOST (0x1)
+
+#define HW_ATL_B0_UCP_0X370_REG (0x370)
+
+#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)
+
+#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
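A plain data descriptor is built by or-ing the masks above into ctl and ctl2; the sketch below shows one plausible packing. The 4-bit and 14-bit shifts are inferred from HW_ATL_B0_TXD_CTL_BLEN and HW_ATL_B0_TXD_CTL2_LEN, and the helper name is not part of the driver.

/* Illustrative only: fill one EOP data descriptor with write-back
 * requested, using the HW_ATL_B0_TXD_* masks defined above. This is a
 * sketch, not the driver's xmit path.
 */
static inline void example_txd_fill(struct hw_atl_txd_s *txd, u64 dma_addr,
				    u32 buf_len, u32 pay_len)
{
	txd->buf_addr = dma_addr;
	txd->ctl = HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD |
		   ((buf_len << 4) & HW_ATL_B0_TXD_CTL_BLEN) |
		   HW_ATL_B0_TXD_CTL_EOP |
		   HW_ATL_B0_TXD_CTL_CMD_WB;
	txd->ctl2 = (pay_len << 14) & HW_ATL_B0_TXD_CTL2_LEN;
}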
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
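When a completed write-back descriptor is parsed, the DD and EOP flags plus the MAC error bit of the status word (masks defined earlier in this header) decide whether the buffer can be handed up. A hedged standalone check, with an illustrative function name:

/* Illustrative only: minimal status-word test against the
 * HW_ATL_B0_RXD_WB_STAT2_* masks above. Non-zero means the descriptor
 * is complete, ends a packet and reports no MAC error.
 */
static inline int example_rxd_wb_usable(const struct hw_atl_rxd_wb_s *wb)
{
	return (wb->status & HW_ATL_B0_RXD_WB_STAT2_DD) &&
	       (wb->status & HW_ATL_B0_RXD_WB_STAT2_EOP) &&
	       !(wb->status & HW_ATL_B0_RXD_WB_STAT2_MACERR);
}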
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
+ .ports = 1U,
+ .is_64_dma = true,
+ .msix_irqs = 4U,
+ .irq_mask = ~0U,
+ .vecs = HW_ATL_B0_RSS_MAX,
+ .tcs = HW_ATL_B0_TC_MAX,
+ .rxd_alignment = 1U,
+ .rxd_size = HW_ATL_B0_RXD_SIZE,
+ .rxds = 8U * 1024U,
+ .txd_alignment = 1U,
+ .txd_size = HW_ATL_B0_TXD_SIZE,
+ .txds = 8U * 1024U,
+ .txhwb_alignment = 4096U,
+ .tx_rings = HW_ATL_B0_TX_RINGS,
+ .rx_rings = HW_ATL_B0_RX_RINGS,
+ .hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_LRO,
+ .hw_priv_flags = IFF_UNICAST_FLT,
+ .link_speed_msk = (HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M),
+ .flow_control = true,
+ .mtu = HW_ATL_B0_MTU_JUMBO,
+ .mac_regs_count = 88,
+ .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
new file mode 100644
index 000000000000..3de651afa8c7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -0,0 +1,1394 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+#include "../aq_hw_utils.h"
+
+/* global */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+{
+ aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+}
+
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+{
+ return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+}
+
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
+ glb_reg_res_dis_msk,
+ glb_reg_res_dis_shift,
+ glb_reg_res_dis);
+}
+
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+{
+ aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
+ glb_soft_res_shift, soft_res);
+}
+
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
+ glb_soft_res_msk,
+ glb_soft_res_shift);
+}
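The set/get pair above is typically driven as a write-then-poll sequence during a global soft reset. A hedged sketch of that pattern follows; the retry count, delay and error code are assumptions, not values taken from this patch.

/* Hedged sketch: request a soft reset and wait for the hardware to
 * clear the bit, using the two helpers above. Retry/delay values are
 * placeholders.
 */
static int example_soft_reset_wait(struct aq_hw_s *aq_hw)
{
	int retries = 1000;

	glb_soft_res_set(aq_hw, 1U);

	while (glb_soft_res_get(aq_hw) && --retries)
		udelay(10U);

	return retries ? 0 : -ETIME;
}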
+
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+}
+
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+}
+
+/* stats */
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+}
+
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+}
+
+/* interrupt */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+{
+ aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+}
+
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_msk[32] = {
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+ };
+
+/* lower bit position of bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_shift[32] = {
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx],
+ itr_imr_rxren_msk[rx],
+ itr_imr_rxren_shift[rx],
+ irq_map_en_rx);
+}
+
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_msk[32] = {
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_shift[32] = {
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx],
+ itr_imr_txten_msk[tx],
+ itr_imr_txten_shift[tx],
+ irq_map_en_tx);
+}
+
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_msk[32] = {
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ };
+
+/* lower bit position of bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_shift[32] = {
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx],
+ itr_imr_rxr_msk[rx],
+ itr_imr_rxr_shift[rx],
+ irq_map_rx);
+}
+
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_msk[32] = {
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_shift[32] = {
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx],
+ itr_imr_txt_msk[tx],
+ itr_imr_txt_shift[tx],
+ irq_map_tx);
+}
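All four mapping helpers above use the same interleaved table layout: each 32-bit register at 0x2100 + 4 * (i / 2) holds the fields for two consecutive vectors, with even indices in the higher bit positions. A standalone sketch that recomputes the rx-map table entries from the index (names are illustrative):

#include <stdint.h>

/* Illustrative only: derive the register address, mask and shift that
 * itr_irq_map_rx_set() looks up in its static tables above.
 */
static void example_rx_map_location(uint32_t rx, uint32_t *addr,
				    uint32_t *mask, uint32_t *shift)
{
	*addr = 0x00002100U + 4U * (rx / 2U);	/* two vectors per register */
	*shift = (rx & 1U) ? 0U : 8U;		/* odd: bits 4:0, even: bits 12:8 */
	*mask = 0x1fU << *shift;
}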
+
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+}
+
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+}
+
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
+ itr_reg_res_dsbl_msk,
+ itr_reg_res_dsbl_shift, irq_reg_res_dis);
+}
+
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+}
+
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+}
+
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+ itr_res_shift);
+}
+
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+{
+ aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+ itr_res_shift, res_irq);
+}
+
+/* rdm */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
+ rdm_dcadcpuid_msk,
+ rdm_dcadcpuid_shift, cpuid);
+}
+
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
+ rdm_dca_en_shift, rx_dca_en);
+}
+
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
+ rdm_dca_mode_shift, rx_dca_mode);
+}
+
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
+ rdm_descddata_size_msk,
+ rdm_descddata_size_shift,
+ rx_desc_data_buff_size);
+}
+
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
+ rdm_dcaddesc_en_msk,
+ rdm_dcaddesc_en_shift,
+ rx_desc_dca_en);
+}
+
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
+ rdm_descden_msk,
+ rdm_descden_shift,
+ rx_desc_en);
+}
+
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
+ rdm_descdhdr_size_msk,
+ rdm_descdhdr_size_shift,
+ rx_desc_head_buff_size);
+}
+
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
+ rdm_descdhdr_split_msk,
+ rdm_descdhdr_split_shift,
+ rx_desc_head_splitting);
+}
+
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
+ rdm_descdhd_msk, rdm_descdhd_shift);
+}
+
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
+ rdm_descdlen_msk, rdm_descdlen_shift,
+ rx_desc_len);
+}
+
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
+ rdm_descdreset_msk, rdm_descdreset_shift,
+ rx_desc_res);
+}
+
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
+ rdm_int_desc_wrb_en_msk,
+ rdm_int_desc_wrb_en_shift,
+ rx_desc_wr_wb_irq_en);
+}
+
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
+ rdm_dcadhdr_en_msk,
+ rdm_dcadhdr_en_shift,
+ rx_head_dca_en);
+}
+
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
+ rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ rx_pld_dca_en);
+}
+
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
+ rdm_int_rim_en_msk,
+ rdm_int_rim_en_shift,
+ rdm_intr_moder_en);
+}
+
+/* reg */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+{
+ aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+}
+
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+}
+
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+{
+ aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+}
+
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+{
+ aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+}
+
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ rx_dma_desc_base_addrlsw);
+}
+
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ rx_dma_desc_base_addrmsw);
+}
+
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+}
+
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr, u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ rx_dma_desc_tail_ptr);
+}
+
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+}
+
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+}
+
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+}
+
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+}
+
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ rx_intr_moderation_ctl);
+}
+
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+}
+
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ tx_dma_desc_base_addrlsw);
+}
+
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ tx_dma_desc_base_addrmsw);
+}
+
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr, u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ tx_dma_desc_tail_ptr);
+}
+
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ tx_intr_moderation_ctl);
+}
+
+/* RPB: rx packet buffer */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
+ rpb_dma_sys_lbk_msk,
+ rpb_dma_sys_lbk_shift, dma_sys_lbk);
+}
+
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
+ rpb_rpf_rx_tc_mode_msk,
+ rpb_rpf_rx_tc_mode_shift,
+ rx_traf_class_mode);
+}
+
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
+ rpb_rx_buf_en_shift, rx_buff_en);
+}
+
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
+ rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ rx_buff_hi_threshold_per_tc);
+}
+
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
+ rpb_rxblo_thresh_msk,
+ rpb_rxblo_thresh_shift,
+ rx_buff_lo_threshold_per_tc);
+}
+
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
+ rpb_rx_fc_mode_msk,
+ rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+}
+
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
+ rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ rx_pkt_buff_size_per_tc);
+}
+
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
+ rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ rx_xoff_en_per_tc);
+}
+
+/* rpf */
+
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
+ rpfl2bc_thresh_msk,
+ rpfl2bc_thresh_shift,
+ l2broadcast_count_threshold);
+}
+
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
+ rpfl2bc_en_shift, l2broadcast_en);
+}
+
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
+ rpfl2bc_act_shift, l2broadcast_flr_act);
+}
+
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
+ rpfl2mc_enf_msk,
+ rpfl2mc_enf_shift, l2multicast_flr_en);
+}
+
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
+ rpfl2promis_mode_msk,
+ rpfl2promis_mode_shift,
+ l2promiscuous_mode_en);
+}
+
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
+ rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ l2unicast_flr_act);
+}
+
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
+ rpfl2uc_enf_msk,
+ rpfl2uc_enf_shift, l2unicast_flr_en);
+}
+
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ l2unicast_dest_addresslsw);
+}
+
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
+ rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ l2unicast_dest_addressmsw);
+}
+
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
+ rpfl2mc_accept_all_msk,
+ rpfl2mc_accept_all_shift,
+ l2_accept_all_mc_packets);
+}
+
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
+{
+/* register address for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_adr[8] = {
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ };
+
+/* bitmask for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_msk[8] = {
+ 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+ 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+ };
+
+/* lower bit position of bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_shft[8] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
+ rpf_rpb_rx_tc_upt_msk[tc],
+ rpf_rpb_rx_tc_upt_shft[tc],
+ user_priority_tc_map);
+}
+
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
+ rpf_rss_key_addr_msk,
+ rpf_rss_key_addr_shift,
+ rss_key_addr);
+}
+
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+{
+ aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ rss_key_wr_data);
+}
+
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk,
+ rpf_rss_key_wr_eni_shift);
+}
+
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk,
+ rpf_rss_key_wr_eni_shift,
+ rss_key_wr_en);
+}
+
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
+ rpf_rss_redir_addr_msk,
+ rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+}
+
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
+ rpf_rss_redir_wr_data_msk,
+ rpf_rss_redir_wr_data_shift,
+ rss_redir_tbl_wr_data);
+}
+
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk,
+ rpf_rss_redir_wr_eni_shift);
+}
+
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk,
+ rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+}
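The redirection-table helpers above are normally used as an address/data/write-enable sequence followed by a poll until the hardware clears the enable bit. A hedged sketch of that sequence; retry and delay values are assumptions.

/* Hedged sketch: program one RSS redirection entry with the helpers
 * above, then wait for the write-enable bit to clear.
 */
static int example_rss_redir_write(struct aq_hw_s *aq_hw, u32 index, u32 queue)
{
	int retries = 1000;

	rpf_rss_redir_tbl_wr_data_set(aq_hw, queue);
	rpf_rss_redir_tbl_addr_set(aq_hw, index);
	rpf_rss_redir_wr_en_set(aq_hw, 1U);

	while (rpf_rss_redir_wr_en_get(aq_hw) && --retries)
		udelay(10U);

	return retries ? 0 : -ETIME;
}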
+
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
+ rpf_tpo_rpf_sys_lbk_msk,
+ rpf_tpo_rpf_sys_lbk_shift,
+ tpo_to_rpf_sys_lbk);
+}
+
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
+ rpf_vl_inner_tpid_msk,
+ rpf_vl_inner_tpid_shift,
+ vlan_inner_etht);
+}
+
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
+ rpf_vl_outer_tpid_msk,
+ rpf_vl_outer_tpid_shift,
+ vlan_outer_etht);
+}
+
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
+ rpf_vl_promis_mode_msk,
+ rpf_vl_promis_mode_shift,
+ vlan_prom_mode_en);
+}
+
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_accept_untagged_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
+ rpf_vl_accept_untagged_mode_msk,
+ rpf_vl_accept_untagged_mode_shift,
+ vlan_accept_untagged_packets);
+}
+
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
+ rpf_vl_untagged_act_msk,
+ rpf_vl_untagged_act_shift,
+ vlan_untagged_act);
+}
+
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
+ rpf_vl_en_f_msk,
+ rpf_vl_en_f_shift,
+ vlan_flr_en);
+}
+
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
+ rpf_vl_act_f_msk,
+ rpf_vl_act_f_shift,
+ vlan_flr_act);
+}
+
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
+ rpf_vl_id_f_msk,
+ rpf_vl_id_f_shift,
+ vlan_id_flr);
+}
+
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
+ rpf_et_enf_msk,
+ rpf_et_enf_shift, etht_flr_en);
+}
+
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
+ rpf_et_upfen_msk, rpf_et_upfen_shift,
+ etht_user_priority_en);
+}
+
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
+ rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ etht_rx_queue_en);
+}
+
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
+ rpf_et_upf_msk,
+ rpf_et_upf_shift, etht_user_priority);
+}
+
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
+ rpf_et_rxqf_msk,
+ rpf_et_rxqf_shift, etht_rx_queue);
+}
+
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
+ rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ etht_mgt_queue);
+}
+
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
+ rpf_et_actf_msk,
+ rpf_et_actf_shift, etht_flr_act);
+}
+
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
+ rpf_et_valf_msk,
+ rpf_et_valf_shift, etht_flr);
+}
+
+/* RPO: rx packet offload */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
+ rpo_ipv4chk_en_msk,
+ rpo_ipv4chk_en_shift,
+ ipv4header_crc_offload_en);
+}
+
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
+ rpo_descdvl_strip_msk,
+ rpo_descdvl_strip_shift,
+ rx_desc_vlan_stripping);
+}
+
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
+ rpol4chk_en_shift, tcp_udp_crc_offload_en);
+}
+
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+{
+ aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+}
+
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
+ rpo_lro_ptopt_en_msk,
+ rpo_lro_ptopt_en_shift,
+ lro_patch_optimization_en);
+}
+
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
+ rpo_lro_qses_lmt_msk,
+ rpo_lro_qses_lmt_shift,
+ lro_qsessions_lim);
+}
+
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
+ rpo_lro_tot_dsc_lmt_msk,
+ rpo_lro_tot_dsc_lmt_shift,
+ lro_total_desc_lim);
+}
+
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
+ rpo_lro_pkt_min_msk,
+ rpo_lro_pkt_min_shift,
+ lro_min_pld_of_first_pkt);
+}
+
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+{
+ aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+}
+
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
+{
+/* Register address for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_adr[32] = {
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+ };
+
+/* Bitmask for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_msk[32] = {
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+ };
+
+/* Lower bit position of bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_shift[32] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
+ rpo_lro_ldes_max_msk[lro],
+ rpo_lro_ldes_max_shift[lro],
+ lro_max_number_of_descriptors);
+}
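The tables above pack eight queues into each 32-bit register, one nibble-aligned 2-bit field per queue, so the register address advances every eight indices. A standalone sketch that recomputes those table entries (names are illustrative):

#include <stdint.h>

/* Illustrative only: derive the address, mask and shift looked up by
 * rpo_lro_max_num_of_descriptors_set() above.
 */
static void example_lro_des_max_location(uint32_t lro, uint32_t *addr,
					 uint32_t *mask, uint32_t *shift)
{
	*addr = 0x000055A0U + 4U * (lro / 8U);	/* eight queues per register */
	*shift = 4U * (lro % 8U);		/* nibble-aligned 2-bit field */
	*mask = 0x3U << *shift;
}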
+
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
+ rpo_lro_tb_div_msk,
+ rpo_lro_tb_div_shift,
+ lro_time_base_divider);
+}
+
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
+ rpo_lro_ina_ival_msk,
+ rpo_lro_ina_ival_shift,
+ lro_inactive_interval);
+}
+
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coalescing_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
+ rpo_lro_max_ival_msk,
+ rpo_lro_max_ival_shift,
+ lro_max_coalescing_interval);
+}
+
+/* rx */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
+ rx_reg_res_dsbl_msk,
+ rx_reg_res_dsbl_shift,
+ rx_reg_res_dis);
+}
+
+/* tdm */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
+ tdm_dcadcpuid_msk,
+ tdm_dcadcpuid_shift, cpuid);
+}
+
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
+{
+ aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+}
+
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
+ tdm_dca_en_shift, tx_dca_en);
+}
+
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
+ tdm_dca_mode_shift, tx_dca_mode);
+}
+
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
+ tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ tx_desc_dca_en);
+}
+
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
+ tdm_descden_msk,
+ tdm_descden_shift,
+ tx_desc_en);
+}
+
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
+ tdm_descdhd_msk, tdm_descdhd_shift);
+}
+
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
+ tdm_descdlen_msk,
+ tdm_descdlen_shift,
+ tx_desc_len);
+}
+
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
+ tdm_int_desc_wrb_en_msk,
+ tdm_int_desc_wrb_en_shift,
+ tx_desc_wr_wb_irq_en);
+}
+
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
+ tdm_descdwrb_thresh_msk,
+ tdm_descdwrb_thresh_shift,
+ tx_desc_wr_wb_threshold);
+}
+
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
+ tdm_int_mod_en_msk,
+ tdm_int_mod_en_shift,
+ tdm_irq_moderation_en);
+}
+
+/* thm */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
+ thm_lso_tcp_flag_first_msk,
+ thm_lso_tcp_flag_first_shift,
+ lso_tcp_flag_of_first_pkt);
+}
+
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
+ thm_lso_tcp_flag_last_msk,
+ thm_lso_tcp_flag_last_shift,
+ lso_tcp_flag_of_last_pkt);
+}
+
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
+ thm_lso_tcp_flag_mid_msk,
+ thm_lso_tcp_flag_mid_shift,
+ lso_tcp_flag_of_middle_pkt);
+}
+
+/* TPB: tx packet buffer */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
+ tpb_tx_buf_en_shift, tx_buff_en);
+}
+
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
+ tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ tx_buff_hi_threshold_per_tc);
+}
+
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
+ tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ tx_buff_lo_threshold_per_tc);
+}
+
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
+ tpb_dma_sys_lbk_msk,
+ tpb_dma_sys_lbk_shift,
+ tx_dma_sys_lbk_en);
+}
+
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
+ tpb_txbbuf_size_msk,
+ tpb_txbbuf_size_shift,
+ tx_pkt_buff_size_per_tc);
+}
+
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
+ tpb_tx_scp_ins_en_msk,
+ tpb_tx_scp_ins_en_shift,
+ tx_path_scp_ins_en);
+}
+
+/* TPO: tx packet offload */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
+ tpo_ipv4chk_en_msk,
+ tpo_ipv4chk_en_shift,
+ ipv4header_crc_offload_en);
+}
+
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
+ tpol4chk_en_msk,
+ tpol4chk_en_shift,
+ tcp_udp_crc_offload_en);
+}
+
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
+ tpo_pkt_sys_lbk_msk,
+ tpo_pkt_sys_lbk_shift,
+ tx_pkt_sys_lbk_en);
+}
+
+/* TPS: tx packet scheduler */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
+ tps_data_tc_arb_mode_msk,
+ tps_data_tc_arb_mode_shift,
+ tx_pkt_shed_data_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
+ tps_desc_rate_ta_rst_msk,
+ tps_desc_rate_ta_rst_shift,
+ curr_time_res);
+}
+
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
+ tps_desc_rate_lim_msk,
+ tps_desc_rate_lim_shift,
+ tx_pkt_shed_desc_rate_lim);
+}
+
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
+ tps_desc_tc_arb_mode_msk,
+ tps_desc_tc_arb_mode_shift,
+ tx_pkt_shed_desc_tc_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
+ tps_desc_tctcredit_max_msk,
+ tps_desc_tctcredit_max_shift,
+ tx_pkt_shed_desc_tc_max_credit);
+}
+
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
+ tps_desc_tctweight_msk,
+ tps_desc_tctweight_shift,
+ tx_pkt_shed_desc_tc_weight);
+}
+
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_vm_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
+ tps_desc_vm_arb_mode_msk,
+ tps_desc_vm_arb_mode_shift,
+ tx_pkt_shed_desc_vm_arb_mode);
+}
+
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
+ tps_data_tctcredit_max_msk,
+ tps_data_tctcredit_max_shift,
+ tx_pkt_shed_tc_data_max_credit);
+}
+
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight, u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
+ tps_data_tctweight_msk,
+ tps_data_tctweight_shift,
+ tx_pkt_shed_tc_data_weight);
+}
+
+/* tx */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
+ tx_reg_res_dsbl_msk,
+ tx_reg_res_dsbl_shift, tx_reg_res_dis);
+}
+
+/* msm */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
+ msm_reg_access_busy_msk,
+ msm_reg_access_busy_shift);
+}
+
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
+ msm_reg_addr_msk,
+ msm_reg_addr_shift,
+ reg_addr_for_indirect_addr);
+}
+
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
+ msm_reg_rd_strobe_msk,
+ msm_reg_rd_strobe_shift,
+ reg_rd_strobe);
+}
+
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+}
+
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+{
+ aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+}
+
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
+ msm_reg_wr_strobe_msk,
+ msm_reg_wr_strobe_shift,
+ reg_wr_strobe);
+}
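Together, the MSM helpers above form an indirect access window: latch the target address, drive data or a strobe, and poll the busy flag. A hedged sketch of an indirect read; the polling order, retry count and delay are assumptions rather than the driver's exact sequence.

/* Hedged sketch: indirect MSM register read built on the accessors
 * above. Retry/delay values are placeholders.
 */
static int example_msm_indirect_read(struct aq_hw_s *aq_hw, u32 addr, u32 *val)
{
	int retries = 10;

	msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
	msm_reg_rd_strobe_set(aq_hw, 1U);

	while (msm_reg_access_status_get(aq_hw) && --retries)
		udelay(10U);

	if (!retries)
		return -ETIME;

	*val = msm_reg_rd_data_get(aq_hw);
	return 0;
}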
+
+/* pci */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
+ pci_reg_res_dsbl_msk,
+ pci_reg_res_dsbl_shift,
+ pci_reg_res_dis);
+}
+
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
+{
+ aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ glb_cpu_scratch_scp);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
new file mode 100644
index 000000000000..ed1085b95adb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -0,0 +1,677 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_H
+#define HW_ATL_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* global */
+
+/* set global microprocessor semaphore */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
+
+/* get global microprocessor semaphore */
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+
+/* set global register reset disable */
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+
+/* set soft reset */
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+
+/* get soft reset */
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+
+/* stats */
+
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter lsw */
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter lsw */
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter lsw */
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter lsw */
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter msw */
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter msw */
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter msw */
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter msw */
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx errors counter register */
+u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast frames counter register */
+u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx multicast frames counter register */
+u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast frames counter register */
+u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast octets counter register 1 */
+u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast octets counter register 0 */
+u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get rx dma statistics counter 7 */
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+
+/* get msm tx errors counter register */
+u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast frames counter register */
+u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast frames counter register */
+u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast frames counter register */
+u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast octets counter register 1 */
+u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast octets counter register 1 */
+u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast octets counter register 0 */
+u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get global mif identification */
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+
+/* interrupt */
+
+/* set interrupt auto mask lsw */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+
+/* set interrupt mapping enable rx */
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+
+/* set interrupt mapping enable tx */
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+
+/* set interrupt mapping rx */
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+
+/* set interrupt mapping tx */
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+
+/* set interrupt mask clear lsw */
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+
+/* set interrupt mask set lsw */
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+
+/* set interrupt register reset disable */
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+
+/* set interrupt status clear lsw */
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
+
+/* get interrupt status lsw */
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+
+/* get reset interrupt */
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+
+/* set reset interrupt */
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+
+/* rdm */
+
+/* set cpu id */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set rx dca enable */
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+
+/* set rx dca mode */
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+
+/* set rx descriptor data buffer size */
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor dca enable */
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
+
+/* set rx descriptor enable */
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
+
+/* set rx descriptor header splitting */
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor);
+
+/* get rx descriptor head pointer */
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx descriptor length */
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
+
+/* set rx descriptor write-back interrupt enable */
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
+
+/* set rx header dca enable */
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
+
+/* set rx payload dca enable */
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+
+/* set rx descriptor header buffer size */
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor reset */
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
+
+/* Set RDM Interrupt Moderation Enable */
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+
+/* reg */
+
+/* set general interrupt mapping register */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+
+/* get general interrupt status register */
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+
+/* set interrupt global control register */
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+
+/* set interrupt throttle register */
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+
+/* set rx dma descriptor base address lsw */
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set rx dma descriptor base address msw */
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* get rx dma descriptor status register */
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx dma descriptor tail pointer register */
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* set rx filter multicast filter mask register */
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
+
+/* set rx filter multicast filter register */
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
+
+/* set rx filter rss control register 1 */
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
+
+/* Set RX Filter Control Register 2 */
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+
+/* Set RX Interrupt Moderation Control Register */
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue);
+
+/* set tx dma debug control */
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+
+/* set tx dma descriptor base address lsw */
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set tx dma descriptor base address msw */
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* set tx dma descriptor tail pointer register */
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* Set TX Interrupt Moderation Control Register */
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* set global microprocessor scratch pad */
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp, u32 scratch_scp);
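+
+/* Convention note for the accessors declared in this header: after the
+ * aq_hw handle, the value to be programmed comes first and, where present,
+ * a trailing argument selects the register instance (descriptor ring,
+ * filter, queue, tc, buffer or dca index). An illustrative call sequence
+ * enabling RX ring 0 (values chosen for the example only) would be:
+ *
+ *	rdm_rx_desc_head_splitting_set(aq_hw, 0U, 0U);
+ *	rdm_rx_desc_en_set(aq_hw, 1U, 0U);
+ */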
+
+/* rpb */
+
+/* set dma system loopback */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+
+/* set rx traffic class mode */
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
+
+/* set rx buffer enable */
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+
+/* set rx buffer high threshold (per tc) */
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set rx buffer low threshold (per tc) */
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set rx flow control mode */
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+
+/* set rx packet buffer size (per tc) */
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* set rx xoff enable (per tc) */
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer);
+
+/* rpf */
+
+/* set l2 broadcast count threshold */
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
+
+/* set l2 broadcast enable */
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+
+/* set l2 broadcast filter action */
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
+
+/* set l2 multicast filter enable */
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+ u32 filter);
+
+/* set l2 promiscuous mode enable */
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
+
+/* set l2 unicast filter action */
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+ u32 filter);
+
+/* set l2 unicast filter enable */
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
+
+/* set l2 unicast destination address lsw */
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter);
+
+/* set l2 unicast destination address msw */
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter);
+
+/* Set L2 Accept all Multicast packets */
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
+
+/* set user-priority tc mapping */
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
+
+/* set rss key address */
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+
+/* set rss key write data */
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+
+/* get rss key write enable */
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss key write enable */
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+
+/* set rss redirection table address */
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
+
+/* set rss redirection table write data */
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
+
+/* get rss redirection write enable */
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss redirection write enable */
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+
+/* set tpo to rpf system loopback */
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
+
+/* set vlan inner ethertype */
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+
+/* set vlan outer ethertype */
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+
+/* set vlan promiscuous mode enable */
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+
+/* Set VLAN untagged action */
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+
+/* Set VLAN accept untagged packets */
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_accept_untagged_packets);
+
+/* Set VLAN filter enable */
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+
+/* Set VLAN Filter Action */
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
+
+/* Set VLAN ID Filter */
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+
+/* set ethertype filter enable */
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+
+/* set ethertype user-priority enable */
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter);
+
+/* set ethertype rx queue enable */
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+ u32 filter);
+
+/* set ethertype rx queue */
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
+
+/* set ethertype user-priority */
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+ u32 filter);
+
+/* set ethertype management queue */
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
+
+/* set ethertype filter action */
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
+
+/* set ethertype filter */
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+
+/* rpo */
+
+/* set ipv4 header checksum offload enable */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set rx descriptor vlan stripping */
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
+
+/* set tcp/udp checksum offload enable */
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* Set LRO Patch Optimization Enable. */
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
+
+/* Set Large Receive Offload Enable */
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+
+/* Set LRO Q Sessions Limit */
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+
+/* Set LRO Total Descriptor Limit */
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+
+/* Set LRO Min Payload of First Packet */
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
+
+/* Set LRO Packet Limit */
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+
+/* Set LRO Max Number of Descriptors */
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
+
+/* Set LRO Time Base Divider */
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
+
+/* Set LRO Inactive Interval */
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
+
+/* Set LRO Max Coalescing Interval */
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coalescing_interval);
+
+/* rx */
+
+/* set rx register reset disable */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+
+/* tdm */
+
+/* set cpu id */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set large send offload enable */
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
+
+/* set tx descriptor enable */
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+
+/* set tx dca enable */
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+
+/* set tx dca mode */
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+
+/* set tx descriptor dca enable */
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+
+/* get tx descriptor head pointer */
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set tx descriptor length */
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
+
+/* set tx descriptor write-back interrupt enable */
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
+
+/* set tx descriptor write-back threshold */
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor);
+
+/* Set TDM Interrupt Moderation Enable */
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
+
+/* thm */
+
+/* set lso tcp flag of first packet */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
+
+/* set lso tcp flag of last packet */
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
+
+/* set lso tcp flag of middle packet */
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
+
+/* tpb */
+
+/* set tx buffer enable */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+
+/* set tx buffer high threshold (per tc) */
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set tx buffer low threshold (per tc) */
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set tx dma system loopback enable */
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+
+/* set tx packet buffer size (per tc) */
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
+
+/* set tx path pad insert enable */
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+
+/* tpo */
+
+/* set ipv4 header checksum offload enable */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set tcp/udp checksum offload enable */
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* set tx pkt system loopback enable */
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+
+/* tps */
+
+/* set tx packet scheduler data arbitration mode */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
+
+/* set tx packet scheduler descriptor rate current time reset */
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
+
+/* set tx packet scheduler descriptor rate limit */
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
+
+/* set tx packet scheduler descriptor tc arbitration mode */
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_arb_mode);
+
+/* set tx packet scheduler descriptor tc max credit */
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_max_credit,
+ u32 tc);
+
+/* set tx packet scheduler descriptor tc weight */
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc);
+
+/* set tx packet scheduler descriptor vm arbitration mode */
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_vm_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_max_credit,
+ u32 tc);
+
+/* set tx packet scheduler tc data weight */
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc);
+
+/* tx */
+
+/* set tx register reset disable */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+
+/* msm */
+
+/* get register access status */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+
+/* set register address for indirect address */
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
+
+/* set register read strobe */
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+
+/* get register read data */
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+
+/* set register write data */
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+
+/* set register write strobe */
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+
+/* pci */
+
+/* set pci register reset disable */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644
index 000000000000..5527fc0e5942
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -0,0 +1,2375 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
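+
+/* Example (illustrative): with the 0x4 stride above, semaphore 5 resolves to
+ * glb_cpu_sem_adr(5) == 0x000003a0 + 5 * 0x4 == 0x000003b4.
+ */
+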
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+
+/* preprocessor definitions for msm rx errors counter register */
+#define mac_msm_rx_errs_cnt_adr 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+
+/* preprocessor definitions for rx dma statistics counter 7 */
+#define rx_dma_stat_counter7_adr 0x00006818u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define glb_mif_id_adr 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define itr_iamrlsw_adr 0x00002090
+/* register address for bitfield rx dma drop packet counter [1f:0] */
+#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+
+/* register address for bitfield imcr_lsw[1f:0] */
+#define itr_imcrlsw_adr 0x00002070
+/* register address for bitfield imsr_lsw[1f:0] */
+#define itr_imsrlsw_adr 0x00002060
+/* register address for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_adr 0x00002300
+/* bitmask for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_msk 0x20000000
+/* lower bit position of bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_shift 29
+/* register address for bitfield iscr_lsw[1f:0] */
+#define itr_iscrlsw_adr 0x00002050
+/* register address for bitfield isr_lsw[1f:0] */
+#define itr_isrlsw_adr 0x00002000
+/* register address for bitfield itr_reset */
+#define itr_res_adr 0x00002300
+/* bitmask for bitfield itr_reset */
+#define itr_res_msk 0x80000000
+/* lower bit position of bitfield itr_reset */
+#define itr_res_shift 31
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_msk 0x000000ff
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_shift 0
+/* register address for bitfield dca_en */
+#define rdm_dca_en_adr 0x00006180
+
+/* rx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_rdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define rdm_dca_en_adr 0x00006180
+/* bitmask for bitfield dca_en */
+#define rdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define rdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define rdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define rdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define rdm_dca_en_default 0x1
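+
+/* Typical use of an _adr/_msk/_mskn/_shift group (sketch only; it assumes
+ * the aq_hw_read_reg()/aq_hw_write_reg() register helpers provided elsewhere
+ * in this driver). Enabling dca_en by read-modify-write:
+ *
+ *	u32 val = aq_hw_read_reg(aq_hw, rdm_dca_en_adr);
+ *
+ *	val &= rdm_dca_en_mskn;
+ *	val |= (1U << rdm_dca_en_shift) & rdm_dca_en_msk;
+ *	aq_hw_write_reg(aq_hw, rdm_dca_en_adr, val);
+ */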
+
+/* rx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_rdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_adr 0x00006180
+/* bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_default 0x0
+
+/* rx desc{d}_data_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_data_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_msk 0x0000001f
+/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_mskn 0xffffffe0
+/* lower bit position of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_shift 0
+/* width of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_width 5
+/* default value of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_default 0x0
+
+/* rx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_default 0x0
+
+/* rx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_en */
+#define rdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define rdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define rdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define rdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define rdm_descden_default 0x0
+
+/* rx desc{d}_hdr_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_hdr_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_msk 0x00001f00
+/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_mskn 0xffffe0ff
+/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_shift 8
+/* width of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_width 5
+/* default value of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_default 0x0
+
+/* rx desc{d}_hdr_split bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_split".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_hdr_split_i[0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_msk 0x10000000
+/* inverted bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_mskn 0xefffffff
+/* lower bit position of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_shift 28
+/* width of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_width 1
+/* default value of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_default 0x0
+
+/* rx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="rdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_width 13
+
+/* rx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_default 0x0
+
+/* rx desc{d}_reset bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_reset".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_msk 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_mskn 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define rdm_descdreset_shift 25
+/* width of bitfield desc{d}_reset */
+#define rdm_descdreset_width 1
+/* default value of bitfield desc{d}_reset */
+#define rdm_descdreset_default 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_adr 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_msk 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_shift 2
+/* width of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_default 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_msk 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_mskn 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_shift 30
+/* width of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_width 1
+/* default value of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_default 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_pay_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_msk 0x20000000
+/* inverted bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_mskn 0xdfffffff
+/* lower bit position of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_shift 29
+/* width of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_width 1
+/* default value of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_default 0x0
+
+/* RX rdm_int_rim_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rdm_int_rim_en".
+ * PORT="pif_rdm_int_rim_en_i"
+ */
+
+/* Register address for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_adr 0x00005A30
+/* Bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_msk 0x00000008
+/* Inverted bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_mskn 0xFFFFFFF7
+/* Lower bit position of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_shift 3
+/* Width of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_width 1
+/* Default value of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_default 0x0
+
+/* general interrupt mapping register definitions
+ * preprocessor definitions for general interrupt mapping register
+ * base address: 0x00002180
+ * parameter: regidx {f} | stride size 0x4 | range [0, 3]
+ */
+#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+
+/* general interrupt status register definitions
+ * preprocessor definitions for general interrupt status register
+ * address: 0x000021A0
+ */
+
+#define gen_intr_stat_adr 0x000021A4U
+
+/* interrupt global control register definitions
+ * preprocessor definitions for interrupt global control register
+ * address: 0x00002300
+ */
+#define intr_glb_ctl_adr 0x00002300u
+
+/* interrupt throttle register definitions
+ * preprocessor definitions for interrupt throttle register
+ * base address: 0x00002800
+ * parameter: throttle {t} | stride size 0x4 | range [0, 31]
+ */
+#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+
+/* rx dma descriptor base address lsw definitions
+ * preprocessor definitions for rx dma descriptor base address lsw
+ * base address: 0x00005b00
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+(0x00005b00u + (descriptor) * 0x20)
+
+/* rx dma descriptor base address msw definitions
+ * preprocessor definitions for rx dma descriptor base address msw
+ * base address: 0x00005b04
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+(0x00005b04u + (descriptor) * 0x20)
+
+/* rx dma descriptor status register definitions
+ * preprocessor definitions for rx dma descriptor status register
+ * base address: 0x00005b14
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+
+/* rx dma descriptor tail pointer register definitions
+ * preprocessor definitions for rx dma descriptor tail pointer register
+ * base address: 0x00005b10
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+
+/* rx interrupt moderation control register definitions
+ * Preprocessor definitions for RX Interrupt Moderation Control Register
+ * Base Address: 0x00005A40
+ * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
+ */
+#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+
+/* rx filter multicast filter mask register definitions
+ * preprocessor definitions for rx filter multicast filter mask register
+ * address: 0x00005270
+ */
+#define rx_flr_mcst_flr_msk_adr 0x00005270u
+
+/* rx filter multicast filter register definitions
+ * preprocessor definitions for rx filter multicast filter register
+ * base address: 0x00005250
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ */
+#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+
+/* RX Filter RSS Control Register 1 Definitions
+ * Preprocessor definitions for RX Filter RSS Control Register 1
+ * Address: 0x000054C0
+ */
+#define rx_flr_rss_control1_adr 0x000054C0u
+
+/* RX Filter Control Register 2 Definitions
+ * Preprocessor definitions for RX Filter Control Register 2
+ * Address: 0x00005104
+ */
+#define rx_flr_control2_adr 0x00005104u
+
+/* tx tx dma debug control [1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
+ * port="pif_tdm_debug_cntl_i[31:0]"
+ */
+
+/* register address for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_adr 0x00008920
+/* bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+/* inverted bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+/* lower bit position of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_shift 0
+/* width of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_width 32
+/* default value of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_default 0x0
+
+/* tx dma descriptor base address lsw definitions
+ * preprocessor definitions for tx dma descriptor base address lsw
+ * base address: 0x00007c00
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+ (0x00007c00u + (descriptor) * 0x40)
+
+/* tx dma descriptor tail pointer register definitions
+ * preprocessor definitions for tx dma descriptor tail pointer register
+ * base address: 0x00007c10
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
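+
+/* Note: the RX per-ring registers above are laid out with a 0x20 stride per
+ * descriptor ring, while the TX per-ring registers here use a 0x40 stride.
+ */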
+
+/* rx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_rpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_adr 0x00005000
+/* bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_default 0x0
+
+/* rx rx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "rx_tc_mode".
+ * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
+ */
+
+/* register address for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_adr 0x00005700
+/* bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_msk 0x00000100
+/* inverted bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+/* lower bit position of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_shift 8
+/* width of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_width 1
+/* default value of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_default 0x0
+
+/* rx rx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx_buf_en".
+ * port="pif_rpb_rx_buf_en_i"
+ */
+
+/* register address for bitfield rx_buf_en */
+#define rpb_rx_buf_en_adr 0x00005700
+/* bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield rx_buf_en */
+#define rpb_rx_buf_en_shift 0
+/* width of bitfield rx_buf_en */
+#define rpb_rx_buf_en_width 1
+/* default value of bitfield rx_buf_en */
+#define rpb_rx_buf_en_default 0x0
+
+/* rx rx{b}_hi_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_hi_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_msk 0x3fff0000
+/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_mskn 0xc000ffff
+/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_shift 16
+/* width of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_width 14
+/* default value of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_default 0x0
+
+/* rx rx{b}_lo_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_lo_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_msk 0x00003fff
+/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_mskn 0xffffc000
+/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_shift 0
+/* width of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_width 14
+/* default value of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_default 0x0
+
+/* rx rx_fc_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
+ * port="pif_rpb_rx_fc_mode_i[1:0]"
+ */
+
+/* register address for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_adr 0x00005700
+/* bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_msk 0x00000030
+/* inverted bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_mskn 0xffffffcf
+/* lower bit position of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_shift 4
+/* width of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_width 2
+/* default value of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_default 0x0
+
+/* rx rx{b}_buf_size[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_buf_size_i[8:0]"
+ */
+
+/* register address for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_msk 0x000001ff
+/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_mskn 0xfffffe00
+/* lower bit position of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_shift 0
+/* width of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_width 9
+/* default value of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_default 0x0
+
+/* rx rx{b}_xoff_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_xoff_en".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx_xoff_en_i[0]"
+ */
+
+/* register address for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_msk 0x80000000
+/* inverted bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_mskn 0x7fffffff
+/* lower bit position of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_shift 31
+/* width of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_width 1
+/* default value of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_default 0x0
+
+/* rx l2_bc_thresh[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
+ * port="pif_rpf_l2_bc_thresh_i[15:0]"
+ */
+
+/* register address for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_adr 0x00005100
+/* bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_msk 0xffff0000
+/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_mskn 0x0000ffff
+/* lower bit position of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_shift 16
+/* width of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_width 16
+/* default value of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_default 0x0
+
+/* rx l2_bc_en bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_en".
+ * port="pif_rpf_l2_bc_en_i"
+ */
+
+/* register address for bitfield l2_bc_en */
+#define rpfl2bc_en_adr 0x00005100
+/* bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_msk 0x00000001
+/* inverted bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_mskn 0xfffffffe
+/* lower bit position of bitfield l2_bc_en */
+#define rpfl2bc_en_shift 0
+/* width of bitfield l2_bc_en */
+#define rpfl2bc_en_width 1
+/* default value of bitfield l2_bc_en */
+#define rpfl2bc_en_default 0x0
+
+/* rx l2_bc_act[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
+ * port="pif_rpf_l2_bc_act_i[2:0]"
+ */
+
+/* register address for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_adr 0x00005100
+/* bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_msk 0x00007000
+/* inverted bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_mskn 0xffff8fff
+/* lower bit position of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_shift 12
+/* width of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_width 3
+/* default value of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_default 0x0
+
+/* rx l2_mc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_mc_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ * port="pif_rpf_l2_mc_en_i[0]"
+ */
+
+/* register address for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+/* bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_shift 31
+/* width of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_width 1
+/* default value of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_default 0x0
+
+/* rx l2_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "l2_promis_mode".
+ * port="pif_rpf_l2_promis_mode_i"
+ */
+
+/* register address for bitfield l2_promis_mode */
+#define rpfl2promis_mode_adr 0x00005100
+/* bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_msk 0x00000008
+/* inverted bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_mskn 0xfffffff7
+/* lower bit position of bitfield l2_promis_mode */
+#define rpfl2promis_mode_shift 3
+/* width of bitfield l2_promis_mode */
+#define rpfl2promis_mode_width 1
+/* default value of bitfield l2_promis_mode */
+#define rpfl2promis_mode_default 0x0
+
+/* rx l2_uc_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_act0_i[2:0]"
+ */
+
+/* register address for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_msk 0x00070000
+/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_shift 16
+/* width of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_width 3
+/* default value of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_default 0x0
+
+/* rx l2_uc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_en{f}".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_en_i[0]"
+ */
+
+/* register address for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_shift 31
+/* width of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_width 1
+/* default value of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_default 0x0
+
+/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
+#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+/* register address for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_msk 0x0000ffff
+/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_shift 0
+
+/* rx l2_mc_accept_all bitfield definitions
+ * Preprocessor definitions for the bitfield "l2_mc_accept_all".
+ * PORT="pif_rpf_l2_mc_all_accept_i"
+ */
+
+/* Register address for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_adr 0x00005270
+/* Bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_msk 0x00004000
+/* Inverted bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+/* Lower bit position of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_shift 14
+/* Width of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_width 1
+/* Default value of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_default 0x0
+
+/* width of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_width 3
+/* default value of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_default 0x0
+
+/* rx rss_key_addr[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
+ * port="pif_rpf_rss_key_addr_i[4:0]"
+ */
+
+/* register address for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_adr 0x000054d0
+/* bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_msk 0x0000001f
+/* inverted bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_mskn 0xffffffe0
+/* lower bit position of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_shift 0
+/* width of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_width 5
+/* default value of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_default 0x0
+
+/* rx rss_key_wr_data[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
+ * port="pif_rpf_rss_key_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_adr 0x000054d4
+/* bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_mskn 0x00000000
+/* lower bit position of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_shift 0
+/* width of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_width 32
+/* default value of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_default 0x0
+
+/* rx rss_key_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_en_i".
+ * port="pif_rpf_rss_key_wr_en_i"
+ */
+
+/* register address for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_adr 0x000054d0
+/* bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_msk 0x00000020
+/* inverted bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+/* lower bit position of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_shift 5
+/* width of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_width 1
+/* default value of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_default 0x0
+
+/* rx rss_redir_addr[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
+ * port="pif_rpf_rss_redir_addr_i[3:0]"
+ */
+
+/* register address for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_adr 0x000054e0
+/* bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_msk 0x0000000f
+/* inverted bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_mskn 0xfffffff0
+/* lower bit position of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_shift 0
+/* width of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_width 4
+/* default value of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_default 0x0
+
+/* rx rss_redir_wr_data[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
+ * port="pif_rpf_rss_redir_wr_data_i[15:0]"
+ */
+
+/* register address for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_adr 0x000054e4
+/* bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_msk 0x0000ffff
+/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_mskn 0xffff0000
+/* lower bit position of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_shift 0
+/* width of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_width 16
+/* default value of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_default 0x0
+
+/* rx rss_redir_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
+ * port="pif_rpf_rss_redir_wr_en_i"
+ */
+
+/* register address for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_adr 0x000054e0
+/* bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_msk 0x00000010
+/* inverted bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+/* lower bit position of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_shift 4
+/* width of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_width 1
+/* default value of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_default 0x0
+
+/* rx tpo_rpf_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
+ * port="pif_rpf_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+/* bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+/* lower bit position of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_shift 8
+/* width of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_width 1
+/* default value of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_default 0x0
+
+/* rx vl_inner_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
+ * port="pif_rpf_vl_inner_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_adr 0x00005284
+/* bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_msk 0x0000ffff
+/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_mskn 0xffff0000
+/* lower bit position of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_shift 0
+/* width of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_width 16
+/* default value of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_default 0x8100
+
+/* rx vl_outer_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
+ * port="pif_rpf_vl_outer_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_adr 0x00005284
+/* bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_msk 0xffff0000
+/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_mskn 0x0000ffff
+/* lower bit position of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_shift 16
+/* width of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_width 16
+/* default value of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_default 0x88a8
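+
+/* Sketch (illustrative, assuming the aq_hw_write_reg() helper): the inner
+ * and outer TPID fields occupy one register at 0x00005284, bits [15:0] and
+ * [31:16] respectively, so both defaults can be restored in a single write:
+ *
+ *	aq_hw_write_reg(aq_hw, rpf_vl_inner_tpid_adr,
+ *			(0x88a8u << rpf_vl_outer_tpid_shift) | 0x8100u);
+ */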
+
+/* rx vl_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "vl_promis_mode".
+ * port="pif_rpf_vl_promis_mode_i"
+ */
+
+/* register address for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_adr 0x00005280
+/* bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_msk 0x00000002
+/* inverted bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_mskn 0xfffffffd
+/* lower bit position of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_shift 1
+/* width of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_width 1
+/* default value of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_default 0x0
+
+/* RX vl_accept_untagged_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
+ * PORT="pif_rpf_vl_accept_untagged_i"
+ */
+
+/* Register address for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_adr 0x00005280
+/* Bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_msk 0x00000004
+/* Inverted bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+/* Lower bit position of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_shift 2
+/* Width of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_width 1
+/* Default value of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_default 0x0
+
+/* RX vl_untagged_act[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]"
+ */
+
+/* Register address for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_adr 0x00005280
+/* Bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_msk 0x00000038
+/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+/* Lower bit position of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_shift 3
+/* Width of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_width 3
+/* Default value of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_default 0x0
+
+/* RX vl_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_en_i[0]"
+ */
+
+/* Register address for bitfield vl_en{F} */
+#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield vl_en{F} */
+#define rpf_vl_en_f_shift 31
+/* Width of bitfield vl_en{F} */
+#define rpf_vl_en_f_width 1
+/* Default value of bitfield vl_en{F} */
+#define rpf_vl_en_f_default 0x0
+
+/* RX vl_act{F}[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_act0_i[2:0]"
+ */
+
+/* Register address for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_msk 0x00070000
+/* Inverted bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_mskn 0xFFF8FFFF
+/* Lower bit position of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_shift 16
+/* Width of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_width 3
+/* Default value of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_default 0x0
+
+/* RX vl_id{F}[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_msk 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_mskn 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_shift 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_width 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_default 0x0
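+
+/* Sketch (illustrative, assuming the aq_hw_write_reg() helper and
+ * caller-supplied act, vid and filter values): vl_en{F}, vl_act{F} and
+ * vl_id{F} share the same per-filter register (0x00005290 + filter * 0x4),
+ * so a complete VLAN filter entry can be composed and written in one go:
+ *
+ *	u32 val = (1U << rpf_vl_en_f_shift) |
+ *		  ((act << rpf_vl_act_f_shift) & rpf_vl_act_f_msk) |
+ *		  ((vid << rpf_vl_id_f_shift) & rpf_vl_id_f_msk);
+ *
+ *	aq_hw_write_reg(aq_hw, rpf_vl_en_f_adr(filter), val);
+ */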
+
+/* RX et_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "et_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_et_en_i[0]"
+ */
+
+/* Register address for bitfield et_en{F} */
+#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+/* Bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield et_en{F} */
+#define rpf_et_en_f_shift 31
+/* Width of bitfield et_en{F} */
+#define rpf_et_en_f_width 1
+/* Default value of bitfield et_en{F} */
+#define rpf_et_en_f_default 0x0
+
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define rpf_et_enf_msk 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define rpf_et_enf_mskn 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define rpf_et_enf_shift 31
+/* width of bitfield et_en{f} */
+#define rpf_et_enf_width 1
+/* default value of bitfield et_en{f} */
+#define rpf_et_enf_default 0x0
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up_en_i[0]"
+ */
+
+/* register address for bitfield et_up{f}_en */
+#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_msk 0x40000000
+/* inverted bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_mskn 0xbfffffff
+/* lower bit position of bitfield et_up{f}_en */
+#define rpf_et_upfen_shift 30
+/* width of bitfield et_up{f}_en */
+#define rpf_et_upfen_width 1
+/* default value of bitfield et_up{f}_en */
+#define rpf_et_upfen_default 0x0
+
+/* rx et_rxq{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq_en_i[0]"
+ */
+
+/* register address for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_msk 0x20000000
+/* inverted bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_mskn 0xdfffffff
+/* lower bit position of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_shift 29
+/* width of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_width 1
+/* default value of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_default 0x0
+
+/* rx et_up{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up0_i[2:0]"
+ */
+
+/* register address for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_msk 0x1c000000
+/* inverted bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_mskn 0xe3ffffff
+/* lower bit position of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_shift 26
+/* width of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_width 3
+/* default value of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_default 0x0
+
+/* rx et_rxq{f}[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq0_i[4:0]"
+ */
+
+/* register address for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_msk 0x01f00000
+/* inverted bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_mskn 0xfe0fffff
+/* lower bit position of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_shift 20
+/* width of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_width 5
+/* default value of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_default 0x0
+
+/* rx et_mng_rxq{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_mng_rxq{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_mng_rxq_i[0]"
+ */
+
+/* register address for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_msk 0x00080000
+/* inverted bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+/* lower bit position of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_shift 19
+/* width of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_width 1
+/* default value of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_default 0x0
+
+/* rx et_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_act0_i[2:0]"
+ */
+
+/* register address for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_msk 0x00070000
+/* inverted bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_shift 16
+/* width of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_width 3
+/* default value of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_default 0x0
+
+/* rx et_val{f}[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_val{f}[f:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_val0_i[15:0]"
+ */
+
+/* register address for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_msk 0x0000ffff
+/* inverted bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_mskn 0xffff0000
+/* lower bit position of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_shift 0
+/* width of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_width 16
+/* default value of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_default 0x0
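+
+/* General pattern: for every field above, _mskn is the bitwise complement of
+ * _msk, so a read-modify-write of one field with only the plain accessors
+ * reduces to, e.g. for et_act{f} ('aq_hw', 'filter' and 'action' are
+ * placeholders; aq_hw_read_reg()/aq_hw_write_reg() are the accessors also
+ * used by hw_atl_utils.c below):
+ *
+ *	u32 reg = aq_hw_read_reg(aq_hw, rpf_et_actf_adr(filter));
+ *
+ *	reg = (reg & rpf_et_actf_mskn) |
+ *	      ((action << rpf_et_actf_shift) & rpf_et_actf_msk);
+ *	aq_hw_write_reg(aq_hw, rpf_et_actf_adr(filter), reg);
+ */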
+
+/* rx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_rpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_adr 0x00005580
+/* bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_default 0x0
+
+/* rx desc{d}_vl_strip bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_vl_strip".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rpo_desc_vl_strip_i[0]"
+ */
+
+/* register address for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_msk 0x20000000
+/* inverted bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_mskn 0xdfffffff
+/* lower bit position of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_shift 29
+/* width of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_width 1
+/* default value of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_default 0x0
+
+/* rx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_rpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define rpol4chk_en_adr 0x00005580
+/* bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define rpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define rpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define rpol4chk_en_default 0x0
+
+/* rx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_rx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_adr 0x00005000
+/* bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_default 0x1
+
+/* tx dca{d}_cpuid[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_msk 0x000000ff
+/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_mskn 0xffffff00
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_shift 0
+/* width of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_width 8
+/* default value of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_default 0x0
+
+/* tx lso_en[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_en[1f:0]".
+ * port="pif_tdm_lso_en_i[31:0]"
+ */
+
+/* register address for bitfield lso_en[1f:0] */
+#define tdm_lso_en_adr 0x00007810
+/* bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_msk 0xffffffff
+/* inverted bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_mskn 0x00000000
+/* lower bit position of bitfield lso_en[1f:0] */
+#define tdm_lso_en_shift 0
+/* width of bitfield lso_en[1f:0] */
+#define tdm_lso_en_width 32
+/* default value of bitfield lso_en[1f:0] */
+#define tdm_lso_en_default 0x0
+
+/* tx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_tdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define tdm_dca_en_adr 0x00008480
+/* bitmask for bitfield dca_en */
+#define tdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define tdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define tdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define tdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define tdm_dca_en_default 0x1
+
+/* tx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_tdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_adr 0x00008480
+/* bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_default 0x0
+
+/* tx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_default 0x0
+
+/* tx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_en */
+#define tdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define tdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define tdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define tdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define tdm_descden_default 0x0
+
+/* tx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_width 13
+
+/* tx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_default 0x0
+
+/* tx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_tdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_adr 0x00007b40
+/* bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_msk 0x00000002
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+/* lower bit position of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_shift 1
+/* width of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_default 0x0
+
+/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* register address for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_msk 0x00007f00
+/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_mskn 0xffff80ff
+/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_shift 8
+/* width of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_width 7
+/* default value of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_default 0x0
+
+/* tx lso_tcp_flag_first[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_shift 0
+/* width of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_width 12
+/* default value of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_default 0x0
+
+/* tx lso_tcp_flag_last[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_adr 0x00007824
+/* bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_shift 0
+/* width of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_width 12
+/* default value of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_default 0x0
+
+/* RX lro_rsc_max[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
+ */
+
+/* Register address for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_adr 0x00005598
+/* Bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_mskn 0x00000000
+/* Lower bit position of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_shift 0
+/* Width of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_width 32
+/* Default value of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_default 0x0
+
+/* RX lro_en[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_en[1F:0]".
+ * PORT="pif_rpo_lro_en_i[31:0]"
+ */
+
+/* Register address for bitfield lro_en[1F:0] */
+#define rpo_lro_en_adr 0x00005590
+/* Bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_mskn 0x00000000
+/* Lower bit position of bitfield lro_en[1F:0] */
+#define rpo_lro_en_shift 0
+/* Width of bitfield lro_en[1F:0] */
+#define rpo_lro_en_width 32
+/* Default value of bitfield lro_en[1F:0] */
+#define rpo_lro_en_default 0x0
+
+/* RX lro_ptopt_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ptopt_en".
+ * PORT="pif_rpo_lro_ptopt_en_i"
+ */
+
+/* Register address for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_adr 0x00005594
+/* Bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_msk 0x00008000
+/* Inverted bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+/* Lower bit position of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_shift 15
+/* Width of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_width 1
+/* Default value of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_default 0x1
+
+/* RX lro_q_ses_lmt Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
+ * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_msk 0x00003000
+/* Inverted bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+/* Lower bit position of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_shift 12
+/* Width of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_width 2
+/* Default value of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_default 0x1
+
+/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_shift 5
+/* Width of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_width 2
+/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_default 0x1
+
+/* RX lro_pkt_min[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]"
+ */
+
+/* Register address for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_adr 0x00005594
+/* Bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_msk 0x0000001F
+/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+/* Lower bit position of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_shift 0
+/* Width of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_width 5
+/* Default value of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_default 0x8
+
+/* Width of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_width 2
+/* Default value of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_default 0x0
+
+/* RX lro_tb_div[11:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
+ * PORT="pif_rpo_lro_tb_div_i[11:0]"
+ */
+
+/* Register address for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_adr 0x00005620
+/* Bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_msk 0xFFF00000
+/* Inverted bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_mskn 0x000FFFFF
+/* Lower bit position of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_shift 20
+/* Width of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_width 12
+/* Default value of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_default 0xC35
+
+/* RX lro_ina_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
+ * PORT="pif_rpo_lro_ina_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_adr 0x00005620
+/* Bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_msk 0x000FFC00
+/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_mskn 0xFFF003FF
+/* Lower bit position of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_shift 10
+/* Width of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_width 10
+/* Default value of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_default 0xA
+
+/* RX lro_max_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
+ * PORT="pif_rpo_lro_max_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_adr 0x00005620
+/* Bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_msk 0x000003FF
+/* Inverted bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_mskn 0xFFFFFC00
+/* Lower bit position of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_shift 0
+/* Width of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_width 10
+/* Default value of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_default 0x19
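+
+/* Illustrative sketch (placeholder values, not recommended settings): with
+ * only the fields above, a minimal LRO enable sequence could be
+ *
+ *	aq_hw_write_reg(aq_hw, rpo_lro_en_adr, 0xFFFFFFFFU);
+ *	aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, rpo_lro_pkt_min_msk,
+ *			    rpo_lro_pkt_min_shift, rpo_lro_pkt_min_default);
+ *
+ * assuming the aq_hw_write_reg()/aq_hw_write_reg_bit() helpers from
+ * aq_hw_utils.h.
+ */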
+
+/* TX dca{D}_cpuid[7:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* Register address for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_msk 0x000000FF
+/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_shift 0
+/* Width of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_width 8
+/* Default value of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_default 0x0
+
+/* TX dca{D}_desc_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_desc_en".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* Register address for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_msk 0x80000000
+/* Inverted bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_shift 31
+/* Width of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_width 1
+/* Default value of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_default 0x0
+
+/* TX desc{D}_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_en".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc_en_i[0]"
+ */
+
+/* Register address for bitfield desc{D}_en */
+#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_msk 0x80000000
+/* Inverted bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield desc{D}_en */
+#define tdm_desc_den_shift 31
+/* Width of bitfield desc{D}_en */
+#define tdm_desc_den_width 1
+/* Default value of bitfield desc{D}_en */
+#define tdm_desc_den_default 0x0
+
+/* TX desc{D}_hd[C:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* Register address for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_msk 0x00001FFF
+/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_mskn 0xFFFFE000
+/* Lower bit position of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_shift 0
+/* Width of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_width 13
+
+/* TX desc{D}_len[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* Register address for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_msk 0x00001FF8
+/* Inverted bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_mskn 0xFFFFE007
+/* Lower bit position of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_shift 3
+/* Width of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_width 10
+/* Default value of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_default 0x0
+
+/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_adr(descriptor) \
+ (0x00007C18 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_msk 0x00007F00
+/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_shift 8
+/* Width of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_width 7
+/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_default 0x0
+
+/* TX tdm_int_mod_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "tdm_int_mod_en".
+ * PORT="pif_tdm_int_mod_en_i"
+ */
+
+/* Register address for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_adr 0x00007B40
+/* Bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_msk 0x00000010
+/* Inverted bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_mskn 0xFFFFFFEF
+/* Lower bit position of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_shift 4
+/* Width of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_width 1
+/* Default value of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_default 0x0
+
+/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_shift 16
+/* width of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_width 12
+/* default value of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_default 0x0
+
+/* tx tx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buf_en".
+ * port="pif_tpb_tx_buf_en_i"
+ */
+
+/* register address for bitfield tx_buf_en */
+#define tpb_tx_buf_en_adr 0x00007900
+/* bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield tx_buf_en */
+#define tpb_tx_buf_en_shift 0
+/* width of bitfield tx_buf_en */
+#define tpb_tx_buf_en_width 1
+/* default value of bitfield tx_buf_en */
+#define tpb_tx_buf_en_default 0x0
+
+/* tx tx{b}_hi_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_hi_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_msk 0x1fff0000
+/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_mskn 0xe000ffff
+/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_shift 16
+/* width of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_width 13
+/* default value of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_default 0x0
+
+/* tx tx{b}_lo_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_lo_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_msk 0x00001fff
+/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_mskn 0xffffe000
+/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_shift 0
+/* width of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_width 13
+/* default value of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_default 0x0
+
+/* tx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_tpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_adr 0x00007000
+/* bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_default 0x0
+
+/* tx tx{b}_buf_size[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_buf_size_i[7:0]"
+ */
+
+/* register address for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_msk 0x000000ff
+/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_mskn 0xffffff00
+/* lower bit position of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_shift 0
+/* width of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_width 8
+/* default value of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_default 0x0
+
+/* tx tx_scp_ins_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_scp_ins_en".
+ * port="pif_tpb_scp_ins_en_i"
+ */
+
+/* register address for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_adr 0x00007900
+/* bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_msk 0x00000004
+/* inverted bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+/* lower bit position of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_shift 2
+/* width of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_width 1
+/* default value of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_default 0x0
+
+/* tx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_tpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_adr 0x00007800
+/* bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_default 0x0
+
+/* tx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_tpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define tpol4chk_en_adr 0x00007800
+/* bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define tpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define tpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define tpol4chk_en_default 0x0
+
+/* tx pkt_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "pkt_sys_loopback".
+ * port="pif_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_adr 0x00007000
+/* bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_msk 0x00000080
+/* inverted bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+/* lower bit position of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_shift 7
+/* width of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_width 1
+/* default value of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_default 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_adr 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_shift 0
+/* width of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_width 1
+/* default value of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_default 0x0
+
+/* tx desc_rate_ta_rst bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_ta_rst".
+ * port="pif_tps_desc_rate_ta_rst_i"
+ */
+
+/* register address for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_adr 0x00007310
+/* bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_msk 0x80000000
+/* inverted bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+/* lower bit position of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_shift 31
+/* width of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_width 1
+/* default value of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_default 0x0
+
+/* tx desc_rate_limit[a:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
+ * port="pif_tps_desc_rate_lim_i[10:0]"
+ */
+
+/* register address for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_adr 0x00007310
+/* bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_msk 0x000007ff
+/* inverted bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_mskn 0xfffff800
+/* lower bit position of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_shift 0
+/* width of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_width 11
+/* default value of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_default 0x0
+
+/* tx desc_tc_arb_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]"
+ */
+
+/* register address for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_adr 0x00007200
+/* bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_msk 0x00000003
+/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_shift 0
+/* width of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_width 2
+/* default value of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_default 0x0
+
+/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_shift 16
+/* width of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_width 12
+/* default value of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_default 0x0
+
+/* tx desc_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_shift 0
+/* width of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_width 9
+/* default value of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_default 0x0
+
+/* tx desc_vm_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "desc_vm_arb_mode".
+ * port="pif_tps_desc_vm_arb_mode_i"
+ */
+
+/* register address for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_adr 0x00007300
+/* bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_shift 0
+/* width of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_width 1
+/* default value of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_default 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_shift 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_width 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_default 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_shift 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_width 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_default 0x0
+
+/* tx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_tx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_adr 0x00007000
+/* bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_default 0x1
+
+/* mac_phy register access busy bitfield definitions
+ * preprocessor definitions for the bitfield "register access busy".
+ * port="msm_pif_reg_busy_o"
+ */
+
+/* register address for bitfield register access busy */
+#define msm_reg_access_busy_adr 0x00004400
+/* bitmask for bitfield register access busy */
+#define msm_reg_access_busy_msk 0x00001000
+/* inverted bitmask for bitfield register access busy */
+#define msm_reg_access_busy_mskn 0xffffefff
+/* lower bit position of bitfield register access busy */
+#define msm_reg_access_busy_shift 12
+/* width of bitfield register access busy */
+#define msm_reg_access_busy_width 1
+
+/* mac_phy msm register address[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register address[7:0]".
+ * port="pif_msm_reg_addr_i[7:0]"
+ */
+
+/* register address for bitfield msm register address[7:0] */
+#define msm_reg_addr_adr 0x00004400
+/* bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_msk 0x000000ff
+/* inverted bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_mskn 0xffffff00
+/* lower bit position of bitfield msm register address[7:0] */
+#define msm_reg_addr_shift 0
+/* width of bitfield msm register address[7:0] */
+#define msm_reg_addr_width 8
+/* default value of bitfield msm register address[7:0] */
+#define msm_reg_addr_default 0x0
+
+/* mac_phy register read strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register read strobe".
+ * port="pif_msm_reg_rden_i"
+ */
+
+/* register address for bitfield register read strobe */
+#define msm_reg_rd_strobe_adr 0x00004400
+/* bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_msk 0x00000200
+/* inverted bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_mskn 0xfffffdff
+/* lower bit position of bitfield register read strobe */
+#define msm_reg_rd_strobe_shift 9
+/* width of bitfield register read strobe */
+#define msm_reg_rd_strobe_width 1
+/* default value of bitfield register read strobe */
+#define msm_reg_rd_strobe_default 0x0
+
+/* mac_phy msm register read data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register read data[31:0]".
+ * port="msm_pif_reg_rd_data_o[31:0]"
+ */
+
+/* register address for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_adr 0x00004408
+/* bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_mskn 0x00000000
+/* lower bit position of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_shift 0
+/* width of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_width 32
+
+/* mac_phy msm register write data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register write data[31:0]".
+ * port="pif_msm_reg_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_adr 0x00004404
+/* bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_mskn 0x00000000
+/* lower bit position of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_shift 0
+/* width of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_width 32
+/* default value of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_default 0x0
+
+/* mac_phy register write strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register write strobe".
+ * port="pif_msm_reg_wren_i"
+ */
+
+/* register address for bitfield register write strobe */
+#define msm_reg_wr_strobe_adr 0x00004400
+/* bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_msk 0x00000100
+/* inverted bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_mskn 0xfffffeff
+/* lower bit position of bitfield register write strobe */
+#define msm_reg_wr_strobe_shift 8
+/* width of bitfield register write strobe */
+#define msm_reg_wr_strobe_width 1
+/* default value of bitfield register write strobe */
+#define msm_reg_wr_strobe_default 0x0
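+
+/* Illustrative sketch of the indirect MSM access these fields describe,
+ * assuming the aq_hw_read_reg()/aq_hw_write_reg_bit() helpers from
+ * aq_hw_utils.h ('aq_hw', 'msm_addr' and 'val' are placeholders; timeout
+ * handling omitted): latch the target address, pulse the read strobe, wait
+ * for busy to clear, then fetch the data:
+ *
+ *	aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, msm_reg_addr_msk,
+ *			    msm_reg_addr_shift, msm_addr);
+ *	aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, msm_reg_rd_strobe_msk,
+ *			    msm_reg_rd_strobe_shift, 1U);
+ *	while (aq_hw_read_reg(aq_hw, msm_reg_access_busy_adr) &
+ *	       msm_reg_access_busy_msk)
+ *		;
+ *	val = aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+ */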
+
+/* mif soft reset bitfield definitions
+ * preprocessor definitions for the bitfield "soft reset".
+ * port="pif_glb_res_i"
+ */
+
+/* register address for bitfield soft reset */
+#define glb_soft_res_adr 0x00000000
+/* bitmask for bitfield soft reset */
+#define glb_soft_res_msk 0x00008000
+/* inverted bitmask for bitfield soft reset */
+#define glb_soft_res_mskn 0xffff7fff
+/* lower bit position of bitfield soft reset */
+#define glb_soft_res_shift 15
+/* width of bitfield soft reset */
+#define glb_soft_res_width 1
+/* default value of bitfield soft reset */
+#define glb_soft_res_default 0x0
+
+/* mif register reset disable bitfield definitions
+ * preprocessor definitions for the bitfield "register reset disable".
+ * port="pif_glb_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield register reset disable */
+#define glb_reg_res_dis_adr 0x00000000
+/* bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_msk 0x00004000
+/* inverted bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_mskn 0xffffbfff
+/* lower bit position of bitfield register reset disable */
+#define glb_reg_res_dis_shift 14
+/* width of bitfield register reset disable */
+#define glb_reg_res_dis_width 1
+/* default value of bitfield register reset disable */
+#define glb_reg_res_dis_default 0x1
+
+/* tx dma debug control definitions */
+#define tx_dma_debug_ctl_adr 0x00008920u
+
+/* tx dma descriptor base address msw definitions */
+#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+ (0x00007c04u + (descriptor) * 0x40)
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00008980
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
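+/* For example, tx_intr_moderation_ctl_adr(5) expands to
+ * (0x00008980u + 5 * 0x4) = 0x00008994u.
+ */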
+
+/* pcie reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_pci_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_adr 0x00001000
+/* bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_default 0x1
+
+/* global microprocessor scratch pad definitions */
+#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+
+#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
new file mode 100644
index 000000000000..8d6d8f5804da
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -0,0 +1,570 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#include <linux/random.h>
+
+#define HW_ATL_UCP_0X370_REG 0x0370U
+
+#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+
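+/* Read 'cnt' dwords from firmware RAM at address 'a' into 'p': take the
+ * HW_ATL_FW_SM_RAM semaphore, latch the source address in register 0x208,
+ * then for each dword kick a read by writing 0x8000 to register 0x200, spin
+ * until bit 0x100 of that register clears and collect the data from register
+ * 0x20C; the semaphore is dropped again at the end.
+ */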
+static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+
+ AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
+
+ if (err < 0) {
+ bool is_locked;
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+ }
+
+ aq_hw_write_reg(self, 0x00000208U, a);
+
+ for (++cnt; --cnt;) {
+ u32 i = 0U;
+
+ aq_hw_write_reg(self, 0x00000200U, 0x00008000U);
+
+ for (i = 1024U;
+ (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+ }
+
+ *(p++) = aq_hw_read_reg(self, 0x0000020CU);
+ }
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
+ u32 cnt)
+{
+ int err = 0;
+ bool is_locked;
+
+ is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+
+ aq_hw_write_reg(self, 0x00000208U, a);
+
+ for (++cnt; --cnt;) {
+ u32 i = 0U;
+
+ aq_hw_write_reg(self, 0x0000020CU, *(p++));
+ aq_hw_write_reg(self, 0x00000200U, 0xC000U);
+
+ for (i = 1024U;
+ (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+ }
+ }
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
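+/* Firmware version compatibility check: the major number (bits 31:24) must
+ * match exactly, while the actual minor/build part (bits 23:0) only has to
+ * be greater than or equal to the expected one.
+ */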
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+{
+ int err = 0;
+ const u32 dw_major_mask = 0xff000000U;
+ const u32 dw_minor_mask = 0x00ffffffU;
+
+ err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
+ if (err < 0)
+ goto err_exit;
+ err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
+ -EOPNOTSUPP : 0;
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ int err = 0;
+
+ if (!aq_hw_read_reg(self, 0x370U)) {
+ unsigned int rnd = 0U;
+ unsigned int ucp_0x370 = 0U;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+
+ /* check 10 times by 1ms */
+ AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+
+ err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
+ aq_hw_read_reg(self, 0x18U));
+ return err;
+}
+
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR 0x033CU
+
+struct aq_hw_atl_utils_fw_rpc_tid_s {
+ union {
+ u32 val;
+ struct {
+ u16 tid;
+ u16 len;
+ };
+ };
+};
+
+#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
+
+static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+
+ if (!IS_CHIP_FEATURE(MIPS)) {
+ err = -1;
+ goto err_exit;
+ }
+ err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
+ (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.len = (u16)rpc_size;
+ aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
+
+err_exit:
+ return err;
+}
+
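+/* Wait for the firmware to pick up the pending RPC request.  The driver's
+ * {tid, len} pair sits in HW_ATL_RPC_CONTROL_ADR and the firmware echoes the
+ * handled tid/len in HW_ATL_RPC_STATE_ADR; a reported length of 0xFFFF makes
+ * the request get resent.  Once the tids match, any response is downloaded
+ * from the firmware RPC area into PHAL_ATLANTIC->rpc and returned via 'rpc'.
+ */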
+static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_fw_rpc **rpc)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+
+ do {
+ sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
+
+ PHAL_ATLANTIC->rpc_tid = sw.tid;
+
+ AQ_HW_WAIT_FOR(sw.tid ==
+ (fw.val =
+ aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
+ fw.tid), 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+
+ if (fw.len == 0xFFFFU) {
+ err = hw_atl_utils_fw_rpc_call(self, sw.len);
+ if (err < 0)
+ goto err_exit;
+ }
+ } while (sw.tid != fw.tid || 0xFFFFU == fw.len);
+ if (err < 0)
+ goto err_exit;
+
+ if (rpc) {
+ if (fw.len) {
+ err =
+ hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->rpc_addr,
+ (u32 *)(void *)
+ &PHAL_ATLANTIC->rpc,
+ (fw.len + sizeof(u32) -
+ sizeof(u8)) /
+ sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ *rpc = &PHAL_ATLANTIC->rpc;
+ }
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ int err = 0;
+
+ err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ err = hw_atl_utils_fw_rpc_init(self);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
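+/* Pull the firmware statistics mailbox into 'pmbox'.  On A0 silicon the
+ * unicast byte counters are approximated as the packet counters multiplied
+ * by the configured MTU and the drop counter comes from a software counter;
+ * later revisions read the drop count from RX DMA counter 7.
+ */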
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox)
+{
+ int err = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ if (pmbox != &PHAL_ATLANTIC->mbox)
+ memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
+
+ if (IS_CHIP_FEATURE(REVISION_A0)) {
+ unsigned int mtu = self->aq_nic_cfg ?
+ self->aq_nic_cfg->mtu : 1514U;
+ pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
+ pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
+ pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ } else {
+ pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ }
+
+err_exit:;
+}
+
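+/* The MPI control word packs the requested link speed mask into the upper
+ * 16 bits and the requested firmware state into the low bits.
+ */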
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 ucp_0x368 = 0;
+
+ ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+
+ return 0;
+}
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state, u32 speed)
+{
+ int err = 0;
+ u32 transaction_id = 0;
+
+ if (state == MPI_RESET) {
+ hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+ transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+
+ AQ_HW_WAIT_FOR(transaction_id !=
+ (hw_atl_utils_mpi_read_stats
+ (self, &PHAL_ATLANTIC->mbox),
+ PHAL_ATLANTIC->mbox.transaction_id),
+ 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = hw_atl_utils_mpi_set_speed(self, speed, state);
+
+err_exit:;
+}
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status)
+{
+ u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+ u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+
+ if (!link_speed_mask) {
+ link_status->mbps = 0U;
+ } else {
+ switch (link_speed_mask) {
+ case HAL_ATLANTIC_RATE_10G:
+ link_status->mbps = 10000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_5G:
+ case HAL_ATLANTIC_RATE_5GSR:
+ link_status->mbps = 5000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_2GS:
+ link_status->mbps = 2500U;
+ break;
+
+ case HAL_ATLANTIC_RATE_1G:
+ link_status->mbps = 1000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_100M:
+ link_status->mbps = 100U;
+ break;
+
+ default:
+ link_status->mbps = 0U;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2];
+
+ self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_A0->chip_features);
+
+ err = hw_atl_utils_mpi_create(self, aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
+ unsigned int rnd = 0;
+ unsigned int ucp_0x370 = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ aq_hw_read_reg(self, 0x00000374U) +
+ (40U * 4U),
+ mac_addr,
+ AQ_DIMOF(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0U;
+ mac_addr[1] = 0U;
+ err = 0;
+ } else {
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ /* chip revision */
+ l = 0xE3000000U
+ | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+
+err_exit:
+ return err;
+}
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
+{
+ unsigned int ret = 0U;
+
+ switch (mbps) {
+ case 100U:
+ ret = 5U;
+ break;
+
+ case 1000U:
+ ret = 4U;
+ break;
+
+ case 2500U:
+ ret = 3U;
+ break;
+
+ case 5000U:
+ ret = 1U;
+ break;
+
+ case 10000U:
+ ret = 0U;
+ break;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
+{
+ u32 chip_features = 0U;
+ u32 val = reg_glb_mif_id_get(self);
+ u32 mif_rev = val & 0xFFU;
+
+ if ((3U & mif_rev) == 1U) {
+ chip_features |=
+ HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ } else if ((3U & mif_rev) == 2U) {
+ chip_features |=
+ HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS |
+ HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+ HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ }
+
+ *p = chip_features;
+}
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+{
+ hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ return 0;
+}
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state)
+{
+ hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ return 0;
+}
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+ u64 *data, unsigned int *p_count)
+{
+ struct hw_atl_stats_s *stats = NULL;
+ int i = 0;
+
+ hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+ stats = &PHAL_ATLANTIC->mbox.stats;
+
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
+ data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
+ data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
+ data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
+ data[++i] = stats->dpc;
+
+ if (p_count)
+ *p_count = ++i;
+
+ return 0;
+}
+
+static const u32 hw_atl_utils_hw_mac_regs[] = {
+ 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
+ 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
+ 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
+ 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
+ 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
+ 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
+ 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
+ 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
+ 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
+ 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
+ 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
+ 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
+ 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
+ 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
+ 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
+ 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
+ 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
+ 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
+ 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
+ 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
+ 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
+ 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
+};
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff)
+{
+ unsigned int i = 0U;
+
+ for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
+ regs_buff[i] = aq_hw_read_reg(self,
+ hw_atl_utils_hw_mac_regs[i]);
+ return 0;
+}
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+{
+ *fw_version = aq_hw_read_reg(self, 0x18U);
+ return 0;
+}
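The fw_rpc_call()/fw_rpc_wait() pair added above is a small transaction-id handshake: the host bumps a 16-bit tid, writes it together with the request length into the RPC control register, then polls the RPC state register until the firmware echoes the same tid back, re-uploading the request whenever the firmware answers with a length of 0xFFFF. A minimal user-space model of that loop, assuming little-endian packing of tid/len into one 32-bit word; the register variables and fake_fw_poll() are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

struct rpc_tid {
    uint16_t tid;
    uint16_t len;
};

static uint32_t rpc_control;   /* stands in for HW_ATL_RPC_CONTROL_ADR */
static uint32_t rpc_state;     /* stands in for HW_ATL_RPC_STATE_ADR */

static uint32_t pack(struct rpc_tid r)
{
    return (uint32_t)r.tid | ((uint32_t)r.len << 16);
}

static struct rpc_tid unpack(uint32_t v)
{
    struct rpc_tid r = {
        .tid = (uint16_t)(v & 0xFFFFu),
        .len = (uint16_t)(v >> 16),
    };

    return r;
}

/* Fake firmware: acknowledge a request by echoing its tid with a reply length. */
static void fake_fw_poll(void)
{
    struct rpc_tid req = unpack(rpc_control);

    req.len = 32;              /* pretend the reply is 32 bytes */
    rpc_state = pack(req);
}

int main(void)
{
    struct rpc_tid sw = { .tid = 1, .len = 64 };   /* like fw_rpc_call() */
    struct rpc_tid fw;

    rpc_control = pack(sw);

    do {                                           /* like fw_rpc_wait() */
        fake_fw_poll();
        fw = unpack(rpc_state);
    } while (sw.tid != fw.tid || fw.len == 0xFFFFu);

    printf("rpc done: tid=%u len=%u\n", (unsigned)fw.tid, (unsigned)fw.len);
    return 0;
}

A real firmware only updates the state register asynchronously, which is what the AQ_HW_WAIT_FOR() polling in the driver code above accounts for.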
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
new file mode 100644
index 000000000000..b8e3d88f0879
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -0,0 +1,210 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#ifndef HW_ATL_UTILS_H
+#define HW_ATL_UTILS_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+
+struct __packed hw_atl_stats_s {
+ u32 uprc;
+ u32 mprc;
+ u32 bprc;
+ u32 erpt;
+ u32 uptc;
+ u32 mptc;
+ u32 bptc;
+ u32 erpr;
+ u32 mbtc;
+ u32 bbtc;
+ u32 mbrc;
+ u32 bbrc;
+ u32 ubrc;
+ u32 ubtc;
+ u32 dpc;
+};
+
+union __packed ip_addr {
+ struct {
+ u8 addr[16];
+ } v6;
+ struct {
+ u8 padding[12];
+ u8 addr[4];
+ } v4;
+};
+
+struct __packed hw_aq_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ struct {
+ u32 pong;
+ } msg_ping;
+
+ struct {
+ u8 mac_addr[6];
+ u32 ip_addr_cnt;
+
+ struct {
+ union ip_addr addr;
+ union ip_addr mask;
+ } ip[1];
+ } msg_arp;
+
+ struct {
+ u32 len;
+ u8 packet[1514U];
+ } msg_inject;
+
+ struct {
+ u32 priority;
+ u32 wol_packet_type;
+ u16 friendly_name_len;
+ u16 friendly_name[65];
+ u32 pattern_id;
+ u32 next_wol_pattern_offset;
+
+ union {
+ struct {
+ u32 flags;
+ u8 ipv4_source_address[4];
+ u8 ipv4_dest_address[4];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv4_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ u8 ipv6_source_address[16];
+ u8 ipv6_dest_address[16];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv6_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ } eapol_request_id_message_parameters;
+
+ struct {
+ u32 flags;
+ u32 mask_offset;
+ u32 mask_size;
+ u32 pattern_offset;
+ u32 pattern_size;
+ } wol_bit_map_pattern;
+ } wol_pattern;
+ } msg_wol;
+
+ struct {
+ u32 is_wake_on_link_down;
+ u32 is_wake_on_link_up;
+ } msg_wolink;
+ };
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+ u32 version;
+ u32 transaction_id;
+ int error;
+ struct hw_atl_stats_s stats;
+};
+
+struct __packed hw_atl_s {
+ struct aq_hw_s base;
+ struct hw_aq_atl_utils_mbox mbox;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
+};
+
+#define SELF ((struct hw_atl_s *)self)
+
+#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
+
+#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
+#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
+#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
+#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
+
+#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
+ PHAL_ATLANTIC->chip_features)
+
+enum hal_atl_utils_fw_state_e {
+ MPI_DEINIT = 0,
+ MPI_RESET = 1,
+ MPI_INIT = 2,
+ MPI_POWER = 4,
+};
+
+#define HAL_ATLANTIC_RATE_10G BIT(0)
+#define HAL_ATLANTIC_RATE_5G BIT(1)
+#define HAL_ATLANTIC_RATE_5GSR BIT(2)
+#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_1G BIT(4)
+#define HAL_ATLANTIC_RATE_100M BIT(5)
+#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox);
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed);
+
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+ enum hal_atl_utils_fw_state_e state);
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status);
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac);
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
+
+int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
+ struct ethtool_cmd *cmd);
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state);
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+ u64 *data,
+ unsigned int *p_count);
+
+#endif /* HW_ATL_UTILS_H */
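The chip_features word in struct hw_atl_s is filled once from the MIF revision (hw_atl_utils_hw_chip_features_init() above) and then tested with IS_CHIP_FEATURE() in hot paths such as the MIPS check in the RPC code and the REVISION_A0 fixup in the stats path. A small user-space model of that flow, reusing the flag values from this header; the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CHIP_MIPS        0x00000001u
#define CHIP_TPO2        0x00000002u
#define CHIP_RPF2        0x00000004u
#define CHIP_MPI_AQ      0x00000010u
#define CHIP_REVISION_A0 0x01000000u
#define CHIP_REVISION_B0 0x02000000u

/* Mirrors hw_atl_utils_hw_chip_features_init(): the low two bits of the
 * MIF revision select the feature set.
 */
static uint32_t features_from_mif_rev(uint32_t mif_rev)
{
    switch (mif_rev & 3u) {
    case 1u:
        return CHIP_REVISION_A0 | CHIP_MPI_AQ | CHIP_MIPS;
    case 2u:
        return CHIP_REVISION_B0 | CHIP_MPI_AQ | CHIP_MIPS |
               CHIP_TPO2 | CHIP_RPF2;
    default:
        return 0u;
    }
}

#define HAS_FEATURE(feats, f) (((feats) & (f)) != 0u)

int main(void)
{
    uint32_t feats = features_from_mif_rev(0x02u);   /* pretend a B0 part */

    printf("MIPS=%d TPO2=%d A0=%d\n",
           HAS_FEATURE(feats, CHIP_MIPS),
           HAS_FEATURE(feats, CHIP_TPO2),
           HAS_FEATURE(feats, CHIP_REVISION_A0));
    return 0;
}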
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
new file mode 100644
index 000000000000..0de858d215c2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -0,0 +1,18 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef VER_H
+#define VER_H
+
+#define NIC_MAJOR_DRIVER_VERSION 1
+#define NIC_MINOR_DRIVER_VERSION 5
+#define NIC_BUILD_DRIVER_VERSION 345
+#define NIC_REVISION_DRIVER_VERSION 0
+
+#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abc9f2a59054..23873395f100 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
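The napi_complete() to napi_complete_done() conversion here repeats across the Atheros and Broadcom hunks that follow: passing the work count lets the networking core decide whether the NAPI instance should keep polling (busy polling, gro_flush_timeout) rather than only learning that the driver is finished. A compile-only sketch of the resulting poll-handler shape; everything prefixed my_ is a hypothetical stand-in, only napi_complete_done() is the real API:

#include <linux/netdevice.h>

struct my_priv {
    struct napi_struct napi;
};

/* Hypothetical hardware helpers, stubbed so the sketch is self-contained. */
static int my_hw_rx_poll(struct my_priv *priv, int budget)
{
    return 0;
}

static void my_hw_irq_enable(struct my_priv *priv)
{
}

static int my_poll(struct napi_struct *napi, int budget)
{
    struct my_priv *priv = container_of(napi, struct my_priv, napi);
    int work_done = my_hw_rx_poll(priv, budget);

    if (work_done < budget) {
        /* Report how much was done, not just that we are done. */
        napi_complete_done(napi, work_done);
        my_hw_irq_enable(priv);
    }

    return work_done;
}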
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8f525574d68..6a27c2662675 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
if (!tx_complete || work == budget)
return budget;
- napi_complete(&np->napi);
+ napi_complete_done(&np->napi, work);
/* enable interrupt */
if (alx->flags & ALX_FLAG_USING_MSIX) {
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
return -ENOMEM;
}
- alx_reinit_rings(alx);
-
return 0;
}
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
if (alx->qnapi[0] && alx->qnapi[0]->rxq)
kfree(alx->qnapi[0]->rxq->bufs);
- if (!alx->descmem.virt)
+ if (alx->descmem.virt)
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
err = alx_alloc_napis(alx);
if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
if (err)
goto out_free_rings;
+ /* must be called after alx_request_irq because the chip stops working
+ * if we copy the dma addresses in alx_init_ring_ptrs twice when the
+ * msi-x interrupt request has failed
+ */
+ alx_reinit_rings(alx);
+
netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
@@ -1643,8 +1648,8 @@ static void alx_poll_controller(struct net_device *netdev)
}
#endif
-static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *net_stats)
+static void alx_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *net_stats)
{
struct alx_priv *alx = netdev_priv(dev);
struct alx_hw_stats *hw_stats = &alx->hw.stats;
@@ -1688,8 +1693,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
spin_unlock(&alx->stats_lock);
-
- return net_stats;
}
static const struct net_device_ops alx_netdev_ops = {
@@ -1818,6 +1821,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM |
NETIF_F_TSO |
NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 773d3b7d8dd5..7e913d8331c3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
quit_polling:
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
adapter->hw.intr_mask |= ISR_RX_PKT;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e96091b652a7..4f7e195af0bc 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1472,7 +1472,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
prrs->vtag);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
- netif_receive_skb(skb);
+ napi_gro_receive(&adapter->napi, skb);
skip_pkt:
/* skip current packet whether it's ok or not. */
@@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
/* If no Tx and not enough Rx work done, exit the polling mode */
if (work_done < budget) {
quit_polling:
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
/* test debug */
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7dad8e4b9d2a..022772e1e249 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget)
if (work_done >= budget)
return work_done;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable Interrupt */
if (likely(adapter->int_enabled))
atlx_imr_set(adapter, IMR_NORMAL_MASK);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 48707ed76ffc..5b95bb48ce97 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
b44_enable_ints(bp);
}
@@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *nstat)
+static void b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
struct b44_hw_stats *hwstat = &bp->hw_stats;
@@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
#endif
} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
- return nstat;
}
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d5144228..0ee6e208aa07 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
/* no more packet in rx/tx queue, remove device from poll
* queue */
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
/* restore rx/tx interrupt */
enet_dmac_writel(priv, priv->dma_chan_int_mask,
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
+ } else {
+ phydev = NULL;
}
/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IRMASK, priv->tx_chan);
- if (priv->has_phy)
+ if (phydev)
phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- if (priv->has_phy)
+ if (phydev)
phy_disconnect(phydev);
return ret;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7e8cf213fd81..a68d4889f5db 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
+ * same layout, except it has been moved by 4 bytes up, *sigh*
+ */
+static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
+{
+ if (!priv->is_lite) {
+ return BIT(bit);
+ } else {
+ if (bit >= ACB_ALGO)
+ return BIT(bit + 1);
+ else
+ return BIT(bit);
+ }
+}
+
/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
* mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
*/
@@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
reg = tdma_readl(priv, TDMA_CONTROL);
if (priv->tsb_en)
- reg |= TSB_EN;
+ reg |= tdma_control_bit(priv, TSB_EN);
else
- reg &= ~TSB_EN;
+ reg &= ~tdma_control_bit(priv, TSB_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
return 0;
@@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
priv->msg_enable = enable;
}
+static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
+{
+ switch (type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_RXCHK:
+ case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_SOFT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ unsigned int i, j;
+
switch (string_set) {
case ETH_SS_STATS:
- return BCM_SYSPORT_STATS_LEN;
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+ j++;
+ }
+ return j;
default:
return -EOPNOTSUPP;
}
@@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
static void bcm_sysport_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
- int i;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_sysport_gstrings_stats[i].stat_string,
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+
+ memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
ETH_GSTRING_LEN);
+ j++;
}
break;
default:
@@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
case BCM_SYSPORT_STAT_RUNT:
+ if (priv->is_lite)
+ continue;
+
if (s->type != BCM_SYSPORT_STAT_MIB_RX)
offset = UMAC_MIB_STAT_OFFSET;
val = umac_readl(priv, UMAC_MIB_START + j + offset);
@@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- int i;
+ int i, j;
if (netif_running(dev))
bcm_sysport_update_mib_counters(priv);
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
const struct bcm_sysport_stats *s;
char *p;
@@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(unsigned long *)p;
+ data[j] = *(unsigned long *)p;
+ j++;
}
}
@@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
- /* Determine how much we should process since last call */
- p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ /* Determine how much we should process since last call; SYSTEMPORT Lite
+ * groups the producer and consumer indexes into the same 32-bit register,
+ * which we access using RDMA_CONS_INDEX
+ */
+ if (!priv->is_lite)
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ else
+ p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
if (p_index < priv->rx_c_index)
@@ -710,11 +780,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
- struct netdev_queue *txq;
u32 hw_ind;
- txq = netdev_get_tx_queue(ndev, ring->index);
-
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +812,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->c_index = c_index;
- if (netif_tx_queue_stopped(txq) && pkts_compl)
- netif_tx_wake_queue(txq);
-
netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +823,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
+ struct netdev_queue *txq;
unsigned int released;
unsigned long flags;
+ txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
+ if (released)
+ netif_tx_wake_queue(txq);
+
spin_unlock_irqrestore(&ring->lock, flags);
return released;
}
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@@ -780,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
- intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ if (!ring->priv->is_lite)
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ else
+ intrl2_0_mask_clear(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT));
return 0;
}
@@ -806,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
priv->rx_c_index += work_done;
priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
- rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ /* SYSTEMPORT Lite groups the producer/consumer index; the producer is
+ * maintained by HW, but writes to it will be ignored while RDMA
+ * is active
+ */
+ if (!priv->is_lite)
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+ else
+ rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -837,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring, ring_bit;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -866,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
bcm_sysport_resume_from_wol(priv);
}
+ if (!priv->is_lite)
+ goto out;
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
+ if (!(priv->irq0_stat & ring_bit))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_0_mask_set(priv, ring_bit);
+ __napi_schedule(&txr->napi);
+ }
+ }
+out:
return IRQ_HANDLED;
}
@@ -919,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
bcm_sysport_rx_isr(priv->irq0, priv);
enable_irq(priv->irq0);
- disable_irq(priv->irq1);
- bcm_sysport_tx_isr(priv->irq1, priv);
- enable_irq(priv->irq1);
+ if (!priv->is_lite) {
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+ }
}
#endif
@@ -1118,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
priv->old_duplex = phydev->duplex;
}
+ if (priv->is_lite)
+ goto out;
+
switch (phydev->speed) {
case SPEED_2500:
cmd_bits = CMD_SPEED_2500;
@@ -1158,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_writel(priv, reg, UMAC_CMD);
}
-
- phy_print_status(phydev);
+out:
+ if (changed)
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1252,7 +1369,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
- bcm_sysport_tx_reclaim(priv, ring);
+ bcm_sysport_tx_clean(priv, ring);
kfree(ring->cbs);
ring->cbs = NULL;
@@ -1304,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
reg = tdma_readl(priv, TDMA_CONTROL);
if (enable)
- reg |= TDMA_EN;
+ reg |= tdma_control_bit(priv, TDMA_EN);
else
- reg &= ~TDMA_EN;
+ reg &= ~tdma_control_bit(priv, TDMA_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
/* Poll for TMDA disabling completion */
@@ -1331,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
int i;
/* Initialize SW view of the RX ring */
- priv->num_rx_bds = NUM_RX_DESC;
+ priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
@@ -1368,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
rdma_writel(priv, 0, RDMA_START_ADDR_HI);
rdma_writel(priv, 0, RDMA_START_ADDR_LO);
rdma_writel(priv, 0, RDMA_END_ADDR_HI);
- rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+ rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
rdma_writel(priv, 1, RDMA_MBDONE_INTR);
@@ -1410,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
if (dev->flags & IFF_PROMISC)
reg |= CMD_PROMISC;
@@ -1427,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
{
u32 reg;
- reg = umac_readl(priv, UMAC_CMD);
- if (enable)
- reg |= mask;
- else
- reg &= ~mask;
- umac_writel(priv, reg, UMAC_CMD);
+ if (!priv->is_lite) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_writel(priv, reg, UMAC_CMD);
+ } else {
+ reg = gib_readl(priv, GIB_CONTROL);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
/* UniMAC stops on a packet boundary, wait for a full-sized packet
* to be processed (1 msec).
@@ -1445,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
{
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
reg |= CMD_SW_RESET;
umac_writel(priv, reg, UMAC_CMD);
@@ -1457,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
unsigned char *addr)
{
- umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ if (!priv->is_lite) {
+ umac_writel(priv, mac0, UMAC_MAC0);
+ umac_writel(priv, mac1, UMAC_MAC1);
+ } else {
+ gib_writel(priv, mac0, GIB_MAC0);
+ gib_writel(priv, mac1, GIB_MAC1);
+ }
}
static void topctrl_flush(struct bcm_sysport_priv *priv)
@@ -1504,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev)
phy_start(dev->phydev);
- /* Enable TX interrupts for the 32 TXQs */
- intrl2_1_mask_clear(priv, 0xffffffff);
+ /* Enable TX interrupts for the TXQs */
+ if (!priv->is_lite)
+ intrl2_1_mask_clear(priv, 0xffffffff);
+ else
+ intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
/* Last call before we start the real business */
netif_tx_start_all_queues(dev);
@@ -1517,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
reg = rbuf_readl(priv, RBUF_CONTROL);
reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ /* Set a correct RSB format on SYSTEMPORT Lite */
+ if (priv->is_lite) {
+ reg &= ~RBUF_RSB_SWAP1;
+ reg |= RBUF_RSB_SWAP0;
+ }
rbuf_writel(priv, reg, RBUF_CONTROL);
}
+static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+{
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ if (!priv->is_lite) {
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ }
+}
+
+static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+{
+ u32 __maybe_unused reg;
+
+ /* Include Broadcom tag in pad extension */
+ if (netdev_uses_dsa(priv->netdev)) {
+ reg = gib_readl(priv, GIB_CONTROL);
+ reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+ reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
+}
+
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1540,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
/* Read CRC forward */
- priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ if (!priv->is_lite)
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ else
+ priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
@@ -1561,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev)
priv->old_pause = -1;
/* mask all interrupts and request them */
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
if (ret) {
@@ -1574,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_phy_disconnect;
}
- ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
- if (ret) {
- netdev_err(dev, "failed to request TX interrupt\n");
- goto out_free_irq0;
+ if (!priv->is_lite) {
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
}
/* Initialize both hardware and software ring */
@@ -1624,7 +1800,8 @@ out_free_rx_ring:
out_free_tx_ring:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
@@ -1642,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
phy_stop(dev->phydev);
/* mask all interrupts */
- intrl2_0_mask_set(priv, 0xffffffff);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_mask_set(priv, 0xffffffff);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
@@ -1683,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev)
bcm_sysport_fini_rx_ring(priv);
free_irq(priv->irq0, dev);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
@@ -1722,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#define REV_FMT "v%2x.%02x"
+static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
+ [SYSTEMPORT] = {
+ .is_lite = false,
+ .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
+ },
+ [SYSTEMPORT_LITE] = {
+ .is_lite = true,
+ .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
+ },
+};
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemportlite-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
+ { .compatible = "brcm,systemport-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { .compatible = "brcm,systemport",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
+
static int bcm_sysport_probe(struct platform_device *pdev)
{
+ const struct bcm_sysport_hw_params *params;
+ const struct of_device_id *of_id = NULL;
struct bcm_sysport_priv *priv;
struct device_node *dn;
struct net_device *dev;
@@ -1734,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dn = pdev->dev.of_node;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ of_id = of_match_node(bcm_sysport_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ /* Fairly quickly we need to know the type of adapter we have */
+ params = of_id->data;
/* Read the Transmit/Receive Queue properties */
if (of_property_read_u32(dn, "systemport,num-txq", &txq))
@@ -1741,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
rxq = 1;
+ /* Sanity check the number of transmit queues */
+ if (!txq || txq > TDMA_NUM_RINGS)
+ return -EINVAL;
+
dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
if (!dev)
return -ENOMEM;
@@ -1748,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ /* Allocate number of TX rings */
+ priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
+ sizeof(struct bcm_sysport_tx_ring),
+ GFP_KERNEL);
+ if (!priv->tx_rings)
+ return -ENOMEM;
+
+ priv->is_lite = params->is_lite;
+ priv->num_rx_desc_words = params->num_rx_desc_words;
+
priv->irq0 = platform_get_irq(pdev, 0);
- priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->is_lite)
+ priv->irq1 = platform_get_irq(pdev, 1);
priv->wol_irq = platform_get_irq(pdev, 2);
- if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
goto err_free_netdev;
@@ -1825,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT" REV_FMT
+ "Broadcom SYSTEMPORT%s" REV_FMT
" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->base, priv->irq0, priv->irq1, txq, rxq);
@@ -2022,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
@@ -2058,13 +2282,6 @@ out_free_tx_rings:
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
-static const struct of_device_id bcm_sysport_of_match[] = {
- { .compatible = "brcm,systemport-v1.00" },
- { .compatible = "brcm,systemport" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
-
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
.remove = bcm_sysport_remove,
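The rdma_readl()/rdma_writel() accessors introduced above hide the SYSTEMPORT Lite quirk that every RDMA register at or past RDMA_STATUS sits 4 bytes higher than on the full SYSTEMPORT block. A tiny user-space model of that fixup; the RDMA_STATUS value used here is illustrative, not the real offset:

#include <stdbool.h>
#include <stdio.h>

#define RDMA_STATUS 0x14   /* illustrative offset only */

static unsigned int lite_fixup(bool is_lite, unsigned int off)
{
    if (is_lite && off >= RDMA_STATUS)
        off += 4;
    return off;
}

int main(void)
{
    printf("non-Lite 0x10 -> 0x%02x\n", lite_fixup(false, 0x10));
    printf("Lite     0x10 -> 0x%02x\n", lite_fixup(true, 0x10));
    printf("Lite     0x14 -> 0x%02x\n", lite_fixup(true, 0x14));
    return 0;
}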
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 1c82e3da69a7..863ddd7870b7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -127,6 +127,10 @@ struct bcm_rsb {
#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+/* SYSTEMPORT Lite groups the TX queue interrupts on instance 0 */
+#define INTRL2_0_TDMA_MBDONE_SHIFT 12
+#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT)
+
/* RXCHK offset and defines */
#define SYS_PORT_RXCHK_OFFSET 0x300
@@ -176,7 +180,9 @@ struct bcm_rsb {
#define RBUF_OK_TO_SEND_MASK 0xff
#define RBUF_CRC_REPLACE (1 << 20)
#define RBUF_OK_TO_SEND_MODE (1 << 21)
-#define RBUF_RSB_SWAP (1 << 22)
+/* SYSTEMPORT Lite uses two bits here */
+#define RBUF_RSB_SWAP0 (1 << 22)
+#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -247,6 +253,7 @@ struct bcm_rsb {
#define MIB_RUNT_CNT_RST (1 << 1)
#define MIB_TX_CNT_RST (1 << 2)
+/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
#define MSEQ_LEN_SHIFT 16
@@ -258,6 +265,34 @@ struct bcm_rsb {
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
+/* Only valid on SYSTEMPORT Lite */
+#define SYS_PORT_GIB_OFFSET 0x1000
+
+#define GIB_CONTROL 0x00
+#define GIB_TX_EN (1 << 0)
+#define GIB_RX_EN (1 << 1)
+#define GIB_TX_FLUSH (1 << 2)
+#define GIB_RX_FLUSH (1 << 3)
+#define GIB_GTX_CLK_SEL_SHIFT 4
+#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_FCS_STRIP (1 << 6)
+#define GIB_LCL_LOOP_EN (1 << 7)
+#define GIB_LCL_LOOP_TXEN (1 << 8)
+#define GIB_RMT_LOOP_EN (1 << 9)
+#define GIB_RMT_LOOP_RXEN (1 << 10)
+#define GIB_RX_PAUSE_EN (1 << 11)
+#define GIB_PREAMBLE_LEN_SHIFT 12
+#define GIB_PREAMBLE_LEN_MASK 0xf
+#define GIB_IPG_LEN_SHIFT 16
+#define GIB_IPG_LEN_MASK 0x3f
+#define GIB_PAD_EXTENSION_SHIFT 22
+#define GIB_PAD_EXTENSION_MASK 0x3f
+
+#define GIB_MAC1 0x08
+#define GIB_MAC0 0x0c
+
/* Receive DMA offset and defines */
#define SYS_PORT_RDMA_OFFSET 0x2000
@@ -409,16 +444,19 @@ struct bcm_rsb {
RING_PCP_DEI_VID)
#define TDMA_CONTROL 0x600
-#define TDMA_EN (1 << 0)
-#define TSB_EN (1 << 1)
-#define TSB_SWAP (1 << 2)
-#define ACB_ALGO (1 << 3)
+#define TDMA_EN 0
+#define TSB_EN 1
+/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit; we
+ * keep the SYSTEMPORT layout here and adjust with tdma_control_bit()
+ */
+#define TSB_SWAP 2
+#define ACB_ALGO 3
#define BUF_DATA_OFFSET_SHIFT 4
#define BUF_DATA_OFFSET_MASK 0x3ff
-#define VLAN_EN (1 << 14)
-#define SW_BRCM_TAG (1 << 15)
-#define WNC_KPT_SIZE_UPDATE (1 << 16)
-#define SYNC_PKT_SIZE (1 << 17)
+#define VLAN_EN 14
+#define SW_BRCM_TAG 15
+#define WNC_KPT_SIZE_UPDATE 16
+#define SYNC_PKT_SIZE 17
#define ACH_TXDONE_DELAY_SHIFT 18
#define ACH_TXDONE_DELAY_MASK 0xff
@@ -475,12 +513,12 @@ struct dma_desc {
};
/* Number of Receive hardware descriptor words */
-#define NUM_HW_RX_DESC_WORDS 1024
-/* Real number of usable descriptors */
-#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+#define SP_NUM_HW_RX_DESC_WORDS 1024
+#define SP_LT_NUM_HW_RX_DESC_WORDS 256
-/* Internal linked-list RAM has up to 1536 entries */
-#define NUM_TX_DESC 1536
+/* Internal linked-list RAM size */
+#define SP_NUM_TX_DESC 1536
+#define SP_LT_NUM_TX_DESC 256
#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
@@ -627,6 +665,16 @@ struct bcm_sysport_cb {
DEFINE_DMA_UNMAP_LEN(dma_len);
};
+enum bcm_sysport_type {
+ SYSTEMPORT = 0,
+ SYSTEMPORT_LITE,
+};
+
+struct bcm_sysport_hw_params {
+ bool is_lite;
+ unsigned int num_rx_desc_words;
+};
+
/* Software view of the TX ring */
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
@@ -651,6 +699,8 @@ struct bcm_sysport_priv {
u32 irq0_mask;
u32 irq1_stat;
u32 irq1_mask;
+ bool is_lite;
+ unsigned int num_rx_desc_words;
struct napi_struct napi ____cacheline_aligned;
struct net_device *netdev;
struct platform_device *pdev;
@@ -659,7 +709,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
- struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+ struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
void __iomem *rx_bds;
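The TDMA_CONTROL defines above switch from ready-made (1 << n) masks to plain bit positions precisely so that tdma_control_bit() in bcmsysport.c can shift any field at or above ACB_ALGO by one on SYSTEMPORT Lite, where TSB_SWAP occupies two bits. A user-space model of that mapping, reusing the positions defined above:

#include <stdbool.h>
#include <stdio.h>

#define TDMA_EN   0
#define TSB_EN    1
#define TSB_SWAP  2
#define ACB_ALGO  3

static unsigned int tdma_control_bit(bool is_lite, unsigned int bit)
{
    if (is_lite && bit >= ACB_ALGO)
        bit++;
    return 1u << bit;
}

int main(void)
{
    printf("TSB_EN   mask: %#x on both chips\n",
           tdma_control_bit(false, TSB_EN));
    printf("ACB_ALGO mask: %#x on SYSTEMPORT, %#x on SYSTEMPORT Lite\n",
           tdma_control_bit(false, ACB_ALGO),
           tdma_control_bit(true, ACB_ALGO));
    return 0;
}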
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 7c19c8e2bf91..6ce80cbcb48e 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -12,11 +12,6 @@
#include <linux/brcmphy.h>
#include "bgmac.h"
-struct bcma_mdio {
- struct bcma_device *core;
- u8 phyaddr;
-};
-
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
u32 value, int timeout)
{
@@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
* PHY ops
**************************************************/
-static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
struct bcma_device *core;
u16 phy_access_addr;
@@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg,
u16 value)
{
struct bcma_device *core;
@@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
u16 phy_ctl_addr;
u32 tmp;
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
tmp |= phyaddr;
bcma_write32(core, phy_ctl_addr, tmp);
- bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
- if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
dev_warn(&core->dev, "Error setting MDIO int\n");
tmp = BGMAC_PA_START;
@@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+static void bcma_mdio_phy_init(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+ struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo;
u8 i;
+ /* For some legacy hardware we do chipset-based PHY initialization here
+ * without even detecting PHY ID. It's hacky and should be cleaned up as
+ * soon as someone can test it.
+ */
if (ci->id == BCMA_CHIP_ID_BCM5356) {
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b);
+ bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
- struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
+
+ /* For all other hw, do initialization using the PHY subsystem. */
+ if (bgmac->net_dev && bgmac->net_dev->phydev)
+ phy_init_hw(bgmac->net_dev->phydev);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static int bcma_mdio_phy_reset(struct mii_bus *bus)
{
- struct bcma_mdio *bcma_mdio = bus->priv;
- u8 phyaddr = bcma_mdio->phyaddr;
+ struct bgmac *bgmac = bus->priv;
+ u8 phyaddr = bgmac->phyaddr;
- if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+ if (phyaddr == BGMAC_PHY_NOREGS)
return 0;
- bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+ bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
- dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
- bcma_mdio_phy_init(bcma_mdio);
+ if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET)
+ dev_err(bgmac->dev, "PHY reset failed\n");
+ bcma_mdio_phy_init(bgmac);
return 0;
}
@@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
}
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
- struct bcma_mdio *bcma_mdio;
+ struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
int err;
- bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
- if (!bcma_mdio)
- return ERR_PTR(-ENOMEM);
-
mii_bus = mdiobus_alloc();
if (!mii_bus) {
err = -ENOMEM;
@@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
mii_bus->name = "bcma_mdio mii bus";
sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
core->core_unit);
- mii_bus->priv = bcma_mdio;
+ mii_bus->priv = bgmac;
mii_bus->read = bcma_mdio_mii_read;
mii_bus->write = bcma_mdio_mii_write;
mii_bus->reset = bcma_mdio_phy_reset;
mii_bus->parent = &core->dev;
- mii_bus->phy_mask = ~(1 << phyaddr);
-
- bcma_mdio->core = core;
- bcma_mdio->phyaddr = phyaddr;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
err = mdiobus_register(mii_bus);
if (err) {
@@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
err_free_bus:
mdiobus_free(mii_bus);
err:
- kfree(bcma_mdio);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
{
- struct bcma_mdio *bcma_mdio;
-
if (!mii_bus)
return;
- bcma_mdio = mii_bus->priv;
-
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
- kfree(bcma_mdio);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 4a4ffc0c4c65..d59cfcc4c4d5 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core)
u8 *mac;
int err;
- bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&core->dev);
if (!bgmac)
return -ENOMEM;
bgmac->bcma.core = core;
- bgmac->dev = &core->dev;
bgmac->dma_dev = core->dma_dev;
bgmac->irq = core->irq;
@@ -145,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core)
goto err;
}
- ether_addr_copy(bgmac->mac_addr, mac);
+ ether_addr_copy(bgmac->net_dev->dev_addr, mac);
/* On BCM4706 we need common core to access PHY */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
@@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core) &&
!(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
- mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+ mii_bus = bcma_mdio_mii_register(bgmac);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
@@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core)
err1:
bcma_mdio_mii_unregister(bgmac->mii_bus);
err:
- kfree(bgmac);
bcma_set_drvdata(core, NULL);
return err;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6f736c19872f..7b1af950f312 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev)
struct resource *regs;
const u8 *mac_addr;
- bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&pdev->dev);
if (!bgmac)
return -ENOMEM;
@@ -169,7 +169,7 @@ static int bgmac_probe(struct platform_device *pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- ether_addr_copy(bgmac->mac_addr, mac_addr);
+ ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
else
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0e066dc6b8cc..415046750bb4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -12,6 +12,8 @@
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include "bgmac.h"
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
@@ -1148,7 +1150,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
return weight;
if (handled < weight) {
- napi_complete(napi);
+ napi_complete_done(napi, handled);
bgmac_chip_intrs_on(bgmac);
}
@@ -1446,33 +1448,42 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
-int bgmac_enet_probe(struct bgmac *info)
+struct bgmac *bgmac_alloc(struct device *dev)
{
struct net_device *net_dev;
struct bgmac *bgmac;
- int err;
/* Allocation and references */
- net_dev = alloc_etherdev(sizeof(*bgmac));
+ net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
if (!net_dev)
- return -ENOMEM;
+ return NULL;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
+
bgmac = netdev_priv(net_dev);
- memcpy(bgmac, info, sizeof(*bgmac));
+ bgmac->dev = dev;
bgmac->net_dev = net_dev;
+
+ return bgmac;
+}
+EXPORT_SYMBOL_GPL(bgmac_alloc);
+
+int bgmac_enet_probe(struct bgmac *bgmac)
+{
+ struct net_device *net_dev = bgmac->net_dev;
+ int err;
+
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
- if (!is_valid_ether_addr(bgmac->mac_addr)) {
+ if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
- bgmac->mac_addr);
- eth_random_addr(bgmac->mac_addr);
+ net_dev->dev_addr);
+ eth_hw_addr_random(net_dev);
dev_warn(bgmac->dev, "Using random MAC: %pM\n",
- bgmac->mac_addr);
+ net_dev->dev_addr);
}
- ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
/* This (reset &) enable is not preset in specs or reference driver but
* Broadcom does it in arch PCI code when enabling fake PCI device.
@@ -1488,7 +1499,7 @@ int bgmac_enet_probe(struct bgmac *info)
err = bgmac_dma_alloc(bgmac);
if (err) {
dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
- goto err_netdev_free;
+ goto err_out;
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
@@ -1521,8 +1532,7 @@ err_phy_disconnect:
phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
-err_netdev_free:
- free_netdev(net_dev);
+err_out:
return err;
}
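
The old single-step bgmac_enet_probe(info) copied a caller-owned template into the freshly allocated net_device; after this split the bus glue allocates once and fills the structure in place. Assumed usage contract as a sketch; example_glue_probe() and its parameters are hypothetical, the field names come from the hunks above:

static int example_glue_probe(struct device *parent, int irq, const u8 *mac)
{
	struct bgmac *bgmac;

	/* 1. allocate: devm-managed net_device with struct bgmac inside */
	bgmac = bgmac_alloc(parent);
	if (!bgmac)
		return -ENOMEM;

	/* 2. the bus glue fills in resources and callbacks directly */
	bgmac->irq = irq;
	ether_addr_copy(bgmac->net_dev->dev_addr, mac);

	/* 3. common part: DMA setup, PHY connect, register_netdev() */
	return bgmac_enet_probe(bgmac);
}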
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 71f493f2451f..248727dc62f2 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -474,7 +474,6 @@ struct bgmac {
struct device *dev;
struct device *dma_dev;
- unsigned char mac_addr[ETH_ALEN];
u32 feature_flags;
struct net_device *net_dev;
@@ -517,12 +516,13 @@ struct bgmac {
int (*phy_connect)(struct bgmac *bgmac);
};
-int bgmac_enet_probe(struct bgmac *info);
+struct bgmac *bgmac_alloc(struct device *dev);
+int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d5d1026be4b7..e3af1f3cb61f 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_fast_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
bnapi->last_status_idx);
@@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
@@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp)
(unsigned long) (bp->stats_blk->ctr + \
bp->temp_stats_blk->ctr)
-static struct rtnl_link_stats64 *
+static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
struct bnx2 *bp = netdev_priv(dev);
if (bp->stats_blk == NULL)
- return net_stats;
+ return;
net_stats->rx_packets =
GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
@@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
GET_32BIT_NET_STATS(stat_FwRxDrop);
- return net_stats;
}
/* All ethtool functions called with rtnl_lock */
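
Two tree-wide conversions show up in bnx2 here: napi_complete_done() (same pattern as sketched for bgmac above) and .ndo_get_stats64 becoming a void method, since the caller always supplies the structure to fill and ignores any return value. The resulting driver-side shape, as a sketch for a hypothetical driver:

struct example_priv {			/* hypothetical private data */
	struct rtnl_link_stats64 *hw;	/* snapshot maintained elsewhere */
};

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->hw)			/* stats not available yet: leave zeros */
		return;

	stats->rx_packets = priv->hw->rx_packets;
	stats->tx_packets = priv->hw->tx_packets;
	stats->rx_bytes   = priv->hw->rx_bytes;
	stats->tx_bytes   = priv->hw->tx_bytes;
}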
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3e199d3e461e..9e8c06130c09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
- /* put page reference used by the memory pool, since we
- * won't be using this page as the mempool anymore.
- */
- if (pool->page)
- put_page(pool->page);
-
+ if (!pool->page) {
pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
if (unlikely(!pool->page))
return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return -ENOMEM;
}
- get_page(pool->page);
sw_buf->page = pool->page;
sw_buf->offset = pool->offset;
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
pool->offset += SGE_PAGE_SIZE;
-
+ if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+ get_page(pool->page);
+ else
+ pool->page = NULL;
return 0;
}
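
The SGE page-pool refcounting is inverted here: instead of taking an extra page reference up front and dropping it when the pool retires the page, the allocation path now takes the extra reference only if the page can still serve another SGE chunk, and simply forgets the page once the last chunk is handed out. A standalone model of the accounting, with illustrative sizes rather than the driver's:

#include <stdio.h>

#define PAGE_SIZE	4096
#define SGE_PAGE_SIZE	1024	/* illustrative chunk size */

struct pool { int page_refs; unsigned int offset; int have_page; };

static void take_sge_chunk(struct pool *p)
{
	if (!p->have_page) {		/* fresh page: its one ref goes to this chunk */
		p->have_page = 1;
		p->page_refs = 1;
		p->offset = 0;
	}
	p->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - p->offset >= SGE_PAGE_SIZE)
		p->page_refs++;		/* pool will reuse the page: extra ref (get_page) */
	else
		p->have_page = 0;	/* last chunk: pool forgets the page, no ref kept */
}

int main(void)
{
	struct pool p = { 0, 0, 0 };
	int i;

	for (i = 0; i < 4; i++)
		take_sge_chunk(&p);
	printf("refs held, one per outstanding chunk: %d\n", p.page_refs);	/* 4 */
	return 0;
}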
@@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
* has been updated when NAPI was scheduled.
*/
if (IS_FCOE_FP(fp)) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
} else {
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5f19427c7b27..43423744fdfa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
return port_type;
}
-static int bnx2x_get_vf_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (bp->state == BNX2X_STATE_OPEN) {
if (test_bit(BNX2X_LINK_REPORT_FD,
&bp->vf_link_vars.link_report_flags))
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
- ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+ cmd->base.speed = bp->vf_link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = PORT_OTHER;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
u32 media_type;
+ u32 supported, advertising, lp_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
+ cmd->link_modes.lp_advertising);
/* Dual Media boards present all available port types */
- cmd->supported = bp->port.supported[cfg_idx] |
+ supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
- cmd->advertising = bp->port.advertising[cfg_idx];
+ advertising = bp->port.advertising[cfg_idx];
media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
if (media_type == ETH_PHY_SFP_1G_FIBER) {
- cmd->supported &= ~(SUPPORTED_10000baseT_Full);
- cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ supported &= ~(SUPPORTED_10000baseT_Full);
+ advertising &= ~(ADVERTISED_10000baseT_Full);
}
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->base.duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ cmd->base.speed = bnx2x_get_mf_speed(bp);
else
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->base.speed = bp->link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = bnx2x_get_port_type(bp);
+ cmd->base.port = bnx2x_get_port_type(bp);
- cmd->phy_address = bp->mdio.prtad;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.phy_address = bp->mdio.prtad;
if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Publish LP advertised speeds and FC */
if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
u32 status = bp->link_vars.link_status;
- cmd->lp_advertising |= ADVERTISED_Autoneg;
+ lp_advertising |= ADVERTISED_Autoneg;
if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Pause;
+ lp_advertising |= ADVERTISED_Pause;
if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+ lp_advertising |= ADVERTISED_Asym_Pause;
if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+ lp_advertising |= ADVERTISED_10baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+ lp_advertising |= ADVERTISED_10baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+ lp_advertising |= ADVERTISED_100baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+ lp_advertising |= ADVERTISED_100baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+ lp_advertising |= ADVERTISED_1000baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseKX_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+ lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseKR_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
+ lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
u32 speed, phy_idx;
+ u32 supported;
+ u8 duplex = cmd->base.duplex;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (IS_MF_SD(bp))
return 0;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
/* If we received a request for an unknown duplex, assume full */
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
+ if (duplex == DUPLEX_UNKNOWN)
+ duplex = DUPLEX_FULL;
if (IS_MF_SI(bp)) {
u32 part;
@@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- if (cmd->port != bnx2x_get_port_type(bp)) {
- switch (cmd->port) {
+ if (cmd->base.port != bnx2x_get_port_type(bp)) {
+ switch (cmd->base.port) {
case PORT_TP:
if (!(bp->port.supported[0] & SUPPORTED_TP ||
bp->port.supported[1] & SUPPORTED_TP)) {
@@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->link_params.multi_phy_config = old_multi_phy_config;
DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 an_supported_speed = bp->port.supported[cfg_idx];
if (bp->link_params.phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
@@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~an_supported_speed) {
+ if (advertising & ~an_supported_speed) {
DP(BNX2X_MSG_ETHTOOL,
"Advertisement parameters are not supported\n");
return -EINVAL;
}
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
- cmd->advertising);
- if (cmd->advertising) {
+ advertising);
+ if (advertising) {
bp->link_params.speed_cap_mask[cfg_idx] = 0;
- if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ if (advertising & ADVERTISED_10baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
}
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ if (advertising & ADVERTISED_100baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
}
- if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ if (advertising & ADVERTISED_1000baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
}
- if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ if (advertising & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseKX_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
- if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ if (advertising & (ADVERTISED_10000baseT_Full |
ADVERTISED_10000baseKX4_Full |
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
- if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ if (advertising & ADVERTISED_20000baseKR2_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
@@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* advertise the requested speed and duplex if supported */
switch (speed) {
case SPEED_10:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_100:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_1000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"1G half not supported\n");
return -EINVAL;
@@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_2500:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"2.5G half not supported\n");
return -EINVAL;
@@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_10000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"10G half not supported\n");
return -EINVAL;
@@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
bp->link_params.req_line_speed[cfg_idx] = speed;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = advertising;
}
@@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
- .get_settings = bnx2x_get_settings,
- .set_settings = bnx2x_set_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
@@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_eee = bnx2x_get_eee,
.set_eee = bnx2x_set_eee,
.get_ts_info = bnx2x_get_ts_info,
+ .get_link_ksettings = bnx2x_get_link_ksettings,
+ .set_link_ksettings = bnx2x_set_link_ksettings,
};
static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
- .get_settings = bnx2x_get_vf_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
@@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
+ .get_link_ksettings = bnx2x_get_vf_link_ksettings,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
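
bnx2x is converted from the legacy get_settings/set_settings ethtool hooks to the link_ksettings pair. Internally the driver keeps its u32 SUPPORTED_*/ADVERTISED_* masks and converts at the boundary with the ethtool helpers, which is the standard migration pattern; a sketch for a hypothetical driver:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
	u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port = PORT_TP;

	/* legacy u32 bitmask -> link-mode bitmap seen by the ethtool core */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

The set path does the reverse with ethtool_convert_link_mode_to_legacy_u32() before reusing the existing u32-based validation, exactly as in the hunks above.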
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 6082ed1b5ea0..a7ca45b251cb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
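
bnxt_xdp.o is a new object implementing the XDP attach point and the rx-path hook (bnxt_rx_xdp(), called from bnxt_rx_pkt() further down); the file itself is not part of this excerpt. For orientation, the generic shape of running an XDP program on a received buffer, as a sketch that is deliberately not bnxt-specific:

#include <linux/filter.h>

/* Returns true if the buffer was consumed (drop/tx) and no skb is needed. */
static bool example_run_xdp(struct bpf_prog *prog, u8 *data, unsigned int *len)
{
	struct xdp_buff xdp;
	u32 act;

	if (!prog)
		return false;

	xdp.data_hard_start = data;
	xdp.data = data;
	xdp.data_end = data + *len;

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		*len = xdp.data_end - xdp.data;	/* program may have adjusted */
		return false;			/* continue to the skb path */
	case XDP_TX:				/* driver-specific: xmit from the page */
	case XDP_DROP:
	default:
		return true;			/* recycle the buffer, no skb */
	}
}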
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9608cb49a11c..235733e91c79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,15 +34,13 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
+#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
@@ -56,6 +55,7 @@
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -99,6 +99,8 @@ enum board_idx {
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -133,6 +135,8 @@ static const struct {
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
@@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
@@ -213,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
-{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
-
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
-}
-
-static const u16 bnxt_lhint_arr[] = {
+const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
TX_BD_FLAGS_LHINT_1024_TO_2047,
@@ -265,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(dev, i);
+ txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
free_size = bnxt_tx_avail(bp, txr);
@@ -512,8 +509,7 @@ tx_dma_error:
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int index = txr - &bp->tx_ring[0];
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
int i;
@@ -576,6 +572,25 @@ next_tx_int:
}
}
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ struct device *dev = &bp->pdev->dev;
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ if (dma_mapping_error(dev, *mapping)) {
+ __free_page(page);
+ return NULL;
+ }
+ *mapping += bp->rx_dma_offset;
+ return page;
+}
+
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
@@ -586,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+ *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -596,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
return data;
}
-static inline int bnxt_alloc_rx_data(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- u8 *data;
dma_addr_t mapping;
- data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
- if (!data)
- return -ENOMEM;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
- rx_buf->data = data;
- dma_unmap_addr_set(rx_buf, mapping, mapping);
+ if (!page)
+ return -ENOMEM;
- rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rx_buf->data = page;
+ rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+ } else {
+ u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+
+ if (!data)
+ return -ENOMEM;
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bp->rx_offset;
+ }
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
return 0;
}
-static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
- u8 *data)
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -628,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
+ prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
- dma_unmap_addr_set(prod_rx_buf, mapping,
- dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
@@ -756,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int payload = offset_and_len >> 16;
+ unsigned int len = offset_and_len & 0xffff;
+ struct skb_frag_struct *frag;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int off, err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+ if (unlikely(!payload))
+ payload = eth_get_headlen(data_ptr, len);
+
+ skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+
+ off = (void *)data_ptr - page_address(page);
+ skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+ memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+ payload + NET_IP_ALIGN);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_size_sub(frag, payload);
+ frag->page_offset += payload;
+ skb->data_len -= payload;
+ skb->tail += payload;
+
+ return skb;
+}
+
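
In page mode each rx buffer is a full page; bnxt_rx_page_skb() above copies only the protocol headers into the skb linear area (the hardware-reported header length, or eth_get_headlen() when the hardware reports none) and leaves the rest of the packet in the page as frag 0, shrinking and shifting the frag by the copied amount. A standalone model of that accounting, with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int len = 1500;	/* packet bytes sitting in the page */
	unsigned int payload = 128;	/* header bytes pulled into the skb head */

	unsigned int linear   = payload;	/* memcpy(); skb->tail += payload */
	unsigned int frag_len = len - payload;	/* skb_frag_size_sub(frag, payload) */
	unsigned int frag_off = payload;	/* frag->page_offset += payload */

	printf("linear=%u frag_len=%u frag_off+=%u total=%u\n",
	       linear, frag_len, frag_off, linear + frag_len);
	return 0;
}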
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr, u16 cons,
- u16 prod, u8 *data, dma_addr_t dma_addr,
- unsigned int len)
+ void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
{
- int err;
+ u16 prod = rxr->rx_prod;
struct sk_buff *skb;
+ int err;
err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
@@ -772,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
- skb_put(skb, len);
+ skb_reserve(skb, bp->rx_offset);
+ skb_put(skb, offset_and_len & 0xffff);
return skb;
}
@@ -815,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ mapping = cons_rx_buf->mapping;
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
@@ -878,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping,
- bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
- memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+ memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping,
- bp->rx_copy_thresh,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
skb_put(skb, len);
return skb;
@@ -954,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
prod_rx_buf->data = tpa_info->data;
+ prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
- dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ prod_rx_buf->mapping = mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
tpa_info->data = cons_rx_buf->data;
+ tpa_info->data_ptr = cons_rx_buf->data_ptr;
cons_rx_buf->data = NULL;
- tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ tpa_info->mapping = cons_rx_buf->mapping;
tpa_info->len =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
@@ -1099,7 +1171,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int len, nw_off, tcp_opt_len;
+ int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;
@@ -1130,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
dev_kfree_skb_any(skb);
return NULL;
}
- tcp_gro_complete(skb);
if (nw_off) { /* tunnel */
struct udphdr *uh = NULL;
@@ -1180,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+ if (likely(skb))
+ tcp_gro_complete(skb);
#endif
return skb;
}
@@ -1189,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
- u8 *data, agg_bufs;
+ u8 *data_ptr, agg_bufs;
u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ void *data;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
@@ -1211,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info = &rxr->rx_tpa[agg_id];
data = tpa_info->data;
- prefetch(data);
+ data_ptr = tpa_info->data_ptr;
+ prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
@@ -1222,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
return ERR_PTR(-EBUSY);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
cp_cons = NEXT_CMP(cp_cons);
}
@@ -1234,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
@@ -1250,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
tpa_info->data = new_data;
+ tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
@@ -1307,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
* -EIO - packet aborted due to hw error indicated in BD
*/
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -1318,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
- u8 *data, agg_bufs, cmp_type;
+ u8 *data_ptr, agg_bufs, cmp_type;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ void *data;
int rc = 0;
+ u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1342,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
- (struct rx_tpa_end_cmp_ext *)rxcmp1,
- agg_event);
+ (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
if (unlikely(IS_ERR(skb)))
return -EBUSY;
@@ -1356,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
}
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
}
cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
}
- prefetch(data);
+ prefetch(data_ptr);
- agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
- RX_CMP_AGG_BUFS_SHIFT;
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
return -EBUSY;
cp_cons = NEXT_CMP(cp_cons);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
}
+ *event |= BNXT_RX_EVENT;
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
@@ -1399,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
- dma_addr = dma_unmap_addr(rx_buf, mapping);
+ dma_addr = rx_buf->mapping;
+
+ if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
} else {
- skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ u32 payload;
+
+ if (rx_buf->data_ptr == data_ptr)
+ payload = misc & RX_CMP_PAYLOAD_OFFSET;
+ else
+ payload = 0;
+ skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ payload | len);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1460,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
next_rx:
@@ -1637,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u32 cons;
int tx_pkts = 0;
int rx_pkts = 0;
- bool rx_event = false;
- bool agg_event = false;
+ u8 event = 0;
struct tx_cmp *txcmp;
while (1) {
@@ -1660,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (unlikely(tx_pkts > bp->tx_wake_thresh))
rx_pkts = budget;
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc >= 0))
rx_pkts += rc;
else if (rc == -EBUSY) /* partial completion */
break;
- rx_event = true;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE) ||
(TX_CMP_TYPE(txcmp) ==
@@ -1680,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
break;
}
+ if (event & BNXT_TX_EVENT) {
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ void __iomem *db = txr->tx_doorbell;
+ u16 prod = txr->tx_prod;
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | prod, db);
+ writel(DB_KEY_TX | prod, db);
+ }
+
cpr->cp_raw_cons = raw_cons;
/* ACK completion ring before freeing tx ring and producing new
* buffers in rx/agg rings to prevent overflowing the completion
@@ -1688,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
if (tx_pkts)
- bnxt_tx_int(bp, bnapi, tx_pkts);
+ bnapi->tx_int(bp, bnapi, tx_pkts);
- if (rx_event) {
+ if (event & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod,
rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod,
@@ -1716,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
u32 rx_pkts = 0;
- bool agg_event = false;
+ u8 event = 0;
while (1) {
int rc;
@@ -1740,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc == -EIO))
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -1763,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
}
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_pkts);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
return rx_pkts;
@@ -1782,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
- if (!bnxt_lock_napi(bnapi))
- return budget;
-
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
@@ -1792,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
if (!bnxt_has_work(bp, cpr)) {
- napi_complete(napi);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ if (napi_complete_done(napi, work_done))
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
}
}
mmiowb();
- bnxt_unlock_napi(bnapi);
return work_done;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int bnxt_busy_poll(struct napi_struct *napi)
-{
- struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int rx_work, budget = 4;
-
- if (atomic_read(&bp->intr_sem) != 0)
- return LL_FLUSH_FAILED;
-
- if (!bp->link_info.link_up)
- return LL_FLUSH_FAILED;
-
- if (!bnxt_lock_poll(bnapi))
- return LL_FLUSH_BUSY;
-
- rx_work = bnxt_poll_work(bp, bnapi, budget);
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-
- bnxt_unlock_poll(bnapi);
- return rx_work;
-}
-#endif
-
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
@@ -1905,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data)
continue;
- dma_unmap_single(
- &pdev->dev,
- dma_unmap_addr(tpa_info, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir);
tpa_info->data = NULL;
@@ -1919,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- u8 *data = rx_buf->data;
+ void *data = rx_buf->data;
if (!data)
continue;
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, rx_buf->mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
rx_buf->data = NULL;
- kfree(data);
+ if (BNXT_RX_PAGE_MODE(bp))
+ __free_page(data);
+ else
+ kfree(data);
}
for (j = 0; j < max_agg_idx; j++) {
@@ -1942,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page)
continue;
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(rx_agg_buf, mapping),
+ dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
@@ -2034,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ if (rxr->xdp_prog)
+ bpf_prog_put(rxr->xdp_prog);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2172,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
ring->queue_id = bp->q_info[j].queue_id;
+ if (i < bp->tx_nr_rings_xdp)
+ continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
j++;
}
@@ -2319,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+ if (IS_ERR(rxr->xdp_prog)) {
+ int rc = PTR_ERR(rxr->xdp_prog);
+
+ rxr->xdp_prog = NULL;
+ return rc;
+ }
+ }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
@@ -2365,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
return -ENOMEM;
rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
rxr->rx_tpa[i].mapping = mapping;
}
} else {
@@ -2380,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+ bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+ } else {
+ bp->rx_offset = BNXT_RX_OFFSET;
+ bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+ }
+
for (i = 0; i < bp->rx_nr_rings; i++) {
rc = bnxt_init_one_rx_ring(bp, i);
if (rc)
@@ -2503,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
return pages;
}
-static void bnxt_set_tpa_flags(struct bnxt *bp)
+void bnxt_set_tpa_flags(struct bnxt *bp)
{
bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
if (bp->dev->features & NETIF_F_GRO)
@@ -2535,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
- if (rx_space > PAGE_SIZE) {
+ if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
u32 jumbo_factor;
bp->flags |= BNXT_FLAG_JUMBO;
@@ -2587,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ if (page_mode) {
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+ return -EOPNOTSUPP;
+ bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bp->rx_dir = DMA_BIDIRECTIONAL;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ } else {
+ bp->dev->max_mtu = BNXT_MAX_MTU;
+ bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ bp->rx_dir = DMA_FROM_DEVICE;
+ bp->rx_skb_func = bnxt_rx_skb;
+ }
+ return 0;
+}
+
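
bnxt_set_rx_skb_mode() is the switch between the normal kmalloc'd rx buffers and the page-backed buffers XDP needs: page mode caps the MTU at BNXT_MAX_PAGE_MODE_MTU, drops LRO and the aggregation rings (no multi-buffer packets), maps rx buffers DMA_BIDIRECTIONAL so XDP_TX can transmit straight out of them, and swaps bp->rx_skb_func. A sketch of how an XDP attach path is expected to use it; this is an assumed flow, the real attach code lives in the new bnxt_xdp.c, which is not shown here:

static int example_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	int rc;

	rc = bnxt_set_rx_skb_mode(bp, prog != NULL);
	if (rc)
		return rc;	/* e.g. -EOPNOTSUPP if MTU > BNXT_MAX_PAGE_MODE_MTU */

	bnxt_set_tpa_flags(bp);		/* TPA stays off while agg rings are gone */
	bnxt_set_ring_params(bp);	/* re-size rings without the agg ring */
	/* swap bp->xdp_prog and restart the interface (elided) */
	return 0;
}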
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
int i;
@@ -2669,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
+ if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+ continue;
+
/* Allocate rss table and hash key */
vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&vnic->rss_table_dma_addr,
@@ -2892,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
+ kfree(bp->tx_ring_map);
+ bp->tx_ring_map = NULL;
kfree(bp->tx_ring);
bp->tx_ring = NULL;
kfree(bp->rx_ring);
@@ -2944,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (!bp->tx_ring)
return -ENOMEM;
+ bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+
+ if (!bp->tx_ring_map)
+ return -ENOMEM;
+
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
j = 0;
else
@@ -2952,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+ if (i >= bp->tx_nr_rings_xdp) {
+ bp->tx_ring[i].txq_index = i -
+ bp->tx_nr_rings_xdp;
+ bp->bnapi[j]->tx_int = bnxt_tx_int;
+ } else {
+ bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+ bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ }
}
rc = bnxt_alloc_stats(bp);
@@ -2993,6 +3124,47 @@ alloc_mem_err:
return rc;
}
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID)
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
@@ -3292,6 +3464,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
+#define BNXT_NTP_TUNNEL_FLTR_FLAG \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
@@ -3312,10 +3487,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.ip_protocol = keys->basic.ip_proto;
- req.src_ipaddr[0] = keys->addrs.v4addrs.src;
- req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+ int i;
+
+ req.ethertype = htons(ETH_P_IPV6);
+ req.ip_addr_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+ *(struct in6_addr *)&req.src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req.dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ for (i = 0; i < 4; i++) {
+ req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ }
+ } else {
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+ req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+ req.tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+ }
req.src_port = keys->ports.src;
req.src_port_mask = cpu_to_be16(0xffff);
@@ -3562,6 +3758,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
+ } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+ req.rss_rule =
+ cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
} else {
req.rss_rule = cpu_to_le16(0xffff);
}
@@ -3665,6 +3867,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
return rc;
}
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+ struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_vnic_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ if (resp->flags &
+ cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+ bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
u16 i;
@@ -3768,7 +3991,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
@@ -3787,7 +4010,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (rc || err) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
rc, err);
return -1;
@@ -3811,6 +4034,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
return rc;
}
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+ int rc;
+
+ if (BNXT_PF(bp)) {
+ struct hwrm_func_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ } else {
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables =
+ cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ }
+ return rc;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
@@ -3827,6 +4074,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
goto err_out;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+ }
}
for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -3901,7 +4154,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
if (rc || error_code) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
@@ -3977,6 +4230,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ /* The completion rings are about to be freed. After that the
+ * IRQ doorbell will not work anymore. So we need to disable
+ * IRQ here.
+ */
+ bnxt_disable_int_sync(bp);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -3984,7 +4243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_CMPL,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -3992,6 +4251,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+/* Caller must hold bp->hwrm_cmd_lock */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(fid);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ req.num_tx_rings = cpu_to_le16(*tx_rings);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
@@ -4249,7 +4552,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
/* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
return rc;
@@ -4463,8 +4766,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
+ if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+ goto skip_rss_ctx;
+
/* allocate context for vnic */
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
@@ -4484,6 +4791,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
bp->rsscos_nr_ctxs++;
}
+skip_rss_ctx:
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
@@ -4518,13 +4826,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
int i, rc = 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
if (vnic_id >= bp->nr_vnics)
break;
- bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ vnic = &bp->vnic_info[vnic_id];
+ vnic->flags |= BNXT_VNIC_RFS_FLAG;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
@@ -4698,40 +5010,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
return bnxt_init_chip(bp, irq_re_init);
}
-static void bnxt_disable_int(struct bnxt *bp)
-{
- int i;
-
- if (!bp->bnapi)
- return;
-
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
-static void bnxt_enable_int(struct bnxt *bp)
-{
- int i;
-
- atomic_set(&bp->intr_sem, 0);
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
int rc;
struct net_device *dev = bp->dev;
- rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+ bp->tx_nr_rings_xdp);
if (rc)
return rc;
@@ -4779,19 +5064,12 @@ static void bnxt_setup_msix(struct bnxt *bp)
tcs = netdev_get_num_tc(dev);
if (tcs > 1) {
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
- if (bp->tx_nr_rings_per_tc == 0) {
- netdev_reset_tc(dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- } else {
- int i, off, count;
+ int i, off, count;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
- for (i = 0; i < tcs; i++) {
- count = bp->tx_nr_rings_per_tc;
- off = i * count;
- netdev_set_tc_queue(dev, i, count, off);
- }
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -4836,6 +5114,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
+#ifdef CONFIG_RFS_ACCEL
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_rsscos_ctxs;
+#endif
+ return bp->pf.max_rsscos_ctxs;
+}
+
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_vnics;
+#endif
+ return bp->pf.max_vnics;
+}
+#endif
+
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
@@ -5094,10 +5392,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0; i < bp->cp_nr_rings; i++)
napi_disable(&bp->bnapi[i]->napi);
- bnxt_disable_poll(bp->bnapi[i]);
- }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -5106,7 +5402,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
bp->bnapi[i]->in_reset = false;
- bnxt_enable_poll(bp->bnapi[i]);
napi_enable(&bp->bnapi[i]->napi);
}
}
@@ -5150,7 +5445,7 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed;
+ u16 speed, fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5172,6 +5467,12 @@ static void bnxt_report_link(struct bnxt *bp)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
+ fec = bp->link_info.fec_cfg;
+ if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+ netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+ (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+ (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
@@ -5296,6 +5597,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
}
}
+
+ link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
+ if (bp->hwrm_spec_code >= 0x10504)
+ link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
+
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -5314,17 +5620,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if ((link_info->support_auto_speeds | diff) !=
link_info->support_auto_speeds) {
/* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. See bnxt_reset() for
- * comments about the rtnl_lock() sequence below.
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
*/
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
link_info->advertising = link_info->support_auto_speeds;
- if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
- (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
bnxt_hwrm_set_link_setting(bp, true, false);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
}
return 0;
}
@@ -5389,7 +5690,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
{
u8 autoneg = bp->link_info.autoneg;
u16 fw_link_speed = bp->link_info.req_link_speed;
- u32 advertising = bp->link_info.advertising;
+ u16 advertising = bp->link_info.advertising;
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
@@ -5494,6 +5795,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+ bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+ __le16 caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -5532,6 +5872,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
rc);
return rc;
}
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
link_info->req_flow_ctrl)
@@ -5683,19 +6026,6 @@ static int bnxt_open(struct net_device *dev)
return __bnxt_open_nic(bp, true, true);
}
-static void bnxt_disable_int_sync(struct bnxt *bp)
-{
- int i;
-
- atomic_inc(&bp->intr_sem);
- if (!netif_running(bp->dev))
- return;
-
- bnxt_disable_int(bp);
- for (i = 0; i < bp->cp_nr_rings; i++)
- synchronize_irq(bp->irq_tbl[i].vector);
-}
-
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -5717,13 +6047,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
msleep(20);
- /* Flush rings before disabling interrupts */
+	/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
@@ -5770,16 +6099,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static struct rtnl_link_stats64 *
+static void
bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
u32 i;
struct bnxt *bp = netdev_priv(dev);
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
if (!bp->bnapi)
- return stats;
+ return;
/* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -5826,8 +6153,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
stats->tx_errors = le64_to_cpu(tx->tx_err);
}
-
- return stats;
}
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
@@ -5980,20 +6305,36 @@ skip_uc:
return rc;
}
+/* If the chip and firmware support RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+ if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return true;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ return true;
+ return false;
+}
+
+/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
- struct bnxt_pf_info *pf = &bp->pf;
- int vnics;
+ int vnics, max_vnics, max_rss_ctxs;
- if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
return false;
vnics = 1 + bp->rx_nr_rings;
- if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+ max_vnics = bnxt_get_max_func_vnics(bp);
+ max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+ /* RSS contexts not a limiting factor */
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ max_rss_ctxs = max_vnics;
+ if (vnics > max_vnics || vnics > max_rss_ctxs) {
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
@@ -6049,6 +6390,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ flags &= ~BNXT_FLAG_TPA;
+
if (features & NETIF_F_HW_VLAN_CTAG_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
@@ -6200,29 +6544,37 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6236,16 +6588,6 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
- &bp->sp_event))
- bnxt_hwrm_phy_qcaps(bp);
-
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,22 +6608,99 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+	/* The functions below will clear BNXT_STATE_IN_SP_TASK. They
+	 * must be the last functions called before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+/* Under rtnl_lock */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
+{
+ int max_rx, max_tx, tx_sets = 1;
+ int tx_rings_needed;
+ bool sh = true;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ sh = false;
+
+ if (tcs)
+ tx_sets = tcs;
+
+ rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+ if (rc)
+ return rc;
+
+ if (max_rx < rx)
+ return -ENOMEM;
+
+ tx_rings_needed = tx * tx_sets + tx_xdp;
+ if (max_tx < tx_rings_needed)
+ return -ENOMEM;
+
+ if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+ tx_rings_needed < (tx * tx_sets + tx_xdp))
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
+{
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+}
+
+static void bnxt_cleanup_pci(struct bnxt *bp)
+{
+ bnxt_unmap_bars(bp, bp->pdev);
+ pci_release_regions(bp->pdev);
+ pci_disable_device(bp->pdev);
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -6369,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
-
return 0;
init_err_release:
- if (bp->bar2) {
- pci_iounmap(pdev, bp->bar2);
- bp->bar2 = NULL;
- }
-
- if (bp->bar1) {
- pci_iounmap(pdev, bp->bar1);
- bp->bar1 = NULL;
- }
-
- if (bp->bar0) {
- pci_iounmap(pdev, bp->bar0);
- bp->bar0 = NULL;
- }
-
+ bnxt_unmap_bars(bp, pdev);
pci_release_regions(pdev);
init_err_disable:
@@ -6444,9 +6848,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
+ int rc;
if (tc > bp->max_tc) {
- netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
tc, bp->max_tc);
return -EINVAL;
}
@@ -6457,13 +6862,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (tc) {
- int max_rx_rings, max_tx_rings, rc;
-
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
- if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
- return -ENOMEM;
- }
+ rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ tc, bp->tx_nr_rings_xdp);
+ if (rc)
+ return rc;
/* Needs to close the device and do hw resource re-allocations */
if (netif_running(bp->dev))
@@ -6507,6 +6909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
keys1->ports.ports == keys2->ports.ports &&
keys1->basic.ip_proto == keys2->basic.ip_proto &&
keys1->basic.n_proto == keys2->basic.n_proto &&
+ keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return true;
@@ -6524,9 +6927,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
- if (skb->encapsulation)
- return -EPROTONOSUPPORT;
-
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int off = 0, j;
@@ -6553,12 +6953,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+ fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
((fkeys->basic.ip_proto != IPPROTO_TCP) &&
(fkeys->basic.ip_proto != IPPROTO_UDP))) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
@@ -6765,9 +7176,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = bnxt_busy_poll,
-#endif
+ .ndo_xdp = bnxt_xdp,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -6787,15 +7196,12 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_dcb_free(bp);
- pci_iounmap(pdev, bp->bar2);
- pci_iounmap(pdev, bp->bar1);
- pci_iounmap(pdev, bp->bar0);
kfree(bp->edev);
bp->edev = NULL;
+ if (bp->xdp_prog)
+ bpf_prog_put(bp->xdp_prog);
+ bnxt_cleanup_pci(bp);
free_netdev(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
static int bnxt_probe_phy(struct bnxt *bp)
@@ -6906,8 +7312,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int rc;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
- if (rc)
- return rc;
+ if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ /* Not enough rings, try disabling agg rings. */
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc)
+ return rc;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
if (bp->flags & BNXT_FLAG_ROCE_CAP) {
int max_cp, max_stat, max_irq;
@@ -6946,6 +7361,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+ rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6987,7 +7407,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bnxt *bp;
int rc, max_irqs;
- if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+ if (pci_is_bridge(pdev))
return -ENODEV;
if (version_printed++ == 0)
@@ -7013,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
-
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
rc = bnxt_hwrm_ver_get(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
bnxt_hwrm_fw_set_time(bp);
@@ -7054,7 +7473,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - 9500 */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = 9500;
+ dev->max_mtu = BNXT_MAX_MTU;
bnxt_dcb_init(bp);
@@ -7067,11 +7486,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
bp->ulp_probe = bnxt_ulp_probe;
@@ -7081,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
rc = bnxt_hwrm_queue_qportcfg(bp);
@@ -7089,15 +7508,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);
- bnxt_set_dflt_rings(bp);
+ rc = bnxt_set_dflt_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ goto init_err_pci_clean;
+ }
/* Default RSS hash cfg. */
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
@@ -7112,7 +7538,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
- if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnxt_hwrm_vnic_qcaps(bp);
+ if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
@@ -7125,15 +7552,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_probe_phy(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_hwrm_func_reset(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = bnxt_init_int_mode(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
rc = register_netdev(dev);
if (rc)
@@ -7150,10 +7577,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_err_clr_int:
bnxt_clear_int_mode(bp);
-init_err:
- pci_iounmap(pdev, bp->bar0);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+init_err_pci_clean:
+ bnxt_cleanup_pci(bp);
init_err_free:
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16defe9ececc..faf26a2f726b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.6.0"
+#define DRV_MODULE_VERSION "1.7.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 6
+#define DRV_VER_MIN 7
#define DRV_VER_UPD 0
struct tx_bd {
@@ -416,6 +417,11 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+#define BNXT_MAX_MTU 9500
+#define BNXT_MAX_PAGE_MODE_MTU \
+ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
+ XDP_PACKET_HEADROOM)
+
#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -507,17 +513,25 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
+#define BNXT_RX_EVENT 1
+#define BNXT_AGG_EVENT 2
+#define BNXT_TX_EVENT 4
+
struct bnxt_sw_tx_bd {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(mapping);
u8 is_gso;
u8 is_push;
- unsigned short nr_frags;
+ union {
+ unsigned short nr_frags;
+ u16 rx_prod;
+ };
};
struct bnxt_sw_rx_bd {
- u8 *data;
- DEFINE_DMA_UNMAP_ADDR(mapping);
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
};
struct bnxt_sw_rx_agg_bd {
@@ -558,6 +572,7 @@ struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
u16 tx_cons;
+ u16 txq_index;
void __iomem *tx_doorbell;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
@@ -576,7 +591,8 @@ struct bnxt_tx_ring_info {
};
struct bnxt_tpa_info {
- u8 *data;
+ void *data;
+ u8 *data_ptr;
dma_addr_t mapping;
u16 len;
unsigned short gso_type;
@@ -608,6 +624,8 @@ struct bnxt_rx_ring_info {
void __iomem *rx_doorbell;
void __iomem *rx_agg_doorbell;
+ struct bpf_prog *xdp_prog;
+
struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
struct bnxt_sw_rx_bd *rx_buf_ring;
@@ -654,20 +672,13 @@ struct bnxt_napi {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- atomic_t poll_state;
-#endif
- bool in_reset;
-};
+ void (*tx_int)(struct bnxt *, struct bnxt_napi *,
+ int);
+ u32 flags;
+#define BNXT_NAPI_FLAG_XDP 0x1
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum bnxt_poll_state_t {
- BNXT_STATE_IDLE = 0,
- BNXT_STATE_NAPI,
- BNXT_STATE_POLL,
- BNXT_STATE_DISABLE,
+ bool in_reset;
};
-#endif
struct bnxt_irq {
irq_handler_t handler;
@@ -720,6 +731,7 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_FLAG 2
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
+#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -840,7 +852,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
u16 support_speeds;
- u16 auto_link_speeds;
+ u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
@@ -855,6 +867,10 @@ struct bnxt_link_info {
u16 force_link_speed;
u32 preemphasis;
u8 module_status;
+ u16 fec_cfg;
+#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
+#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
/* copy of requested setting from ethtool cmd */
u8 autoneg;
@@ -863,7 +879,7 @@ struct bnxt_link_info {
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
- u32 advertising;
+ u16 advertising; /* user adv setting */
bool force_link_chng;
/* a copy of phy_qcfg output used to report link
@@ -879,6 +895,20 @@ struct bnxt_queue_info {
u8 queue_profile;
};
+#define BNXT_MAX_LED 4
+
+struct bnxt_led_info {
+ u8 led_id;
+ u8 led_type;
+ u8 led_group_id;
+ u8 unused;
+ __le16 led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
+
+ __le16 led_color_caps;
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -956,10 +986,13 @@ struct bnxt {
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
BNXT_FLAG_ROCEV2_CAP)
+ #define BNXT_FLAG_NO_AGG_RINGS 0x20000
+ #define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -971,6 +1004,7 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -979,12 +1013,21 @@ struct bnxt {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
+ u16 *tx_ring_map;
struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
struct sk_buff *);
+ struct sk_buff * (*rx_skb_func)(struct bnxt *,
+ struct bnxt_rx_ring_info *,
+ u16, void *, u8 *, dma_addr_t,
+ unsigned int);
+
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
+ u16 rx_offset;
+ u16 rx_dma_offset;
+ enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
u32 rx_copy_thresh;
@@ -1000,6 +1043,7 @@ struct bnxt {
int tx_nr_pages;
int tx_nr_rings;
int tx_nr_rings_per_tc;
+ int tx_nr_rings_xdp;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1132,6 +1176,11 @@ struct bnxt {
struct ethtool_eee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+
+ u8 num_leds;
+ struct bnxt_led_info leds[BNXT_MAX_LED];
+
+ struct bpf_prog *xdp_prog;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1141,93 +1190,6 @@ struct bnxt {
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the NAPI poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
- int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_NAPI);
-
- return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the busy poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
- int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_POLL);
-
- return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
- atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
- return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
- int old;
-
- while (1) {
- old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
- BNXT_STATE_DISABLE);
- if (old == BNXT_STATE_IDLE)
- break;
- usleep_range(500, 5000);
- }
-}
-
-#else
-
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
- return true;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
- return false;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
- return false;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-#endif
-
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
@@ -1238,7 +1200,23 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
+extern const u16 bnxt_lhint_arr[];
+
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp);
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1246,6 +1224,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
int bmap_size);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
@@ -1259,6 +1238,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 784aa77610bc..6903a873f072 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -357,7 +357,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
+ channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -387,9 +387,10 @@ static int bnxt_set_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct bnxt *bp = netdev_priv(dev);
- int max_rx_rings, max_tx_rings, tcs;
- u32 rc = 0;
+ int req_tx_rings, req_rx_rings, tcs;
bool sh = false;
+ int tx_xdp = 0;
+ int rc = 0;
if (channel->other_count)
return -EINVAL;
@@ -409,19 +410,22 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
-
tcs = netdev_get_num_tc(dev);
- if (tcs > 1)
- max_tx_rings /= tcs;
-
- if (sh &&
- channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
- return -ENOMEM;
- if (!sh && (channel->rx_count > max_rx_rings ||
- channel->tx_count > max_tx_rings))
- return -ENOMEM;
+ req_tx_rings = sh ? channel->combined_count : channel->tx_count;
+ req_rx_rings = sh ? channel->combined_count : channel->rx_count;
+ if (bp->tx_nr_rings_xdp) {
+ if (!sh) {
+ netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
+ return -EINVAL;
+ }
+ tx_xdp = req_rx_rings;
+ }
+ rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
@@ -439,19 +443,17 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = min_t(int, channel->combined_count,
- max_rx_rings);
- bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
- max_tx_rings);
+ bp->rx_nr_rings = channel->combined_count;
+ bp->tx_nr_rings_per_tc = channel->combined_count;
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
bp->tx_nr_rings_per_tc = channel->tx_count;
}
-
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings_xdp = tx_xdp;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
if (tcs > 1)
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -524,24 +526,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr_found:
fkeys = &fltr->fkeys;
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
- fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
- fs->flow_type = UDP_V4_FLOW;
- else
- goto fltr_err;
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ } else {
+ int i;
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V6_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V6_FLOW;
+ else
+ goto fltr_err;
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ for (i = 0; i < 4; i++) {
+ fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ }
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ }
fs->ring_cookie = fltr->rxq;
rc = 0;
@@ -893,7 +920,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
- u16 fw_speeds = link_info->auto_link_speeds;
+ u16 fw_speeds = link_info->advertising;
u8 fw_pause = 0;
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -1090,8 +1117,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
- u32 speed, fw_advertising = 0;
bool set_pause = false;
+ u16 fw_advertising = 0;
+ u32 speed;
int rc = 0;
if (!BNXT_SINGLE_PF(bp))
@@ -1550,17 +1578,37 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
install.install_type = cpu_to_le32(install_type);
- rc = hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc)
- return -EOPNOTSUPP;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
+ rc = -EOPNOTSUPP;
+ goto flash_pkg_exit;
+ }
+
+ if (resp->error_code) {
+ u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ install.flags |= cpu_to_le16(
+ NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
+ rc = -EOPNOTSUPP;
+ goto flash_pkg_exit;
+ }
+ }
+ }
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- return -ENOPKG;
+ rc = -ENOPKG;
}
- return 0;
+flash_pkg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static int bnxt_flash_device(struct net_device *dev,
@@ -2039,6 +2087,47 @@ static int bnxt_nway_reset(struct net_device *dev)
return rc;
}
+static int bnxt_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_led_cfg *led_cfg;
+ u8 led_state;
+ __le16 duration;
+ int i, rc;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ if (state == ETHTOOL_ID_ACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
+ duration = cpu_to_le16(500);
+ } else if (state == ETHTOOL_ID_INACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
+ duration = cpu_to_le16(0);
+ } else {
+ return -EINVAL;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -2070,5 +2159,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
- .nway_reset = bnxt_nway_reset
+ .nway_reset = bnxt_nway_reset,
+ .set_phys_id = bnxt_set_phys_id,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3abc03b60dbc..ed1e555292e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -10,6 +10,29 @@
#ifndef BNXT_ETHTOOL_H
#define BNXT_ETHTOOL_H
+struct bnxt_led_cfg {
+ u8 led_id;
+ u8 led_state;
+ u8 led_color;
+ u8 unused;
+ __le16 led_blink_on;
+ __le16 led_blink_off;
+ u8 led_group_id;
+ u8 rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2ddfa51519a1..6e275c23d68b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,12 +11,12 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.6.0 */
+/* HSI and HWRM Specification 1.7.0 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 6
+#define HWRM_VERSION_MINOR 7
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "1.6.0"
+#define HWRM_VERSION_STR "1.7.0"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output {
__le32 dev_caps_cfg;
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -832,20 +834,32 @@ struct hwrm_func_qcfg_output {
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -921,20 +935,32 @@ struct hwrm_func_cfg_input {
__le32 min_bw;
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -1529,6 +1555,20 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -1919,6 +1959,219 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_port_led_cfg */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused_0[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ u8 led0_group_id;
+ u8 unused_1;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ u8 led1_group_id;
+ u8 unused_2;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ u8 led2_group_id;
+ u8 unused_3;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ u8 led3_group_id;
+ u8 unused_4;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_5;
+ u8 unused_6;
+ u8 unused_7;
+ u8 valid;
+};
+
/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
@@ -2216,20 +2469,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id0_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2244,20 +2509,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id1_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2272,20 +2549,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id2_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2300,20 +2589,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id3_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2328,20 +2629,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id4_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2356,20 +2669,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id5_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2384,20 +2709,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id6_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2412,20 +2749,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
__le32 queue_id7_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2467,20 +2816,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id0_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2495,20 +2856,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id1_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2523,20 +2896,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id2_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2551,20 +2936,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id3_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2579,20 +2976,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id4_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2607,20 +3016,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id5_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2635,20 +3056,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id6_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2663,20 +3096,32 @@ struct hwrm_queue_cos2bw_cfg_input {
__le32 queue_id7_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2797,6 +3242,41 @@ struct hwrm_vnic_cfg_output {
u8 valid;
};
+/* hwrm_vnic_qcaps */
+/* Input (24 bytes) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ __le32 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
/* hwrm_vnic_tpa_cfg */
/* Input (40 bytes) */
struct hwrm_vnic_tpa_cfg_input {
@@ -2992,9 +3472,10 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -3028,10 +3509,16 @@ struct hwrm_ring_alloc_input {
__le32 max_bw;
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -3066,9 +3553,10 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3166,9 +3654,10 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_RESET_REQ_RING_TYPE_TX 0x1UL
#define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3597,6 +4086,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -3697,7 +4187,7 @@ struct hwrm_cfa_ntuple_filter_free_output {
};
/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (40 bytes) */
+/* Input (48 bytes) */
struct hwrm_cfa_ntuple_filter_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3707,10 +4197,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 enables;
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
__le32 unused_0;
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ __le16 unused_1[3];
};
/* Output (16 bytes) */
@@ -4058,9 +4552,7 @@ struct hwrm_fw_set_structured_data_input {
__le64 src_data_addr;
__le16 data_len;
u8 hdr_cnt;
- u8 unused_0;
- __le16 port_id;
- __le16 unused_1;
+ u8 unused_0[5];
};
/* Output (16 bytes) */
@@ -4077,7 +4569,7 @@ struct hwrm_fw_set_structured_data_output {
};
/* hwrm_fw_get_structured_data */
-/* Input (40 bytes) */
+/* Input (32 bytes) */
struct hwrm_fw_get_structured_data_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4095,10 +4587,9 @@ struct hwrm_fw_get_structured_data_input {
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
u8 count;
u8 unused_0;
- __le16 port_id;
- __le16 unused_1[3];
};
/* Output (16 bytes) */
@@ -4582,7 +5073,11 @@ struct hwrm_nvm_install_update_input {
__le32 install_type;
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le32 unused_0;
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ __le16 unused_0;
};
/* Output (24 bytes) */
@@ -4608,6 +5103,15 @@ struct hwrm_nvm_install_update_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ u8 unused_0[7];
+};
+
/* Hardware Resource Manager Specification */
/* Input (16 bytes) */
struct input {
@@ -4735,11 +5239,26 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
#define HWRM_WOL_REASON_QCFG (0xf3UL)
+ #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
+ #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
+ #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
+ #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
+ #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
+ #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
+ #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
+ #define HWRM_CFA_FLOW_ALLOC (0x103UL)
+ #define HWRM_CFA_FLOW_FREE (0x104UL)
+ #define HWRM_CFA_FLOW_FLUSH (0x105UL)
+ #define HWRM_CFA_FLOW_STATS (0x106UL)
+ #define HWRM_CFA_FLOW_INFO (0x107UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
+ #define HWRM_NVM_FLUSH (0xfff0UL)
#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
#define HWRM_NVM_SET_VARIABLE (0xfff2UL)
#define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
@@ -4939,12 +5458,13 @@ struct ctx_hw_stats {
struct hwrm_struct_hdr {
__le16 struct_id;
#define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
u8 count;
@@ -4954,14 +5474,14 @@ struct hwrm_struct_hdr {
__le16 unused_0[3];
};
-/* DCBX Application configuration structure (8 bytes) */
-struct hwrm_struct_data_dcbx_app_cfg {
- __le16 protocol_id;
+/* DCBX Application configuration structure (1057) (8 bytes) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
u8 priority;
u8 valid;
u8 unused_0[3];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c69602508666..0b8cd7443843 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
@@ -416,6 +417,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u16 vf_ring_grps;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
@@ -429,6 +431,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
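+ /* Spread the PF's spare VNICs across the VFs, capped below at one VNIC per VF RX ring. */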
+ vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
@@ -451,7 +455,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
req.num_rx_rings = cpu_to_le16(vf_rx_rings);
req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.num_l2_ctxs = cpu_to_le16(4);
- vf_vnics = 1;
req.num_vnics = cpu_to_le16(vf_vnics);
/* FIXME spec currently uses 1 bit for stats ctx */
@@ -459,6 +462,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ int vf_tx_rsvd = vf_tx_rings;
+
req.fid = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -466,10 +471,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = le16_to_cpu(req.fid);
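+ /* Read back how many TX rings the firmware actually reserved for this VF; the running total is subtracted from the PF's pool below. */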
+ rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
+ &vf_tx_rsvd);
+ if (rc)
+ break;
+ total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc) {
- pf->max_tx_rings -= vf_tx_rings * num_vfs;
+ pf->max_tx_rings -= total_vf_tx_rings;
pf->max_rx_rings -= vf_rx_rings * num_vfs;
pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
pf->max_cp_rings -= vf_cp_rings * num_vfs;
@@ -506,6 +516,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
+ if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+ rx_ok = 0;
if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
tx_ok = 1;
@@ -544,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -585,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
+
+ bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
new file mode 100644
index 000000000000..899c30fb5188
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -0,0 +1,240 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_xdp.h"
+
+static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct tx_bd_ext *txbd1;
+ struct tx_bd *txbd;
+ u32 flags;
+ u16 prod;
+
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->rx_prod = rx_prod;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
+ txbd1->tx_bd_mss = cpu_to_le32(0);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(0);
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+}
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u16 tx_cons = txr->tx_cons;
+ u16 last_tx_cons = tx_cons;
+ u16 rx_prod;
+ int i;
+
+ for (i = 0; i < nr_pkts; i++) {
+ last_tx_cons = tx_cons;
+ tx_cons = NEXT_TX(tx_cons);
+ tx_cons = NEXT_TX(tx_cons);
+ }
+ txr->tx_cons = tx_cons;
+ if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
+ rx_prod = rxr->rx_prod;
+ } else {
+ tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ rx_prod = tx_buf->rx_prod;
+ }
+ writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+}
+
+/* returns the following:
+ * true - packet consumed by XDP and new buffer is allocated.
+ * false - packet should be passed to the stack.
+ */
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct pci_dev *pdev;
+ struct xdp_buff xdp;
+ dma_addr_t mapping;
+ void *orig_data;
+ u32 tx_avail;
+ u32 offset;
+ u32 act;
+
+ if (!xdp_prog)
+ return false;
+
+ pdev = bp->pdev;
+ txr = rxr->bnapi->tx_ring;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ xdp.data_hard_start = *data_ptr - offset;
+ xdp.data = *data_ptr;
+ xdp.data_end = *data_ptr + *len;
+ orig_data = xdp.data;
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ tx_avail = bnxt_tx_avail(bp, txr);
+ /* If the TX ring is not full, we must not update the RX producer yet
+ * because we may still be transmitting on some BDs.
+ */
+ if (tx_avail != bp->tx_ring_size)
+ *event &= ~BNXT_RX_EVENT;
+
+ if (orig_data != xdp.data) {
+ offset = xdp.data - xdp.data_hard_start;
+ *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp.data_end - xdp.data;
+ }
+ switch (act) {
+ case XDP_PASS:
+ return false;
+
+ case XDP_TX:
+ if (tx_avail < 2) {
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_reuse_rx_data(rxr, cons, page);
+ return true;
+ }
+
+ *event = BNXT_TX_EVENT;
+ dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+ bp->rx_dir);
+ bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+ NEXT_RX(rxr->rx_prod));
+ bnxt_reuse_rx_data(rxr, cons, page);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* Fall thru */
+ case XDP_ABORTED:
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+ /* Fall thru */
+ case XDP_DROP:
+ bnxt_reuse_rx_data(rxr, cons, page);
+ break;
+ }
+ return true;
+}
+
+/* Under rtnl_lock */
+static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+{
+ struct net_device *dev = bp->dev;
+ int tx_xdp = 0, rc, tc;
+ struct bpf_prog *old;
+
+ if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
+ return -EOPNOTSUPP;
+ }
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
+ netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+ return -EOPNOTSUPP;
+ }
+ if (prog)
+ tx_xdp = bp->rx_nr_rings;
+
+ tc = netdev_get_num_tc(dev);
+ if (!tc)
+ tc = 1;
+ rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ tc, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
+ return rc;
+ }
+ if (netif_running(dev))
+ bnxt_close_nic(bp, true, false);
+
+ old = xchg(&bp->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ if (prog) {
+ bnxt_set_rx_skb_mode(bp, true);
+ } else {
+ int rx, tx;
+
+ bnxt_set_rx_skb_mode(bp, false);
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+ bp->tx_nr_rings_xdp = tx_xdp;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ rc = bnxt_xdp_set(bp, xdp->prog);
+ break;
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!bp->xdp_prog;
+ rc = 0;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
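Illustrative aside (not part of the patch): the verdict handling in bnxt_rx_xdp() above returns false only for XDP_PASS; XDP_TX recycles the RX page onto the TX ring when at least two TX BDs are free (one data BD plus one extension BD, as written by bnxt_xmit_xdp()), and aborted/dropped/unknown actions reuse the RX buffer. A stand-alone sketch of that shape, using local stand-ins instead of the kernel's XDP definitions:

#include <stdbool.h>
#include <stdio.h>

enum xdp_action_sketch { PASS, TX, DROP, ABORTED };

/* Returns true when the packet was consumed (transmitted or dropped),
 * false when it should be handed to the stack.
 */
static bool handle_verdict(enum xdp_action_sketch act, unsigned int tx_avail)
{
	switch (act) {
	case PASS:
		return false;		/* let the stack see the packet */
	case TX:
		if (tx_avail < 2)	/* need a data BD and an ext BD */
			return true;	/* no room: drop/recycle */
		/* ... queue the buffer on the TX ring here ... */
		return true;
	case ABORTED:
	case DROP:
	default:
		return true;		/* recycle the RX buffer */
	}
}

int main(void)
{
	printf("PASS consumed=%d\n", handle_verdict(PASS, 4));
	printf("TX   consumed=%d\n", handle_verdict(TX, 1));
	return 0;
}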
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
new file mode 100644
index 000000000000..b529f2c5355b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -0,0 +1,19 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_XDP_H
+#define BNXT_XDP_H
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ struct page *page, u8 **data_ptr, unsigned int *len,
+ u8 *event);
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b1d2ac818710..cec94bbb2ea5 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3665,7 +3665,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
static inline u16 cnic_get_vlan(struct net_device *dev,
struct net_device **vlan_dev)
{
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
*vlan_dev = vlan_dev_real_dev(dev);
return vlan_dev_vlan_id(dev);
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 435a2e4739d1..89d4feba1a9a 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
#ifdef CONFIG_SBMAC_COALESCE
__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
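Illustrative aside (not part of the patch): several drivers in this series move from napi_complete() to napi_complete_done(napi, work_done), so the completion path reports how much of the budget was actually used rather than discarding that count (the NAPI core can then feed it into its completion heuristics). A tiny stand-alone model of the pattern, with invented helpers:

#include <stdbool.h>
#include <stdio.h>

static int pending = 5;	/* pretend packets waiting on the RX ring */

static int process_rx(int budget)
{
	int done = 0;

	while (done < budget && pending > 0) {
		pending--;
		done++;
	}
	return done;
}

/* Models napi_complete_done(): the poll routine reports exactly how much
 * work it did when it finishes early, instead of throwing the count away
 * as plain napi_complete() did.
 */
static bool poll_once(int budget)
{
	int work_done = process_rx(budget);

	if (work_done < budget) {
		printf("complete, work_done=%d (re-enable IRQs)\n", work_done);
		return true;
	}
	return false;	/* budget exhausted: stay in polling mode */
}

int main(void)
{
	while (!poll_once(4))
		;
	return 0;
}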
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ae42de4fdddf..a448177990fe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14145,8 +14145,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.set_link_ksettings = tg3_set_link_ksettings,
};
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14154,13 +14154,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
if (!tp->hw_stats) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
- return stats;
+ return;
}
tg3_get_nstats(tp, stats);
spin_unlock_bh(&tp->lock);
-
- return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 112030828c4b..6e13c937d715 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
return rcvd;
poll_exit:
- napi_complete(napi);
+ napi_complete_done(napi, rcvd);
rx_ctrl->rx_complete++;
@@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* Used spin_lock to synchronize reading of stats structures, which
* are written by BNA under the same lock.
*/
-static struct rtnl_link_stats64 *
+static void
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
bnad_netdev_hwstats_fill(bnad, stats);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
- return stats;
}
static void
@@ -3427,7 +3425,7 @@ static const struct net_device_ops bnad_netdev_ops = {
.ndo_open = bnad_open,
.ndo_stop = bnad_stop,
.ndo_start_xmit = bnad_start_xmit,
- .ndo_get_stats64 = bnad_get_stats64,
+ .ndo_get_stats64 = bnad_get_stats64,
.ndo_set_rx_mode = bnad_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnad_set_mac_address,
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..016d481c6476 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
*/
#define MACB_HALT_TIMEOUT 1230
+/* The DMA buffer descriptor size can differ
+ * depending on the hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+ return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /* The DMA buffer descriptor is 4 words long (instead of 2 words)
+ * for 64b GEM.
+ */
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ idx <<= 1;
+#endif
+ return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+ return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
unsigned int index)
{
- return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+ index = macb_tx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->tx_ring[index];
}
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
dma_addr_t offset;
offset = macb_tx_ring_wrap(queue->bp, index) *
- sizeof(struct macb_dma_desc);
+ macb_dma_desc_get_size(queue->bp);
return queue->tx_ring_dma + offset;
}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
- return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+ index = macb_rx_ring_wrap(bp, index);
+ index = macb_adj_dma_desc_idx(bp, index);
+ return &bp->rx_ring[index];
}
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
}
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
- desc->addr = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- desc->addrh = (u32)(addr >> 32);
+ struct macb_dma_desc_64 *desc_64;
+
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ desc_64 = macb_64b_desc(bp, desc);
+ desc_64->addrh = upper_32_bits(addr);
+ }
+#endif
+ desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+ dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ struct macb_dma_desc_64 *desc_64;
+
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ desc_64 = macb_64b_desc(bp, desc);
+ addr = ((u64)(desc_64->addrh) << 32);
+ }
#endif
+ addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+ return addr;
}
static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
- macb_set_addr(desc, 0);
+ macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb_dma_desc *desc;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
rmb();
bp->rx_prepared_head++;
+ desc = macb_rx_desc(bp, entry);
if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
- macb_set_addr(&(bp->rx_ring[entry]), paddr);
- bp->rx_ring[entry].ctrl = 0;
+ macb_set_addr(bp, desc, paddr);
+ desc->ctrl = 0;
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
- bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
- bp->rx_ring[entry].ctrl = 0;
+ desc->addr &= ~MACB_BIT(RX_USED);
+ desc->ctrl = 0;
}
}
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
bool rxused;
entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = &bp->rx_ring[entry];
+ desc = macb_rx_desc(bp, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- addr |= ((u64)(desc->addrh) << 32);
-#endif
+ addr = macb_get_addr(bp, desc);
ctrl = desc->ctrl;
if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
static inline void macb_init_rx_ring(struct macb *bp)
{
dma_addr_t addr;
+ struct macb_dma_desc *desc = NULL;
int i;
addr = bp->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- bp->rx_ring[i].addr = addr;
- bp->rx_ring[i].ctrl = 0;
+ desc = macb_rx_desc(bp, i);
+ macb_set_addr(bp, desc, addr);
+ desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
- bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+ desc->addr |= MACB_BIT(RX_WRAP);
bp->rx_tail = 0;
}
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
for (tail = bp->rx_tail; budget > 0; tail++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
- u32 addr, ctrl;
+ u32 ctrl;
/* Make hw descriptor updates visible to CPU */
rmb();
- addr = desc->addr;
ctrl = desc->ctrl;
- if (!(addr & MACB_BIT(RX_USED)))
+ if (!(desc->addr & MACB_BIT(RX_USED)))
break;
if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1090,7 +1146,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Packets received while interrupts were disabled */
status = macb_readl(bp, RSR);
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i = tx_head;
entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
- desc = &queue->tx_ring[entry];
+ desc = macb_tx_desc(queue, entry);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i--;
entry = macb_tx_ring_wrap(bp, i);
tx_skb = &queue->tx_skb[entry];
- desc = &queue->tx_ring[entry];
+ desc = macb_tx_desc(queue, entry);
ctrl = (u32)tx_skb->size;
if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
- macb_set_addr(desc, tx_skb->mapping);
+ macb_set_addr(bp, desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
if (!skb)
continue;
- desc = &bp->rx_ring[i];
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- addr |= ((u64)(desc->addrh) << 32);
-#endif
+ desc = macb_rx_desc(bp, i);
+ addr = macb_get_addr(bp, desc);
+
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
+ struct macb_dma_desc *desc = NULL;
unsigned int q;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
for (i = 0; i < bp->tx_ring_size; i++) {
- queue->tx_ring[i].addr = 0;
- queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(queue, i);
+ macb_set_addr(bp, desc, 0);
+ desc->ctrl = MACB_BIT(TX_USED);
}
- queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+ desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
static void macb_init_rings(struct macb *bp)
{
int i;
+ struct macb_dma_desc *desc = NULL;
macb_init_rx_ring(bp);
for (i = 0; i < bp->tx_ring_size; i++) {
- bp->queues[0].tx_ring[i].addr = 0;
- bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(&bp->queues[0], i);
+ macb_set_addr(bp, desc, 0);
+ desc->ctrl = MACB_BIT(TX_USED);
}
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
- bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+ desc->ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- dmacfg |= GEM_BIT(ADDR64);
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ dmacfg |= GEM_BIT(ADDR64);
#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+ macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Enable interrupts */
@@ -2085,6 +2146,9 @@ static int macb_open(struct net_device *dev)
netif_tx_start_all_queues(dev);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_init(dev);
+
return 0;
}
@@ -2106,6 +2170,9 @@ static int macb_close(struct net_device *dev)
macb_free_consistent(bp);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_remove(dev);
+
return 0;
}
@@ -2379,6 +2446,17 @@ static int macb_set_ringparam(struct net_device *netdev,
return 0;
}
+static int macb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ if (bp->ptp_info)
+ return bp->ptp_info->get_ts_info(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2396,7 +2474,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
@@ -2409,6 +2487,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct phy_device *phydev = dev->phydev;
+ struct macb *bp = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -2416,7 +2495,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(phydev, rq, cmd);
+ if (!bp->ptp_info)
+ return phy_mii_ioctl(phydev, rq, cmd);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
+ case SIOCGHWTSTAMP:
+ return bp->ptp_info->get_hwtst(dev, rq);
+ default:
+ return phy_mii_ioctl(phydev, rq, cmd);
+ }
}
static int macb_set_features(struct net_device *netdev,
@@ -2627,7 +2716,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue->TBQPH = GEM_TBQPH(hw_q -1);
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
} else {
/* queue0 uses legacy registers */
@@ -2637,7 +2727,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue->TBQPH = MACB_TBQPH;
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue->TBQPH = MACB_TBQPH;
#endif
}
@@ -2730,13 +2821,14 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc)),
+ macb_dma_desc_get_size(lp)),
&lp->rx_ring_dma, GFP_KERNEL);
if (!lp->rx_ring)
return -ENOMEM;
@@ -2748,7 +2840,7 @@ static int at91ether_start(struct net_device *dev)
if (!lp->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc),
+ macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
@@ -2756,13 +2848,14 @@ static int at91ether_start(struct net_device *dev)
addr = lp->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- lp->rx_ring[i].addr = addr;
- lp->rx_ring[i].ctrl = 0;
+ desc = macb_rx_desc(lp, i);
+ macb_set_addr(lp, desc, addr);
+ desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
}
/* Set the Wrap bit on the last descriptor */
- lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+ desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
lp->rx_tail = 0;
@@ -2834,7 +2927,7 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc),
+ macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
@@ -2885,13 +2978,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ desc = macb_rx_desc(lp, lp->rx_tail);
+ while (desc->addr & MACB_BIT(RX_USED)) {
p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
- pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+ pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
skb_reserve(skb, 2);
@@ -2905,17 +3000,19 @@ static void at91ether_rx(struct net_device *dev)
lp->stats.rx_dropped++;
}
- if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
lp->stats.multicast++;
/* reset ownership bit */
- lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+ desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
lp->rx_tail = 0;
else
lp->rx_tail++;
+
+ desc = macb_rx_desc(lp, lp->rx_tail);
}
}
@@ -3211,8 +3308,11 @@ static int macb_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+ if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ bp->hw_dma_cap = HW_DMA_CAP_64B;
+ } else
+ bp->hw_dma_cap = HW_DMA_CAP_32B;
#endif
spin_lock_init(&bp->lock);
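Illustrative aside (not part of the patch): with the macb changes above, the extra address word becomes optional. When DCFG6 reports 64-bit DMA addressing (the DAW64 bit checked in macb_probe()), every descriptor is followed by a second two-word block carrying the upper address bits, so descriptors double in size and ring indexes are doubled by macb_adj_dma_desc_idx(). A stand-alone sketch of the layout and the address split, not the driver's real structures:

#include <stdint.h>
#include <stdio.h>

struct desc32 { uint32_t addr, ctrl; };		/* basic descriptor */
struct desc64 { uint32_t addrh, resvd; };	/* optional trailer for 64-bit DMA */

static unsigned int desc_size(int has_64b)
{
	return sizeof(struct desc32) + (has_64b ? sizeof(struct desc64) : 0);
}

/* With the trailer present, each logical ring entry occupies two desc32
 * slots, which is what macb_adj_dma_desc_idx() models with "idx <<= 1".
 */
static unsigned int adj_idx(int has_64b, unsigned int idx)
{
	return has_64b ? idx << 1 : idx;
}

int main(void)
{
	uint64_t dma = 0x0000000a12345678ULL;
	uint32_t lo = (uint32_t)dma;		/* lower_32_bits() -> desc->addr  */
	uint32_t hi = (uint32_t)(dma >> 32);	/* upper_32_bits() -> desc->addrh */

	printf("desc size: 32-bit=%u bytes, 64-bit=%u bytes\n",
	       desc_size(0), desc_size(1));
	printf("ring index for entry 5: %u (64-bit mode), %u (32-bit mode)\n",
	       adj_idx(1, 5), adj_idx(0, 5));
	printf("addr=0x%08x addrh=0x%08x\n", lo, hi);
	return 0;
}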
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..234a49eaccfd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,6 +10,8 @@
#ifndef _MACB_H
#define _MACB_H
+#include <linux/phy.h>
+
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
#define MACB_MAX_QUEUES 8
@@ -131,6 +133,20 @@
#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */
#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */
#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */
+#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */
+#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */
+#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */
+#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */
+#define GEM_TA 0x01d8 /* 1588 Timer Adjust */
+#define GEM_TI 0x01dc /* 1588 Timer Increment */
+#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */
+#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */
+#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */
+#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */
+#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */
+#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
+#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
+#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -174,6 +190,7 @@
#define MACB_NCR_TPF_SIZE 1
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
+#define MACB_SRTSM_OFFSET 15
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -319,6 +336,32 @@
#define MACB_PTZ_SIZE 1
#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
#define MACB_WOL_SIZE 1
+#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */
+#define MACB_DRQFR_SIZE 1
+#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */
+#define MACB_SFR_SIZE 1
+#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */
+#define MACB_DRQFT_SIZE 1
+#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */
+#define MACB_SFT_SIZE 1
+#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */
+#define MACB_PDRQFR_SIZE 1
+#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */
+#define MACB_PDRSFR_SIZE 1
+#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */
+#define MACB_PDRQFT_SIZE 1
+#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */
+#define MACB_PDRSFT_SIZE 1
+#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */
+#define MACB_SRI_SIZE 1
+
+/* Timer increment fields */
+#define MACB_TI_CNS_OFFSET 0
+#define MACB_TI_CNS_SIZE 8
+#define MACB_TI_ACNS_OFFSET 8
+#define MACB_TI_ACNS_SIZE 8
+#define MACB_TI_NIT_OFFSET 16
+#define MACB_TI_NIT_SIZE 8
/* Bitfields in MAN */
#define MACB_DATA_OFFSET 0 /* data */
@@ -385,7 +428,20 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+#define GEM_DAW64_OFFSET 23
+#define GEM_DAW64_SIZE 1
+
+/* Bitfields in TISUBN */
+#define GEM_SUBNSINCR_OFFSET 0
+#define GEM_SUBNSINCR_SIZE 16
+
+/* Bitfields in TI */
+#define GEM_NSINCR_OFFSET 0
+#define GEM_NSINCR_SIZE 8
+/* Bitfields in ADJ */
+#define GEM_ADDSUB_OFFSET 31
+#define GEM_ADDSUB_SIZE 1
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
@@ -413,6 +469,7 @@
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO 0x00000020
+#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -487,11 +544,19 @@
struct macb_dma_desc {
u32 addr;
u32 ctrl;
+};
+
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- u32 addrh;
- u32 resvd;
-#endif
+enum macb_hw_dma_cap {
+ HW_DMA_CAP_32B,
+ HW_DMA_CAP_64B,
+};
+
+struct macb_dma_desc_64 {
+ u32 addrh;
+ u32 resvd;
};
+#endif
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
@@ -782,6 +847,20 @@ struct macb_or_gem_ops {
int (*mog_rx)(struct macb *bp, int budget);
};
+/* MACB-PTP interface: adapt to platform needs. */
+struct macb_ptp_info {
+ void (*ptp_init)(struct net_device *ndev);
+ void (*ptp_remove)(struct net_device *ndev);
+ s32 (*get_ptp_max_adj)(void);
+ unsigned int (*get_tsu_rate)(struct macb *bp);
+ int (*get_ts_info)(struct net_device *dev,
+ struct ethtool_ts_info *info);
+ int (*get_hwtst)(struct net_device *netdev,
+ struct ifreq *ifr);
+ int (*set_hwtst)(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+};
+
struct macb_config {
u32 caps;
unsigned int dma_burst_length;
@@ -874,6 +953,11 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+
+ struct macb_ptp_info *ptp_info; /* macb-ptp interface */
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ enum macb_hw_dma_cap hw_dma_cap;
+#endif
};
static inline bool macb_is_gem(struct macb *bp)
@@ -881,4 +965,9 @@ static inline bool macb_is_gem(struct macb *bp)
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
+static inline bool gem_has_ptp(struct macb *bp)
+{
+ return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+}
+
#endif /* _MACB_H */
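Illustrative aside (not part of the patch): the new macb_ptp_info table lets a platform PTP backend hook the ioctl and ethtool timestamping paths only when it is present; otherwise macb still falls back to phy_mii_ioctl() and ethtool_op_get_ts_info(). A stand-alone sketch of that optional-ops dispatch, with invented command values and names:

#include <stdio.h>

struct ptp_ops {				/* stand-in for macb_ptp_info */
	int (*set_hwtst)(int cmd);
	int (*get_hwtst)(void);
};

enum { CMD_SET_HWTSTAMP = 1, CMD_GET_HWTSTAMP = 2, CMD_OTHER = 3 };

static int phy_fallback(int cmd)
{
	printf("fallback ioctl, cmd=%d\n", cmd);
	return 0;
}

static int demo_set(int cmd) { printf("ptp set, cmd=%d\n", cmd); return 0; }
static int demo_get(void)    { printf("ptp get\n"); return 0; }

static int do_ioctl(const struct ptp_ops *ptp, int cmd)
{
	if (!ptp)			/* no PTP backend registered */
		return phy_fallback(cmd);

	switch (cmd) {
	case CMD_SET_HWTSTAMP:
		return ptp->set_hwtst(cmd);
	case CMD_GET_HWTSTAMP:
		return ptp->get_hwtst();
	default:
		return phy_fallback(cmd);
	}
}

int main(void)
{
	struct ptp_ops ops = { demo_set, demo_get };

	do_ioctl(NULL, CMD_SET_HWTSTAMP);	/* falls back */
	do_ioctl(&ops, CMD_SET_HWTSTAMP);	/* dispatches to the PTP backend */
	do_ioctl(&ops, CMD_OTHER);		/* unrelated cmd still falls back */
	return 0;
}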
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index ce7de6f72512..2bd7c638b178 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
work_done = xgmac_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
return work_done;
@@ -1446,9 +1446,9 @@ static void xgmac_poll_controller(struct net_device *dev)
}
#endif
-static struct rtnl_link_stats64 *
+static void
xgmac_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *storage)
{
struct xgmac_priv *priv = netdev_priv(dev);
void __iomem *base = priv->base;
@@ -1476,7 +1476,6 @@ xgmac_get_stats64(struct net_device *dev,
writel(0, base + XGMAC_MMC_CTRL);
spin_unlock_bh(&priv->stats_lock);
- return storage;
}
static int xgmac_set_mac_address(struct net_device *dev, void *p)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index b00c3002360e..50384cede8be 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev,
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
- struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
- max_rx = CFG_GET_OQ_MAX_Q(conf23);
- max_tx = CFG_GET_IQ_MAX_Q(conf23);
- rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
- tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
+ max_rx = oct->sriov_info.num_pf_rings;
+ max_tx = oct->sriov_info.num_pf_rings;
+ rx_count = lio->linfo.num_rxpciq;
+ tx_count = lio->linfo.num_txpciq;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ max_tx = oct->sriov_info.rings_per_vf;
+ max_rx = oct->sriov_info.rings_per_vf;
+ rx_count = lio->linfo.num_rxpciq;
+ tx_count = lio->linfo.num_txpciq;
}
channel->max_rx = max_rx;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 39a9665c9d00..be9c0e3f5ade 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -15,6 +15,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
@@ -2223,25 +2224,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv __attribute__((unused)),
- select_queue_fallback_t fallback __attribute__((unused)))
-{
- u32 qindex = 0;
- struct lio *lio;
-
- lio = GET_LIO(dev);
- qindex = skb_tx_hash(dev, skb);
-
- return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
/** Routine to push packets arriving on the Octeon interface up to the network layer.
* @param oct_id - octeon device id.
* @param skbuff - skbuff struct to be passed to network layer.
@@ -2263,6 +2245,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
struct skb_shared_hwtstamps *shhwtstamps;
u64 ns;
u16 vtag = 0;
+ u32 r_dh_off;
struct net_device *netdev = (struct net_device *)arg;
struct octeon_droq *droq = container_of(param, struct octeon_droq,
napi);
@@ -2308,6 +2291,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
put_page(pg_info->page);
}
+ r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
if (((oct->chip_id == OCTEON_CN66XX) ||
(oct->chip_id == OCTEON_CN68XX)) &&
ptp_enable) {
@@ -2320,16 +2305,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
/* Nanoseconds are in the first 64-bits
* of the packet.
*/
- memcpy(&ns, (skb->data), sizeof(ns));
+ memcpy(&ns, (skb->data + r_dh_off),
+ sizeof(ns));
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp =
ns_to_ktime(ns +
lio->ptp_adjust);
}
- skb_pull(skb, sizeof(ns));
}
}
+ if (rh->r_dh.has_hash) {
+ __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+ u32 hash = be32_to_cpu(*hash_be);
+
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ }
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+
skb->protocol = eth_type_trans(skb, skb->dev);
if ((netdev->features & NETIF_F_RXCSUM) &&
(((rh->r_dh.encap_on) &&
@@ -2365,7 +2361,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
if (packet_was_received) {
droq->stats.rx_bytes_received += len;
droq->stats.rx_pkts_received++;
- netdev->last_rx = jiffies;
} else {
droq->stats.rx_dropped++;
netif_info(lio, rx_err, lio->netdev,
@@ -2441,7 +2436,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
iq = oct->instr_queue[iq_no];
if (iq) {
/* Process iq buffers within the budget limits */
- tx_done = octeon_flush_iq(oct, iq, 1, budget);
+ tx_done = octeon_flush_iq(oct, iq, budget);
/* Update iq read-index rather than waiting for next interrupt.
* Return if tx_done is false.
*/
@@ -2451,8 +2446,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
__func__, iq_no);
}
- if ((work_done < budget) && (tx_done)) {
- napi_complete(napi);
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ tx_done = 1;
+ napi_complete_done(napi, work_done);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
@@ -2629,7 +2628,9 @@ static int liquidio_open(struct net_device *netdev)
oct->droq[0]->ops.poll_mode = 1;
}
- oct_ptp_open(netdev);
+ if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
+ ptp_enable)
+ oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
@@ -2677,13 +2678,7 @@ static int liquidio_stop(struct net_device *netdev)
lio->linfo.link.s.link_up = 0;
lio->link_changes++;
- /* Pause for a moment and wait for Octeon to flush out (to the wire) any
- * egress packets that are in-flight.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100));
-
- /* Now it should be safe to tell Octeon that nic interface is down. */
+ /* Tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
if (OCTEON_CN23XX_PF(oct)) {
@@ -2973,9 +2968,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
+ struct lio *lio = GET_LIO(netdev);
+
switch (cmd) {
case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr);
+ if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
+ lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
+ return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -3322,11 +3321,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- if (skb_shinfo(skb)->gso_size)
- stats->tx_done += skb_shinfo(skb)->gso_segs;
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
else
stats->tx_done++;
- stats->tx_tot_bytes += skb->len;
+ stats->tx_tot_bytes += ndata.datasize;
return NETDEV_TX_OK;
@@ -3741,7 +3740,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
- .ndo_select_queue = select_q
};
/** \brief Entry point for the liquidio module
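Illustrative aside (not part of the patch): the receive-header handling above treats rh->r_dh.len as a count of 8-byte units (BYTES_PER_DHLEN_UNIT) prepended to the payload, reads the optional fields back-to-front starting at the last unit, and finally pulls the whole header off before the skb reaches the stack. A stand-alone model of the offset walk; the field layout here simply mirrors the order in which the shown code reads them:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DHLEN_UNIT 8	/* BYTES_PER_DHLEN_UNIT: header length is in 8-byte words */

int main(void)
{
	/* Pretend receive header: r_dh.len = 2, i.e. 16 bytes in front of
	 * the payload, carrying a flow hash in the first unit and a
	 * timestamp in the last one.
	 */
	unsigned int dh_len = 2;
	int has_hwtstamp = 1, has_hash = 1;
	uint8_t data[32] = { 0 };
	uint64_t ts = 123456789ULL;
	uint32_t hash = 0xabcd1234;

	memcpy(data + 0, &hash, sizeof(hash));
	memcpy(data + 8, &ts, sizeof(ts));

	/* Start at the last 8-byte unit of the header and walk backwards. */
	unsigned int off = (dh_len - 1) * DHLEN_UNIT;

	if (has_hwtstamp) {
		uint64_t t;

		memcpy(&t, data + off, sizeof(t));
		printf("timestamp at offset %u: %llu\n", off,
		       (unsigned long long)t);
		off -= DHLEN_UNIT;
	}
	if (has_hash) {
		uint32_t h;

		memcpy(&h, data + off, sizeof(h));
		printf("hash at offset %u: 0x%08x\n", off, h);
	}

	/* Finally the whole header is dropped (skb_pull of len * 8 bytes). */
	printf("payload starts at byte %u\n", dh_len * DHLEN_UNIT);
	return 0;
}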
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 70d96c10c673..9d5e03502c76 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -15,6 +15,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
+#include <linux/module.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
@@ -1455,26 +1456,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv __attribute__((unused)),
- select_queue_fallback_t fallback __attribute__((unused)))
-{
- struct lio *lio;
- u32 qindex;
-
- lio = GET_LIO(dev);
-
- qindex = skb_tx_hash(dev, skb);
-
- return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
/** Routine to push packets arriving on the Octeon interface up to the network layer.
* @param oct_id - octeon device id.
* @param skbuff - skbuff struct to be passed to network layer.
@@ -1497,6 +1478,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
struct net_device *netdev = (struct net_device *)arg;
struct sk_buff *skb = (struct sk_buff *)skbuff;
u16 vtag = 0;
+ u32 r_dh_off;
if (netdev) {
struct lio *lio = GET_LIO(netdev);
@@ -1540,7 +1522,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
put_page(pg_info->page);
}
- skb_pull(skb, rh->r_dh.len * 8);
+ r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
+ if (rh->r_dh.has_hwtstamp)
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+
+ if (rh->r_dh.has_hash) {
+ __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+ u32 hash = be32_to_cpu(*hash_be);
+
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ }
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
skb->protocol = eth_type_trans(skb, skb->dev);
if ((netdev->features & NETIF_F_RXCSUM) &&
@@ -1577,7 +1572,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
if (packet_was_received) {
droq->stats.rx_bytes_received += len;
droq->stats.rx_pkts_received++;
- netdev->last_rx = jiffies;
} else {
droq->stats.rx_dropped++;
netif_info(lio, rx_err, lio->netdev,
@@ -1627,7 +1621,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
iq = oct->instr_queue[iq_no];
if (iq) {
/* Process iq buffers within the budget limits */
- tx_done = octeon_flush_iq(oct, iq, 1, budget);
+ tx_done = octeon_flush_iq(oct, iq, budget);
/* Update iq read-index rather than waiting for next interrupt.
* Return if tx_done is false.
*/
@@ -1637,8 +1631,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
__func__, iq_no);
}
- if ((work_done < budget) && (tx_done)) {
- napi_complete(napi);
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ tx_done = 1;
+ napi_complete_done(napi, work_done);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
@@ -2440,11 +2438,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- if (skb_shinfo(skb)->gso_size)
- stats->tx_done += skb_shinfo(skb)->gso_segs;
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
else
stats->tx_done++;
- stats->tx_tot_bytes += skb->len;
+ stats->tx_tot_bytes += ndata.datasize;
return NETDEV_TX_OK;
@@ -2703,7 +2701,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_features = liquidio_set_features,
.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
- .ndo_select_queue = select_q,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index ba329f6ca779..294c6f3c6b48 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -98,6 +98,9 @@ enum octeon_tag_type {
#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+#define BYTES_PER_DHLEN_UNIT 8
+#define MAX_REG_CNT 2000000U
+
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
if ((index + count) >= max)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 1cb3514fc949..b3dc2e9651a8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -429,15 +429,11 @@ struct octeon_config {
/* The following config values are fixed and should not be modified. */
-/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
-#define MAX_BAR1_MAP_INDEX 2
+#define BAR1_INDEX_DYNAMIC_MAP 2
+#define BAR1_INDEX_STATIC_MAP 15
#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
-/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
- * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
- */
-#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \
- OCTEON_BAR1_ENTRY_SIZE)
+#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE)
/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
* NoResponse Lists are now maintained with each IQ. (Dec' 2007).
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 3265e0b7923e..53f38d05f7c2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -18,6 +18,7 @@
/**
* @file octeon_console.c
*/
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
@@ -549,6 +550,16 @@ int octeon_init_consoles(struct octeon_device *oct)
return ret;
}
+ /* Dedicate one of Octeon's BAR1 index registers to create a static
+ * mapping to a region of Octeon DRAM that contains the PCI console
+ * named block.
+ */
+ oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP;
+ oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,
+ true);
+ oct->console_nb_info.dram_region_base = addr
+ & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL);
+
/* num_consoles > 0 is an indication that the consoles
* are accessible
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index a8df493a5012..9675ffbf25e6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
spin_lock_bh(&droq->lock);
writel(droq->pkt_count, droq->pkts_sent_reg);
droq->pkt_count = 0;
+ /* this write needs to be flushed before we release the lock */
+ mmiowb();
spin_unlock_bh(&droq->lock);
oct = droq->oct_dev;
}
@@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
spin_lock_bh(&iq->lock);
writel(iq->pkt_in_done, iq->inst_cnt_reg);
iq->pkt_in_done = 0;
+ /* this write needs to be flushed before we release the lock */
+ mmiowb();
spin_unlock_bh(&iq->lock);
oct = iq->oct_dev;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 18f6836250a6..c301a3852482 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -477,6 +477,12 @@ struct octeon_device {
/* Console caches */
struct octeon_console console[MAX_OCTEON_MAPS];
+ /* Console named block info */
+ struct {
+ u64 dram_region_base;
+ int bar1_index;
+ } console_nb_info;
+
/* Coprocessor clock rate. */
u64 coproc_clock_rate;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index e04ca8f0b4a7..4608a5af35a3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx,
void *app_ctx);
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
- u32 pending_thresh, u32 napi_budget);
+ u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 73696b427f06..201b9875f9bb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct,
{
struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+ long timeout = LIO_MBOX_WRITE_WAIT_TIME;
unsigned long flags;
spin_lock_irqsave(&mbox->lock, flags);
@@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct,
count = 0;
while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
- schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+ schedule_timeout_uninterruptible(timeout);
if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
ret = OCTEON_MBOX_STATUS_FAILED;
break;
@@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct,
count = 0;
while (readq(mbox->mbox_write_reg) !=
OCTEON_PFVFACK) {
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(timeout);
if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
ret = OCTEON_MBOX_STATUS_FAILED;
break;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index fe60a3e6247b..c9376fe075bc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -31,8 +31,8 @@
#define OCTEON_PFVFSIG 0x1122334455667788
#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
-#define LIO_MBOX_WRITE_WAIT_CNT 1000
-#define LIO_MBOX_WRITE_WAIT_TIME 10
+#define LIO_MBOX_WRITE_WAIT_CNT 1000
+#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
enum octeon_mbox_cmd_status {
OCTEON_MBOX_STATUS_SUCCESS = 0,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 13a18c9a7a51..5cd96e7d426c 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -23,7 +23,7 @@
#include "response_manager.h"
#include "octeon_device.h"
-#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
@@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
u32 copy_len = 0, index_reg_val = 0;
unsigned long flags;
u8 __iomem *mapped_addr;
+ u64 static_mapping_base;
+
+ static_mapping_base = oct->console_nb_info.dram_region_base;
+
+ if (static_mapping_base &&
+ static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
+ int bar1_index = oct->console_nb_info.bar1_index;
+
+ mapped_addr = oct->mmio[1].hw_addr
+ + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
+ + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
+
+ if (op)
+ octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
+ else
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);
+
+ return;
+ }
spin_lock_irqsave(&oct->mem_access_lock, flags);
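Illustrative aside (not part of the patch): the fast path added to __octeon_pci_rw_core_mem() only triggers when the target address shares its 4 MB prefix with the console region that octeon_init_consoles() mapped through the static BAR1 index register; the host-side offset is then the BAR1 window selected by that index plus the offset inside the 4 MB entry. The address arithmetic, sketched stand-alone with made-up values:

#include <stdint.h>
#include <stdio.h>

#define BAR1_ENTRY_SIZE (4ULL * 1024 * 1024)	/* OCTEON_BAR1_ENTRY_SIZE */

int main(void)
{
	uint64_t console_addr = 0x11234567ULL;	/* some address inside the console block */
	int bar1_index = 15;			/* BAR1_INDEX_STATIC_MAP */

	/* Region base recorded at init time: the 4 MB-aligned prefix. */
	uint64_t region_base = console_addr & ~(BAR1_ENTRY_SIZE - 1);

	/* Later accesses to any addr with the same 4 MB prefix hit the
	 * static window: the index selects the window, the low bits give
	 * the offset within it.
	 */
	uint64_t addr = console_addr + 0x100;

	if ((addr & ~(BAR1_ENTRY_SIZE - 1)) == region_base) {
		uint64_t off_in_bar1 = ((uint64_t)bar1_index * BAR1_ENTRY_SIZE) +
				       (addr & (BAR1_ENTRY_SIZE - 1));

		printf("BAR1 offset: 0x%llx\n", (unsigned long long)off_in_bar1);
	}
	return 0;
}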
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index c3d6a8228362..0243be8dd56f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- if (OCTEON_CN23XX_PF(oct)) {
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
@@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
*sc->status_word = COMPLETION_WORD_INIT;
- if (OCTEON_CN23XX_PF(oct))
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
sc->cmd.cmd3.rptr = sc->dmarptr;
else
sc->cmd.cmd2.rptr = sc->dmarptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 3ce66759e80a..707bc15adec6 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
- u32 pending_thresh, u32 napi_budget)
+ u32 napi_budget)
{
u32 inst_processed = 0;
u32 tot_inst_processed = 0;
@@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
- if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
- do {
- /* Process any outstanding IQ packets. */
- if (iq->flush_index == iq->octeon_read_index)
- break;
-
- if (napi_budget)
- inst_processed = lio_process_iq_request_list
- (oct, iq,
- napi_budget - tot_inst_processed);
- else
- inst_processed =
- lio_process_iq_request_list(oct, iq, 0);
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->octeon_read_index)
+ break;
- if (inst_processed) {
- atomic_sub(inst_processed, &iq->instr_pending);
- iq->stats.instr_processed += inst_processed;
- }
+ if (napi_budget)
+ inst_processed =
+ lio_process_iq_request_list(oct, iq,
+ napi_budget -
+ tot_inst_processed);
+ else
+ inst_processed =
+ lio_process_iq_request_list(oct, iq, 0);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
- tot_inst_processed += inst_processed;
- inst_processed = 0;
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
- } while (tot_inst_processed < napi_budget);
+ } while (tot_inst_processed < napi_budget);
- if (napi_budget && (tot_inst_processed >= napi_budget))
- tx_done = 0;
- }
+ if (napi_budget && (tot_inst_processed >= napi_budget))
+ tx_done = 0;
iq->last_db_time = jiffies;
@@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
iq->last_db_time = jiffies;
/* Flush the instruction queue */
- octeon_flush_iq(oct, iq, 1, 0);
+ octeon_flush_iq(oct, iq, 0);
lio_enable_irq(NULL, iq);
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 21f80f5744ba..a2138686c605 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
/* We stopped because no more packets were available. */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
octeon_mgmt_enable_rx_irq(p);
}
octeon_mgmt_update_rx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 2e74bbaa38e1..02a986cdbb39 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
- ring->rx_max_pending = MAX_RCV_BUF_COUNT;
- ring->rx_pending = qs->rbdr_len;
+ ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+ ring->rx_pending = qs->cq_len;
ring->tx_max_pending = MAX_SND_QUEUE_LEN;
ring->tx_pending = qs->sq_len;
}
+static int nicvf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ u32 rx_count, tx_count;
+
+ /* Due to HW errata this is not supported on T88 pass 1.x silicon */
+ if (pass1_silicon(nic->pdev))
+ return -EINVAL;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ tx_count = clamp_t(u32, ring->tx_pending,
+ MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+ rx_count = clamp_t(u32, ring->rx_pending,
+ MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+ if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+ return 0;
+
+ /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+ qs->sq_len = rounddown_pow_of_two(tx_count);
+ qs->cq_len = rounddown_pow_of_two(rx_count);
+
+ if (netif_running(netdev)) {
+ nicvf_stop(netdev);
+ nicvf_open(netdev);
+ }
+
+ return 0;
+}
+
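
The new nicvf_set_ringparam() clamps the requested ring sizes into the supported window and rounds down to a power of two, because the hardware encodes a queue size only as 1K << n (see the qsize = ilog2(len >> 10) programming in nicvf_queues.c further down). A self-contained userspace sketch of that arithmetic; the MIN/MAX values assume the CMP_QUEUE_SIZE0..SIZE6 enum runs 0..6, and nicvf_hw_qsize() is an illustrative name, not a driver function:

#include <stdio.h>
#include <stdint.h>

#define MIN_CMP_QUEUE_LEN (1ULL << (0 + 10))	/* CMP_QUEUE_SIZE0 -> 1K  */
#define MAX_CMP_QUEUE_LEN (1ULL << (6 + 10))	/* CMP_QUEUE_SIZE6 -> 64K */

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t rounddown_pow_of_two(uint32_t v)
{
	uint32_t r = 1;

	while ((r << 1) <= v)
		r <<= 1;
	return r;
}

/* Encoding programmed into the CQ/SQ CFG register: ilog2(len >> 10) */
static unsigned int nicvf_hw_qsize(uint32_t len)
{
	unsigned int n = 0;

	len >>= 10;
	while (len >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t req = 5000;	/* e.g. "ethtool -G ethX rx 5000" */
	uint32_t len;

	len = clamp_u32(req, MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
	len = rounddown_pow_of_two(len);
	printf("request %u -> cq_len %u, qsize field %u\n",
	       req, len, nicvf_hw_qsize(len));	/* 5000 -> 4096, field 2 */
	return 0;
}

Requesting 5000 RX entries therefore lands on a 4096-entry completion queue programmed with qsize field 2.
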
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
struct ethtool_rxnfc *info)
{
@@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
}
static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, u8 hfunc)
+ const u8 *hkey, const u8 hfunc)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
@@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_regs = nicvf_get_regs,
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
+ .set_ringparam = nicvf_set_ringparam,
.get_rxnfc = nicvf_get_rxnfc,
.set_rxnfc = nicvf_set_rxnfc,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 2006f58b14b1..6feaa24bcfd4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
/* Slow packet rate, exit polling */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Re-enable interrupts */
cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
cq->cq_idx);
@@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev)
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
- if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+ err = nicvf_update_hw_max_frs(nic, netdev->mtu);
+ if (err)
goto cleanup;
/* Clear percpu stats */
@@ -1461,8 +1462,8 @@ void nicvf_update_stats(struct nicvf *nic)
nicvf_update_sq_stats(nic, qidx);
}
-static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
@@ -1478,7 +1479,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
stats->tx_packets = hw_stats->tx_frames;
stats->tx_dropped = hw_stats->tx_drops;
- return stats;
}
static void nicvf_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d2ac133e36f1..ac0390be3b12 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
cq_cfg.ena = 1;
cq_cfg.reset = 0;
cq_cfg.caching = 0;
- cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.qsize = ilog2(qs->cq_len >> 10);
cq_cfg.avg_con = 0;
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
@@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
- sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.qsize = ilog2(qs->sq_len >> 10);
sq_cfg.tstmp_bgx_intf = 0;
- sq_cfg.cq_limit = 0;
+ /* CQ's level at which HW will stop processing SQEs to avoid
+ * transmitting a pkt with no space in CQ to post CQE_TX.
+ */
+ sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
/* Set threshold value for interrupt generation */
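
The cq_limit programmed above reserves roughly CMP_QUEUE_PIPELINE_RSVD (544) CQEs of headroom before the hardware stops accepting SQEs; reading the formula (544 * 256) / cq_len as "number of cq_len/256 units" is my interpretation of the register field, not something stated in the patch. A small worked example:

#include <stdio.h>

#define CMP_QUEUE_PIPELINE_RSVD 544

int main(void)
{
	unsigned int lens[] = { 1024, 4096, 16384, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int cq_len = lens[i];
		unsigned int cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / cq_len;

		/* cq_limit * (cq_len / 256) ~= 544 CQEs of reserved headroom */
		printf("cq_len %5u -> cq_limit %3u (headroom ~%u CQEs)\n",
		       cq_len, cq_limit, cq_limit * (cq_len / 256));
	}
	return 0;
}
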
@@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
bool disable = false;
struct queue_set *qs = nic->qs;
+ struct queue_set *pqs = nic->pnicvf->qs;
int qidx;
if (!qs)
return 0;
+ /* Take primary VF's queue lengths.
+ * This is needed to take queue lengths set from ethtool
+ * into consideration.
+ */
+ if (nic->sqs_mode && pqs) {
+ qs->cq_len = pqs->cq_len;
+ qs->sq_len = pqs->sq_len;
+ }
+
if (enable) {
if (nicvf_alloc_resources(nic))
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 9e2104675bc9..5cb84da99a2d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -59,8 +59,9 @@
/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT 1
-#define SND_QSIZE SND_QUEUE_SIZE2
+#define SND_QSIZE SND_QUEUE_SIZE0
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
@@ -70,11 +71,18 @@
/* Keep CQ and SQ sizes same, if timestamping
* is enabled this equation will change.
*/
-#define CMP_QSIZE CMP_QUEUE_SIZE2
+#define CMP_QSIZE CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
+/* No of CQEs that might anyway get used by HW due to pipelining
+ * effects irrespective of PASS/DROP/LEVELS being configured
+ */
+#define CMP_QUEUE_PIPELINE_RSVD 544
+
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
@@ -93,8 +101,8 @@
* RED accepts pkt if unused CQE < 2304 & >= 2560
* DROPs pkts if unused CQE < 2304
*/
-#define RQ_PASS_CQ_LVL 160ULL
-#define RQ_DROP_CQ_LVL 144ULL
+#define RQ_PASS_CQ_LVL 192ULL
+#define RQ_DROP_CQ_LVL 184ULL
/* RED and Backpressure levels of RBDR for pkt reception
* For RBDR, level is a measure of fullness i.e 0x0 means empty
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c750e064..4c8e8cf730bb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -31,6 +31,7 @@ struct lmac {
u8 lmac_type;
u8 lane_to_sds;
bool use_training;
+ bool autoneg;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -47,8 +48,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
- int lmac_count;
+ u8 lmac_count;
u8 max_lmac;
+ u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -460,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
/* power down, reset autoneg, autoneg enable */
cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
cfg &= ~PCS_MRX_CTL_PWR_DN;
- cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ cfg |= PCS_MRX_CTL_RST_AN;
+ if (lmac->phydev) {
+ cfg |= PCS_MRX_CTL_AN_EN;
+ } else {
+ /* In scenarios where PHY driver is not present or it's a
+ * non-standard PHY, FW sets AN_EN to inform Linux driver
+ * to do auto-neg and link polling or not.
+ */
+ if (cfg & PCS_MRX_CTL_AN_EN)
+ lmac->autoneg = true;
+ }
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -471,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
return 0;
}
- if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
PCS_MRX_STATUS_AN_CPT, false)) {
dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -677,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+ u64 pcs_link, an_result;
+ u8 speed;
+
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ /*Link state bit is sticky, read it again*/
+ if (!(pcs_link & PCS_MRX_STATUS_LINK))
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ goto next_poll;
+ }
+
+ lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+ an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_ANX_AN_RESULTS);
+
+ speed = (an_result >> 3) & 0x3;
+ lmac->last_duplex = (an_result >> 1) & 0x1;
+ switch (speed) {
+ case 0:
+ lmac->last_speed = 10;
+ break;
+ case 1:
+ lmac->last_speed = 100;
+ break;
+ case 2:
+ lmac->last_speed = 1000;
+ break;
+ default:
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+next_poll:
+
+ if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up)
+ bgx_sgmii_change_link_state(lmac);
+ lmac->last_link = lmac->link_up;
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
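
bgx_poll_for_sgmii_link() derives speed and duplex from BGX_GMP_PCS_ANX_AN_RESULTS: bits [4:3] carry the speed code (0 = 10, 1 = 100, 2 = 1000 Mbps) and bit 1 the duplex, with any other speed code treated as link-down. A self-contained sketch of just that decode, using stand-in values for SPEED_UNKNOWN/DUPLEX_UNKNOWN:

#include <stdio.h>
#include <stdint.h>

#define SPEED_UNKNOWN	-1
#define DUPLEX_UNKNOWN	0xff

static void decode_an_results(uint64_t an_result, int *speed, int *duplex)
{
	/* bits [4:3]: speed code, bit [1]: full duplex */
	switch ((an_result >> 3) & 0x3) {
	case 0:
		*speed = 10;
		break;
	case 1:
		*speed = 100;
		break;
	case 2:
		*speed = 1000;
		break;
	default:
		*speed = SPEED_UNKNOWN;
		break;
	}
	*duplex = (*speed == SPEED_UNKNOWN) ? DUPLEX_UNKNOWN
					    : (int)((an_result >> 1) & 0x1);
}

int main(void)
{
	int speed, duplex;

	decode_an_results(0x17, &speed, &duplex);	/* 0b10111 */
	printf("speed %d Mbps, duplex %d\n", speed, duplex);	/* 1000, 1 */
	return 0;
}
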
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
+ if (lmac->is_sgmii) {
+ bgx_poll_for_sgmii_link(lmac);
+ return;
+ }
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -774,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_XLAUI) &&
(lmac->lmac_type != BGX_MODE_40G_KR) &&
(lmac->lmac_type != BGX_MODE_10G_KR)) {
- if (!lmac->phydev)
- return -ENODEV;
-
+ if (!lmac->phydev) {
+ if (lmac->autoneg) {
+ bgx_reg_write(bgx, lmacid,
+ BGX_GMP_PCS_LINKX_TIMER,
+ PCS_LINKX_TIMER_COUNT);
+ goto poll;
+ } else {
+ /* Default to below link speed and duplex */
+ lmac->link_up = true;
+ lmac->last_speed = 1000;
+ lmac->last_duplex = 1;
+ bgx_sgmii_change_link_state(lmac);
+ return 0;
+ }
+ }
lmac->phydev->dev_flags = 0;
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -785,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
return -ENODEV;
phy_start_aneg(lmac->phydev);
- } else {
- lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
- if (!lmac->check_link)
- return -ENOMEM;
- INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
- queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ return 0;
}
+poll:
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
return 0;
}
@@ -893,17 +978,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
struct device *dev = &bgx->pdev->dev;
struct lmac *lmac;
char str[20];
- u8 dlm;
- if (lmacid > bgx->max_lmac)
+ if (!bgx->is_dlm && lmacid)
return;
lmac = &bgx->lmac[lmacid];
- dlm = (lmacid / 2) + (bgx->bgx_id * 2);
if (!bgx->is_dlm)
sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
else
- sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
+ sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
@@ -989,7 +1072,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
struct lmac *lmac;
- struct lmac *olmac;
u64 cmr_cfg;
u8 lmac_type;
u8 lane_to_sds;
@@ -1009,62 +1091,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
return;
}
- /* On 81xx BGX can be split across 2 DLMs
- * firmware programs lmac_type of LMAC0 and LMAC2
+ /* For DLMs or SLMs on 80/81/83xx so many lane configurations
+ * are possible and vary across boards. Also Kernel doesn't have
+ * any way to identify board type/info and since firmware does,
+ * just take lmac type and serdes lane config as is.
*/
- if ((idx == 0) || (idx == 2)) {
- cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
- lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
- lane_to_sds = (u8)(cmr_cfg & 0xFF);
- /* Check if config is not reset value */
- if ((lmac_type == 0) && (lane_to_sds == 0xE4))
- lmac->lmac_type = BGX_MODE_INVALID;
- else
- lmac->lmac_type = lmac_type;
- lmac_set_training(bgx, lmac, lmac->lmacid);
- lmac_set_lane2sds(bgx, lmac);
-
- olmac = &bgx->lmac[idx + 1];
- /* Check if other LMAC on the same DLM is already configured by
- * firmware, if so use the same config or else set as same, as
- * that of LMAC 0/2.
- * This check is needed as on 80xx only one lane of each of the
- * DLM of BGX0 is used, so have to rely on firmware for
- * distingushing 80xx from 81xx.
- */
- cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
- lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
- lane_to_sds = (u8)(cmr_cfg & 0xFF);
- if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
- olmac->lmac_type = lmac->lmac_type;
- lmac_set_lane2sds(bgx, olmac);
- } else {
- olmac->lmac_type = lmac_type;
- olmac->lane_to_sds = lane_to_sds;
- }
- lmac_set_training(bgx, olmac, olmac->lmacid);
- }
-}
-
-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
-{
- struct lmac *lmac;
-
- if (!bgx->is_dlm)
- return true;
-
- lmac = &bgx->lmac[0];
- if (lmac->lmac_type == BGX_MODE_INVALID)
- return false;
-
- return true;
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac->lane_to_sds = lane_to_sds;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
}
static void bgx_get_qlm_mode(struct bgx *bgx)
{
struct lmac *lmac;
- struct lmac *lmac01;
- struct lmac *lmac23;
u8 idx;
/* Init all LMAC's type to invalid */
@@ -1080,29 +1126,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
if (bgx->lmac_count > bgx->max_lmac)
bgx->lmac_count = bgx->max_lmac;
- for (idx = 0; idx < bgx->max_lmac; idx++)
- bgx_set_lmac_config(bgx, idx);
-
- if (!bgx->is_dlm || bgx->is_rgx) {
- bgx_print_qlm_mode(bgx, 0);
- return;
- }
-
- if (bgx->lmac_count) {
- bgx_print_qlm_mode(bgx, 0);
- bgx_print_qlm_mode(bgx, 2);
- }
-
- /* If DLM0 is not in BGX mode then LMAC0/1 have
- * to be configured with serdes lanes of DLM1
- */
- if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
- return;
for (idx = 0; idx < bgx->lmac_count; idx++) {
- lmac01 = &bgx->lmac[idx];
- lmac23 = &bgx->lmac[idx + 2];
- lmac01->lmac_type = lmac23->lmac_type;
- lmac01->lane_to_sds = lmac23->lane_to_sds;
+ bgx_set_lmac_config(bgx, idx);
+ bgx_print_qlm_mode(bgx, idx);
}
}
@@ -1143,13 +1169,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
- bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
- bgx->lmac_count++;
return AE_OK;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index c18ebfeb2039..a60f189429bb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -153,10 +153,15 @@
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_LINK BIT_ULL(2)
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV 0x30010
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_LINKX_TIMER 0x30040
+#define PCS_LINKX_TIMER_COUNT 0x1E84
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_MODE BIT_ULL(8)
#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
int speed = 2;
if (!xcv) {
- dev_err(&xcv->pdev->dev,
- "XCV init not done, probe may have failed\n");
+ pr_err("XCV init not done, probe may have failed\n");
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 86f467a2c485..d56142b98534 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget)
int work_done = process_responses(adapter, budget);
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
writel(adapter->sge->respQ.cidx,
adapter->regs + A_SG_SLEEPING);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 5f226eda8cd6..52063587e1e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -351,7 +351,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
e->smt_idx = smt_idx;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
- if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(neigh->dev))
e->vlan = vlan_dev_vlan_id(neigh->dev);
else
e->vlan = VLAN_NONE;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e4b5b057f417..1b9d154f1149 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
__skb_queue_head_init(&queue);
skb_queue_splice_init(&q->rx_queue, &queue);
if (skb_queue_empty(&queue)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
spin_unlock_irq(&q->lock);
return work_done;
}
@@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int work_done = process_responses(adap, qs, budget);
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/*
* Because we don't atomically flush the following
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0bce1bf9ca0f..163543b1ea0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -263,6 +263,11 @@ struct tp_params {
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
* Filter Tuple portion of filters and TCP TCB connections. The
@@ -581,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */
rspq_handler_t handler;
rspq_flush_handler_t flush_handler;
struct t4_lro_mgr lro_mgr;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define CXGB_POLL_STATE_IDLE 0
-#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
-#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */
-#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */
-#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */
-#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \
- CXGB_POLL_STATE_POLL_YIELD)
-#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \
- CXGB_POLL_STATE_POLL)
-#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \
- CXGB_POLL_STATE_POLL_YIELD)
- unsigned int bpoll_state;
- spinlock_t bpoll_lock; /* lock for busy poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
};
struct sge_eth_stats { /* Ethernet queue statistics */
@@ -782,6 +771,10 @@ struct vf_info {
bool pf_set_mac;
};
+struct mbox_list {
+ struct list_head list;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -844,6 +837,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
+ /* lock for mailbox cmd list */
+ spinlock_t mbox_lock;
+ struct mbox_list mlist;
+
/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
@@ -1160,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
return netdev2pinfo(dev)->adapter;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
- spin_lock_init(&q->bpoll_lock);
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
- bool rc = true;
-
- spin_lock(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_LOCKED) {
- q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- q->bpoll_state = CXGB_POLL_STATE_NAPI;
- }
- spin_unlock(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
- bool rc = false;
-
- spin_lock(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
- rc = true;
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
- spin_unlock(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
- bool rc = true;
-
- spin_lock_bh(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_LOCKED) {
- q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
- rc = false;
- } else {
- q->bpoll_state |= CXGB_POLL_STATE_POLL;
- }
- spin_unlock_bh(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
- bool rc = false;
-
- spin_lock_bh(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
- rc = true;
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
- spin_unlock_bh(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
- return q->bpoll_state & CXGB_POLL_USER_PEND;
-}
-#else
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
- return true;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
- return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* Return a version number to identify the type of adapter. The scheme is:
* - bits 0..9: chip version
* - bits 10..15: chip revision
@@ -1312,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
-int cxgb_busy_poll(struct napi_struct *napi);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;
@@ -1488,6 +1388,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_shutdown_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f951877430b..afb0967d2ce6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -188,18 +188,24 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case 10000:
- s = "10Gbps";
+ case 100:
+ s = "100Mbps";
break;
case 1000:
- s = "1000Mbps";
+ s = "1Gbps";
break;
- case 100:
- s = "100Mbps";
+ case 10000:
+ s = "10Gbps";
+ break;
+ case 25000:
+ s = "25Gbps";
break;
case 40000:
s = "40Gbps";
break;
+ case 100000:
+ s = "100Gbps";
+ break;
default:
pr_info("%s: unsupported speed: %d\n",
dev->name, p->link_cfg.speed);
@@ -738,14 +744,8 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler) {
+ if (q && q->handler)
napi_disable(&q->napi);
- local_bh_disable();
- while (!cxgb_poll_lock_napi(q))
- mdelay(1);
- local_bh_enable();
- }
-
}
}
@@ -776,10 +776,9 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler) {
- cxgb_busy_poll_init_lock(q);
+ if (q->handler)
napi_enable(&q->napi);
- }
+
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
@@ -1806,7 +1805,7 @@ static void check_neigh_update(struct neighbour *neigh)
const struct device *parent;
const struct net_device *netdev = neigh->dev;
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(netdev))
netdev = vlan_dev_real_dev(netdev);
parent = netdev->dev.parent;
if (parent && parent->driver == &cxgb4_driver.driver)
@@ -2112,7 +2111,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
#if IS_ENABLED(CONFIG_BONDING)
struct adapter *adap;
#endif
- if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(event_dev))
event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
if (event_dev->flags & IFF_MASTER) {
@@ -2369,8 +2368,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
-static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *ns)
+static void cxgb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *ns)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
@@ -2383,7 +2382,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_lock(&adapter->stats_lock);
if (!netif_device_present(dev)) {
spin_unlock(&adapter->stats_lock);
- return ns;
+ return;
}
t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
&p->stats_base);
@@ -2401,7 +2400,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
ns->rx_over_errors = 0;
ns->rx_crc_errors = stats.rx_fcs_err;
ns->rx_frame_errors = stats.rx_symbol_err;
- ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
+ ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
stats.rx_ovflow2 + stats.rx_ovflow3 +
stats.rx_trunc0 + stats.rx_trunc1 +
stats.rx_trunc2 + stats.rx_trunc3;
@@ -2417,7 +2416,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
ns->tx_errors = stats.tx_error_frames;
ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
- return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -2578,6 +2576,19 @@ static int cxgb_get_vf_config(struct net_device *dev,
ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
return 0;
}
+
+static int cxgb_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int phy_port_id;
+
+ phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
+ ppid->id_len = sizeof(phy_port_id);
+ memcpy(ppid->id, &phy_port_id, ppid->id_len);
+ return 0;
+}
+
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
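
cxgb_get_phys_port_id() exposes an opaque identifier built as adap_idx * 10 + port_id and copies the raw native-endian bytes of that unsigned int into ppid->id; tools that query the phys_port_id then display those bytes as a hex string. A tiny userspace illustration of what ends up in the buffer (values made up):

#include <stdio.h>
#include <string.h>

#define MAX_PHYS_ITEM_ID_LEN 32

int main(void)
{
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned int phy_port_id = 1 * 10 + 2;	/* adap_idx 1, port_id 2 */
	size_t i, id_len = sizeof(phy_port_id);

	memcpy(id, &phy_port_id, id_len);
	for (i = 0; i < id_len; i++)
		printf("%02x", id[i]);	/* host-endian bytes, e.g. 0c000000 */
	printf("\n");
	return 0;
}
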
@@ -2745,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_fcoe_enable = cxgb_fcoe_enable,
.ndo_fcoe_disable = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = cxgb_busy_poll,
-#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
};
@@ -2757,6 +2765,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_open = dummy_open,
.ndo_set_vf_mac = cxgb_set_vf_mac,
.ndo_get_vf_config = cxgb_get_vf_config,
+ .ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
@@ -2777,8 +2786,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
void t4_fatal_err(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
- t4_intr_disable(adap);
+ int port;
+
+ /* Disable the SGE since ULDs are going to free resources that
+ * could be exposed to the adapter. RDMA MWs for example...
+ */
+ t4_shutdown_adapter(adap);
+ for_each_port(adap, port) {
+ struct net_device *dev = adap->port[port];
+
+ /* If we get here in very early initialization the network
+ * devices may not have been set up yet.
+ */
+ if (!dev)
+ continue;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ }
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
@@ -4397,9 +4422,9 @@ static void print_port_info(const struct net_device *dev)
spd = " 8 GT/s";
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
+ bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
+ bufp += sprintf(bufp, "1G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
@@ -4511,12 +4536,14 @@ static int config_mgmt_dev(struct pci_dev *pdev)
int err;
snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+ netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
+ dummy_setup);
if (!netdev)
return -ENOMEM;
pi = netdev_priv(netdev);
pi->adapter = adap;
+ pi->port_id = adap->pf % adap->params.nports;
SET_NETDEV_DEV(netdev, &pdev->dev);
adap->port[0] = netdev;
@@ -4606,6 +4633,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
+#ifdef CONFIG_PCI_IOV
+ u32 v, port_vec;
+#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4707,6 +4737,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
+ spin_lock_init(&adapter->mbox_lock);
+
+ INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -4874,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"continuing\n");
adapter->params.offload = 0;
} else {
- adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
- CXGB4_MAX_LINK_HANDLE);
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
if (!adapter->tc_u32)
dev_warn(&pdev->dev,
"could not offload tc u32, continuing\n");
@@ -4982,6 +5014,19 @@ sriov:
err = -ENOMEM;
goto free_adapter;
}
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
+ &v, &port_vec);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "Could not fetch port params\n");
+ goto free_adapter;
+ }
+
+ adapter->params.nports = hweight32(port_vec);
pci_set_drvdata(pdev, adapter);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 52af62e0ecb6..a1b19422b339 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
t4_free_mem(adap->tc_u32);
}
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
- unsigned int size)
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
+ unsigned int max_tids = adap->tids.nftids;
struct cxgb4_tc_u32_table *t;
unsigned int i;
- if (!size)
+ if (!max_tids)
return NULL;
t = t4_alloc_mem(sizeof(*t) +
- (size * sizeof(struct cxgb4_link)));
+ (max_tids * sizeof(struct cxgb4_link)));
if (!t)
return NULL;
- t->size = size;
+ t->size = max_tids;
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
unsigned int bmap_size;
- unsigned int max_tids;
- max_tids = adap->tids.nftids;
bmap_size = BITS_TO_LONGS(max_tids);
link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
if (!link->tid_map)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
index 6bdc885eff22..021261a41c13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -37,8 +37,6 @@
#include <net/pkt_cls.h>
-#define CXGB4_MAX_LINK_HANDLE 32
-
static inline bool can_tc_u32_offload(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
@@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls);
void cxgb4_cleanup_tc_u32(struct adapter *adapter);
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
- unsigned int size);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap);
#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 8098902c094a..d0868c2320da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
if (!q)
return;
- if (q->handler) {
- cxgb_busy_poll_init_lock(q);
+ if (q->handler)
napi_enable(&q->napi);
- }
+
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
@@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
- if (q && q->handler) {
+ if (q && q->handler)
napi_disable(&q->napi);
- local_bh_disable();
- while (!cxgb_poll_lock_napi(q))
- mdelay(1);
- local_bh_enable();
- }
}
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -597,7 +591,6 @@ void t4_uld_mem_free(struct adapter *adap)
void t4_uld_clean_up(struct adapter *adap)
{
- struct sge_uld_rxq_info *rxq_info;
unsigned int i;
if (!adap->uld)
@@ -605,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap)
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
continue;
- rxq_info = adap->sge.uld_rxq_info[i];
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, i);
if (adap->flags & USING_MSIX)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 60a26037a1c6..7c8c5b9a3c22 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -432,7 +432,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
else
lport = netdev2pinfo(physdev)->lport;
- if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(neigh->dev))
vlan = vlan_dev_vlan_id(neigh->dev);
else
vlan = VLAN_NONE;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cbd68a8fe2e4..c9026352a842 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
struct ch_sched_params info;
struct ch_sched_params tp;
- memset(&info, 0, sizeof(info));
- memset(&tp, 0, sizeof(tp));
-
memcpy(&tp, p, sizeof(tp));
/* Don't try to match class parameter */
tp.u.params.class = SCHED_CLS_NONE;
@@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
if (e->state == SCHED_STATE_UNUSED)
continue;
- memset(&info, 0, sizeof(info));
memcpy(&info, &e->info, sizeof(info));
/* Don't try to match class parameter */
info.u.params.class = SCHED_CLS_NONE;
@@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (!e)
goto out;
- memset(&np, 0, sizeof(np));
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 9f606478c29c..f05f0d400324 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -43,9 +43,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
-#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1774,15 +1772,20 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
struct sge_uld_txq *txq;
unsigned int idx = skb_txq(skb);
- txq_info = adap->sge.uld_txq_info[tx_uld_type];
- txq = &txq_info->uldtxq[idx];
-
if (unlikely(is_ctrl_pkt(skb))) {
/* Single ctrl queue is a requirement for LE workaround path */
if (adap->tids.nsftids)
idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
}
+
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+ if (unlikely(!txq_info)) {
+ WARN_ON(true);
+ return NET_XMIT_DROP;
+ }
+
+ txq = &txq_info->uldtxq[idx];
return ofld_xmit(txq, skb);
}
@@ -2038,16 +2041,22 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+ u16 err_vec;
struct port_info *pi;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adap->params.tp.rx_pkt_encap)
+ err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
+ else
+ err_vec = be16_to_cpu(pkt->err_vec);
+
+ csum_ok = pkt->csum_calc && !err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP_F)) &&
- !(cxgb_poll_busy_polling(q)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
return 0;
@@ -2092,7 +2101,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
(pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
- if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+ if (q->adap->params.tp.rx_pkt_encap)
+ csum_ok = err_vec &
+ T6_COMPR_RXERR_SUM_F;
+ else
+ csum_ok = err_vec & RXERR_CSUM_F;
+ if (!csum_ok)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
@@ -2273,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget)
return budget - budget_left;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-int cxgb_busy_poll(struct napi_struct *napi)
-{
- struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
- unsigned int params, work_done;
- u32 val;
-
- if (!cxgb_poll_lock_poll(q))
- return LL_FLUSH_BUSY;
-
- work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
- q->next_intr_params = params;
- val = CIDXINC_V(work_done) | SEINTARM_V(params);
-
- /* If we don't have access to the new User GTS (T5+), use the old
- * doorbell mechanism; otherwise use the new BAR2 mechanism.
- */
- if (unlikely(!q->bar2_addr))
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
- val | INGRESSQID_V((u32)q->cntxt_id));
- else {
- writel(val | INGRESSQID_V(q->bar2_qid),
- q->bar2_addr + SGE_UDB_GTS);
- wmb();
- }
-
- cxgb_poll_unlock_poll(q);
- return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* napi_rx_handler - the NAPI handler for Rx processing
* @napi: the napi instance
@@ -2323,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int work_done;
u32 val;
- if (!cxgb_poll_lock_napi(q))
- return budget;
-
work_done = process_responses(q, budget);
if (likely(work_done < budget)) {
int timer_index;
@@ -2365,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
- cxgb_poll_unlock_napi(q);
return work_done;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8139514d32c..87000cd39737 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
+ struct mbox_list entry;
u16 access = 0;
u16 execute = 0;
u32 v;
@@ -311,11 +312,62 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
timeout = -timeout;
}
+ /* Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ spin_lock(&adap->mbox_lock);
+ list_add_tail(&entry.list, &adap->mlist.list);
+ spin_unlock(&adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /* If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+	 * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
+ t4_record_mbox(adap, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (list_first_entry(&adap->mlist.list, struct mbox_list,
+ list) == &entry)
+ break;
+
+ /* Delay for a bit before checking again ... */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+ }
+
+ /* Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
-
if (v != MBOX_OWNER_DRV) {
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
return ret;
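
The new mailbox serialization queues each caller on adap->mlist and spins until its entry is list_first_entry() of that list, backing off with the same delay[] table used later in the function (1, 1, 3, 5, 10, 10, 20, 50, 100, 200 ms, last entry repeating) and giving up with -EBUSY once FW_CMD_MAX_TIMEOUT is exceeded. A small userspace sketch of only the backoff/timeout bookkeeping; the timeout value and the "reached head" condition are stand-ins:

#include <stdio.h>

#define FW_CMD_MAX_TIMEOUT 10000	/* ms; illustrative value only */

int main(void)
{
	static const int delay[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 };
	int delay_idx = 0;
	int ms = delay[0];
	int i;

	for (i = 0; ; i += ms) {
		if (i > FW_CMD_MAX_TIMEOUT) {
			printf("gave up after %d ms -> -EBUSY\n", i);
			break;
		}

		/* Stand-in for: our mbox_list entry is now list_first_entry()
		 * of adap->mlist, i.e. we own the mailbox.
		 */
		if (i >= 25) {
			printf("reached head of queue after ~%d ms\n", i);
			break;
		}

		ms = delay[delay_idx];	/* last element repeats */
		if (delay_idx < (int)(sizeof(delay) / sizeof(delay[0])) - 1)
			delay_idx++;
		/* msleep(ms) or mdelay(ms) in the driver; just counted here */
	}
	return 0;
}
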
@@ -366,6 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -375,6 +430,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
+ t4_fatal_err(adap);
return ret;
}
@@ -5382,22 +5441,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
static const char *const port_type_description[] = {
- "R XFI",
- "R XAUI",
- "T SGMII",
- "T XFI",
- "T XAUI",
+ "Fiber_XFI",
+ "Fiber_XAUI",
+ "BT_SGMII",
+ "BT_XFI",
+ "BT_XAUI",
"KX4",
"CX4",
"KX",
"KR",
- "R SFP+",
- "KR/KX",
- "KR/KX/KX4",
- "R QSFP_10G",
- "R QSA",
- "R QSFP",
- "R BP40_BA",
+ "SFP",
+ "BP_AP",
+ "BP4_AP",
+ "QSFP_10G",
+ "QSA",
+ "QSFP",
+ "BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -5438,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -5469,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATTX_F) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCTX_F)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -5497,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATRX_F) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCRX_F)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
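
On T5 and later, the MPS port counters may include 802.3x pause frames; when MPS_STAT_CTL says pause frames were counted, the driver subtracts them so frame and octet counts reflect data traffic only, accounting each pause frame as 64 octets. A tiny worked example of the TX-side adjustment (field names mirror struct port_stats, numbers are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up raw MAC counters for one port */
	uint64_t tx_frames = 1000000, tx_octets = 900000000, tx_pause = 1500;

	/* equivalent of: if (stat_ctl & COUNTPAUSESTATTX_F) { ... } */
	tx_frames -= tx_pause;
	tx_octets -= tx_pause * 64;	/* a pause frame is a 64-byte frame */

	printf("data frames %llu, data octets %llu\n",
	       (unsigned long long)tx_frames,
	       (unsigned long long)tx_octets);	/* 998500, 899904000 */
	return 0;
}
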
@@ -7477,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
+ * t4_shutdown_adapter - shut down adapter, host & wire
+ * @adapter: the adapter
+ *
+ * Perform an emergency shutdown of the adapter and stop it from
+ * continuing any further communication on the ports or DMA to the
+ * host. This is typically used when the adapter and/or firmware
+ * have crashed and we want to prevent any further accidental
+ * communication with the rest of the world. This will also force
+ * the port Link Status to go down -- if register writes work --
+ * which should help our peers figure out that we're down.
+ */
+int t4_shutdown_adapter(struct adapter *adapter)
+{
+ int port;
+
+ t4_intr_disable(adapter);
+ t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
+ for_each_port(adapter, port) {
+ u32 a_port_cfg = PORT_REG(port,
+ is_t4(adapter->params.chip)
+ ? XGMAC_PORT_CFG_A
+ : MAC_PORT_CFG_A);
+
+ t4_write_reg(adapter, a_port_cfg,
+ t4_read_reg(adapter, a_port_cfg)
+ & ~SIGNAL_DET_V(1));
+ }
+ t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+
+ return 0;
+}
+
+/**
* t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
@@ -7686,6 +7802,13 @@ int t4_init_tp_params(struct adapter *adap)
&adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A);
}
+ /* For T6, cache the adapter's compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, TP_OUT_CONFIG_A);
+ adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index a267173f5997..5043b64805f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1175,6 +1175,21 @@ struct cpl_rx_pkt {
#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
#define RXERR_CSUM_F RXERR_CSUM_V(1U)
+#define T6_COMPR_RXERR_LEN_S 1
+#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S)
+#define T6_COMPR_RXERR_LEN_F T6_COMPR_RXERR_LEN_V(1U)
+
+#define T6_COMPR_RXERR_VEC_S 0
+#define T6_COMPR_RXERR_VEC_M 0x3F
+#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_LEN_S)
+#define T6_COMPR_RXERR_VEC_G(x) \
+ (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M)
+
+/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */
+#define T6_COMPR_RXERR_SUM_S 4
+#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S)
+#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
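
On T6 with CRXPKTENC set in TP_OUT_CONFIG, cpl_rx_pkt.err_vec uses the compressed layout added here: bits [5:0] are the error vector and bit 4 within it is the OR of the checksum errors (RX_ERROR_CSUM | RX_ERROR_CSIP), which is what the sge.c hunk tests with T6_COMPR_RXERR_VEC_G() and T6_COMPR_RXERR_SUM_F. A small self-contained decode using the same shift/mask values; the sample err_vec is made up:

#include <stdio.h>
#include <stdint.h>

#define T6_COMPR_RXERR_VEC_S 0
#define T6_COMPR_RXERR_VEC_M 0x3F
#define T6_COMPR_RXERR_VEC_G(x) \
	(((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M)

#define T6_COMPR_RXERR_SUM_S 4
#define T6_COMPR_RXERR_SUM_F (1U << T6_COMPR_RXERR_SUM_S)

int main(void)
{
	uint16_t raw = 0x0012;	/* made-up err_vec from a T6 CPL_RX_PKT */
	uint16_t vec = T6_COMPR_RXERR_VEC_G(raw);

	printf("error vector 0x%02x, csum error %s\n", vec,
	       (vec & T6_COMPR_RXERR_SUM_F) ? "yes" : "no");
	/* -> error vector 0x12, csum error yes */
	return 0;
}
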
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index ecf3ccc257bc..a323185507ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/
+ CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
/* T6 adapters:
*/
@@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6011),
CH_PCI_ID_TABLE_FENTRY(0x6014),
CH_PCI_ID_TABLE_FENTRY(0x6015),
+ CH_PCI_ID_TABLE_FENTRY(0x6080),
+ CH_PCI_ID_TABLE_FENTRY(0x6081),
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 9fea255c7e87..3348d33c36fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -855,6 +855,14 @@
#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
+#define DBG_GPIO_EN_A 0x6010
+#define XGMAC_PORT_CFG_A 0x1000
+#define MAC_PORT_CFG_A 0x800
+
+#define SIGNAL_DET_S 14
+#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S)
+#define SIGNAL_DET_F SIGNAL_DET_V(1U)
+
#define MC_ECC_STATUS_A 0x751c
#define MC_P_ECC_STATUS_A 0x4131c
@@ -1276,6 +1284,10 @@
#define DBGLARPTR_M 0x7fU
#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
+#define CRXPKTENC_S 3
+#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S)
+#define CRXPKTENC_F CRXPKTENC_V(1U)
+
#define TP_DBG_LA_DATAL_A 0x7ed8
#define TP_DBG_LA_CONFIG_A 0x7ed4
#define TP_OUT_CONFIG_A 0x7d04
@@ -1794,12 +1806,29 @@
#define MPS_CMN_CTL_A 0x9000
+#define COUNTPAUSEMCRX_S 5
+#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S)
+#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U)
+
+#define COUNTPAUSESTATRX_S 4
+#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S)
+#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U)
+
+#define COUNTPAUSEMCTX_S 3
+#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S)
+#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U)
+
+#define COUNTPAUSESTATTX_S 2
+#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S)
+#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U)
+
#define NUMPORTS_S 0
#define NUMPORTS_M 0x3U
#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
#define MPS_INT_CAUSE_A 0x9008
#define MPS_TX_INT_CAUSE_A 0x9408
+#define MPS_STAT_CTL_A 0x9600
#define FRMERR_S 15
#define FRMERR_V(x) ((x) << FRMERR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 2accab386323..5fdaa16426c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0F
-#define T4FW_VERSION_MICRO 0x25
+#define T4FW_VERSION_MINOR 0x10
+#define T4FW_VERSION_MICRO 0x1A
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0F
-#define T5FW_VERSION_MICRO 0x25
+#define T5FW_VERSION_MINOR 0x10
+#define T5FW_VERSION_MICRO 0x1A
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0F
-#define T6FW_VERSION_MICRO 0x25
+#define T6FW_VERSION_MINOR 0x10
+#define T6FW_VERSION_MICRO 0x1A
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0d1a134c8174..ac7a150c54e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -158,20 +158,23 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
netif_carrier_on(dev);
switch (pi->link_cfg.speed) {
- case 40000:
- s = "40Gbps";
+ case 100:
+ s = "100Mbps";
+ break;
+ case 1000:
+ s = "1Gbps";
break;
-
case 10000:
s = "10Gbps";
break;
-
- case 1000:
- s = "1000Mbps";
+ case 25000:
+ s = "25Gbps";
break;
-
- case 100:
- s = "100Mbps";
+ case 40000:
+ s = "40Gbps";
+ break;
+ case 100000:
+ s = "100Gbps";
break;
default:
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f3ed9ce99e5e..e37dde2ba97f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
u32 val;
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 396c88678eab..7a7c02f1f8b9 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
pr_info("mdio write timed out\n");
}
-static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+static int ep93xx_rx(struct net_device *dev, int budget)
{
struct ep93xx_priv *ep = netdev_priv(dev);
+ int processed = 0;
while (processed < budget) {
int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
+ napi_gro_receive(&ep->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ err:
return processed;
}
-static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
-{
- struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
- return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
-}
-
static int ep93xx_poll(struct napi_struct *napi, int budget)
{
struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
struct net_device *dev = ep->dev;
- int rx = 0;
-
-poll_some_more:
- rx = ep93xx_rx(dev, rx, budget);
- if (rx < budget) {
- int more = 0;
+ int rx;
+ rx = ep93xx_rx(dev, budget);
+ if (rx < budget && napi_complete_done(napi, rx)) {
spin_lock_irq(&ep->rx_lock);
- __napi_complete(napi);
wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
- if (ep93xx_have_more_rx(ep)) {
- wrl(ep, REG_INTEN, REG_INTEN_TX);
- wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
- more = 1;
- }
spin_unlock_irq(&ep->rx_lock);
-
- if (more && napi_reschedule(napi))
- goto poll_some_more;
}
if (rx) {
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 9023c858715d..2b23f46b34d3 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -135,6 +135,11 @@ struct enic_rfs_flw_tbl {
struct timer_list rfs_may_expire;
};
+struct vxlan_offload {
+ u16 vxlan_udp_port_number;
+ u8 patch_level;
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -175,6 +180,7 @@ struct enic {
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
unsigned int rq_count;
+ struct vxlan_offload vxlan;
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index cdd7a1a59aa7..4b87beeabce1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -43,10 +43,9 @@
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
#include <linux/crash_dump.h>
+#include <net/busy_poll.h>
+#include <net/vxlan.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -178,6 +177,134 @@ static void enic_unset_affinity_hint(struct enic *enic)
irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}
+static void enic_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct enic *enic = netdev_priv(netdev);
+ __be16 port = ti->port;
+ int err;
+
+ spin_lock_bh(&enic->devcmd_lock);
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
+ netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
+ goto error;
+ }
+
+ if (ti->sa_family != AF_INET) {
+ netdev_info(netdev, "vxlan: only IPv4 offload supported");
+ goto error;
+ }
+
+ if (enic->vxlan.vxlan_udp_port_number) {
+ if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
+ netdev_warn(netdev, "vxlan: udp port already offloaded");
+ else
+ netdev_info(netdev, "vxlan: offload supported for only one UDP port");
+
+ goto error;
+ }
+
+ err = vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ ntohs(port));
+ if (err)
+ goto error;
+
+ err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
+ enic->vxlan.patch_level);
+ if (err)
+ goto error;
+
+ enic->vxlan.vxlan_udp_port_number = ntohs(port);
+
+ netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
+ (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
+
+ goto unlock;
+
+error:
+ netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
+ ntohs(port), ti->sa_family, ti->type);
+unlock:
+ spin_unlock_bh(&enic->devcmd_lock);
+}
+
+static void enic_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int err;
+
+ spin_lock_bh(&enic->devcmd_lock);
+
+ if ((ti->sa_family != AF_INET) ||
+ ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number)) ||
+ (ti->type != UDP_TUNNEL_TYPE_VXLAN)) {
+ netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
+ ntohs(ti->port), ti->sa_family, ti->type);
+ goto unlock;
+ }
+
+ err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_DISABLE);
+ if (err) {
+ netdev_err(netdev, "vxlan: del offload udp port: %d failed",
+ ntohs(ti->port));
+ goto unlock;
+ }
+
+ enic->vxlan.vxlan_udp_port_number = 0;
+
+ netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
+ ntohs(ti->port), ti->sa_family);
+
+unlock:
+ spin_unlock_bh(&enic->devcmd_lock);
+}
+
+static netdev_features_t enic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ struct enic *enic = netdev_priv(dev);
+ struct udphdr *udph;
+ u16 port = 0;
+ u16 proto;
+
+ if (!skb->encapsulation)
+ return features;
+
+ features = vxlan_features_check(skb, features);
+
+ /* hardware only supports IPv4 vxlan tunnel */
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+ goto out;
+
+ /* hardware does not support offload of ipv6 inner pkt */
+	if (eth->h_proto != htons(ETH_P_IP))
+ goto out;
+
+ proto = ip_hdr(skb)->protocol;
+
+ if (proto == IPPROTO_UDP) {
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ }
+
+ /* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
+ * for other UDP port tunnels
+ */
+ if (port != enic->vxlan.vxlan_udp_port_number)
+ goto out;
+
+ return features;
+
+out:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -506,20 +633,19 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
return err;
}
-static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
- struct sk_buff *skb, unsigned int mss,
- int vlan_tag_insert, unsigned int vlan_tag,
- int loopback)
+static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
- unsigned int frag_len_left = skb_headlen(skb);
- unsigned int len_left = skb->len - frag_len_left;
- unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- int eop = (len_left == 0);
- unsigned int len;
- dma_addr_t dma_addr;
- unsigned int offset = 0;
- skb_frag_t *frag;
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ inner_ip_hdr(skb)->check = 0;
+ inner_tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+}
+static void enic_preload_tcp_csum(struct sk_buff *skb)
+{
/* Preload TCP csum field with IP pseudo hdr calculated
* with IP length set to zero. HW will later add in length
* to each TCP segment resulting from the TSO.
@@ -533,6 +659,30 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
+}
+
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb, unsigned int mss,
+ int vlan_tag_insert, unsigned int vlan_tag,
+ int loopback)
+{
+ unsigned int frag_len_left = skb_headlen(skb);
+ unsigned int len_left = skb->len - frag_len_left;
+ int eop = (len_left == 0);
+ unsigned int offset = 0;
+ unsigned int hdr_len;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ skb_frag_t *frag;
+
+ if (skb->encapsulation) {
+ hdr_len = skb_inner_transport_header(skb) - skb->data;
+ hdr_len += inner_tcp_hdrlen(skb);
+ enic_preload_tcp_csum_encap(skb);
+ } else {
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ enic_preload_tcp_csum(skb);
+ }
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for the main skb fragment
@@ -581,6 +731,38 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
return 0;
}
+static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
+ struct sk_buff *skb,
+ int vlan_tag_insert,
+ unsigned int vlan_tag, int loopback)
+{
+ unsigned int head_len = skb_headlen(skb);
+ unsigned int len_left = skb->len - head_len;
+ /* Hardware will overwrite the checksum fields, calculating from
+ * scratch and ignoring the value placed by software.
+ * Offload mode = 00
+ * mss[2], mss[1], mss[0] bits are set
+ */
+ unsigned int mss_or_csum = 7;
+ int eop = (len_left == 0);
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(enic_dma_map_check(enic, dma_addr)))
+ return -ENOMEM;
+
+ enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
+ vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
+ loopback);
+ if (!eop)
+ err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+ return err;
+}
+
static inline void enic_queue_wq_skb(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb)
{
@@ -603,6 +785,9 @@ static inline void enic_queue_wq_skb(struct enic *enic,
err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
vlan_tag_insert, vlan_tag,
loopback);
+ else if (skb->encapsulation)
+ err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
+ vlan_tag, loopback);
else if (skb->ip_summed == CHECKSUM_PARTIAL)
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
@@ -680,8 +865,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
}
/* dev_base_lock rwlock held, nominally process context */
-static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *net_stats)
+static void enic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
@@ -693,7 +878,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
* recorded stats.
*/
if (err == -ENOMEM)
- return net_stats;
+ return;
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -707,8 +892,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
net_stats->rx_over_errors = enic->rq_truncated_pkts;
net_stats->rx_crc_errors = enic->rq_bad_fcs;
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
-
- return net_stats;
}
static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
@@ -1117,6 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
u8 packet_error;
u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
u32 rss_hash;
+ bool outer_csum_ok = true, encap = false;
if (skipped)
return;
@@ -1165,7 +1349,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
- if (netdev->features & NETIF_F_RXHASH) {
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
+ (type == 3)) {
switch (rss_type) {
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
@@ -1179,22 +1364,45 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
break;
}
}
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if ((type == 7) &&
+ (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
/* Hardware does not provide whole packet checksum. It only
 * provides a pseudo checksum. Since hw validates the packet
 * checksum but does not provide us the checksum value, use
 * CHECKSUM_UNNECESSARY.
+ *
+	 * In case of an encap pkt, tcp_udp_csum_ok is the inner csum_ok.
+	 * outer_csum_ok is set by hw when the outer udp
+ * csum is correct or is zero.
*/
- if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
- ipv4_csum_ok)
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ }
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (enic_poll_busy_polling(rq) ||
- !(netdev->features & NETIF_F_GRO))
+ if (!(netdev->features & NETIF_F_GRO))
netif_receive_skb(skb);
else
napi_gro_receive(&enic->napi[q_number], skb);
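For context, a hedged restatement of what the completion handler above reports to the stack for a tunnelled frame (the condition names mirror the local variables used in the hunk; nothing new is introduced):

	/* CHECKSUM_UNNECESSARY plus csum_level tells the stack how many
	 * checksum levels hardware already validated: csum_level = 0
	 * covers only the outermost L4 checksum, csum_level = 1 also
	 * covers one encapsulated (inner) checksum, as for VxLAN here.
	 */
	if (tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = encap ? 1 : 0;
	}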
@@ -1298,15 +1506,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
enic_wq_service, NULL);
- if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
- if (wq_work_done > 0)
- vnic_intr_return_credits(&enic->intr[intr],
- wq_work_done,
- 0 /* dont unmask intr */,
- 0 /* dont reset intr timer */);
- return budget;
- }
-
if (budget > 0)
rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
rq_work_to_do, enic_rq_service, NULL);
@@ -1325,7 +1524,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1345,7 +1543,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
* exit polling
*/
- napi_complete(napi);
+ napi_complete_done(napi, rq_work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1392,34 +1590,6 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
#endif /* CONFIG_RFS_ACCEL */
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int enic_busy_poll(struct napi_struct *napi)
-{
- struct net_device *netdev = napi->dev;
- struct enic *enic = netdev_priv(netdev);
- unsigned int rq = (napi - &enic->napi[0]);
- unsigned int cq = enic_cq_rq(enic, rq);
- unsigned int intr = enic_msix_rq_intr(enic, rq);
- unsigned int work_to_do = -1; /* clean all pkts possible */
- unsigned int work_done;
-
- if (!enic_poll_lock_poll(&enic->rq[rq]))
- return LL_FLUSH_BUSY;
- work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
- enic_rq_service, NULL);
-
- if (work_done > 0)
- vnic_intr_return_credits(&enic->intr[intr],
- work_done, 0, 0);
- vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_calc_int_moderation(enic, &enic->rq[rq]);
- enic_poll_unlock_poll(&enic->rq[rq]);
-
- return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1461,8 +1631,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
unsigned int work_done = 0;
int err;
- if (!enic_poll_lock_napi(&enic->rq[rq]))
- return budget;
/* Service RQ
*/
@@ -1495,14 +1663,13 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
- enic_poll_unlock_napi(&enic->rq[rq], napi);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1753,10 +1920,9 @@ static int enic_open(struct net_device *netdev)
netif_tx_wake_all_queues(netdev);
- for (i = 0; i < enic->rq_count; i++) {
- enic_busy_poll_init_lock(&enic->rq[i]);
+ for (i = 0; i < enic->rq_count; i++)
napi_enable(&enic->napi[i]);
- }
+
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
@@ -1800,13 +1966,8 @@ static int enic_stop(struct net_device *netdev)
enic_dev_disable(enic);
- for (i = 0; i < enic->rq_count; i++) {
+ for (i = 0; i < enic->rq_count; i++)
napi_disable(&enic->napi[i]);
- local_bh_disable();
- while (!enic_poll_lock_napi(&enic->rq[i]))
- mdelay(1);
- local_bh_enable();
- }
netif_carrier_off(netdev);
netif_tx_disable(netdev);
@@ -2337,9 +2498,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = enic_busy_poll,
-#endif
+ .ndo_udp_tunnel_add = enic_udp_tunnel_add,
+ .ndo_udp_tunnel_del = enic_udp_tunnel_del,
+ .ndo_features_check = enic_features_check,
};
static const struct net_device_ops enic_netdev_ops = {
@@ -2363,9 +2524,9 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = enic_busy_poll,
-#endif
+ .ndo_udp_tunnel_add = enic_udp_tunnel_add,
+ .ndo_udp_tunnel_del = enic_udp_tunnel_del,
+ .ndo_features_check = enic_features_check,
};
static void enic_dev_deinit(struct enic *enic)
@@ -2741,6 +2902,39 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXHASH;
if (ENIC_SETTING(enic, RXCSUM))
netdev->hw_features |= NETIF_F_RXCSUM;
+ if (ENIC_SETTING(enic, VXLAN)) {
+ u64 patch_level;
+
+ netdev->hw_enc_features |= NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_HW_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= netdev->hw_enc_features;
+		/* Get the bit mask of offload patch levels supported by hw:
+ * BIT(0) = fw supports patch_level 0
+ * fcoe bit = encap
+ * fcoe_fc_crc_ok = outer csum ok
+ * BIT(1) = always set by fw
+ * BIT(2) = fw supports patch_level 2
+ * BIT(0) in rss_hash = encap
+ * BIT(1,2) in rss_hash = outer_ip_csum_ok/
+ * outer_tcp_csum_ok
+ * used in enic_rq_indicate_buf
+ */
+ err = vnic_dev_get_supported_feature_ver(enic->vdev,
+ VIC_FEATURE_VXLAN,
+ &patch_level);
+ if (err)
+ patch_level = 0;
+ /* mask bits that are supported by driver
+ */
+ patch_level &= BIT_ULL(0) | BIT_ULL(2);
+ patch_level = fls(patch_level);
+ patch_level = patch_level ? patch_level - 1 : 0;
+ enic->vxlan.patch_level = patch_level;
+ }
netdev->features |= netdev->hw_features;
netdev->vlan_features |= netdev->features;
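A hedged, standalone restatement of the patch-level selection above (the helper name is invented for illustration): the firmware's supported-versions bitmap is masked to the levels this driver implements, bits 0 and 2, and the highest surviving bit index becomes the negotiated level.

static u8 example_pick_vxlan_patch_level(u64 supported_versions)
{
	u32 known = supported_versions & (BIT_ULL(0) | BIT_ULL(2));

	/* e.g. a bitmap of 0x7 yields level 2, 0x3 yields level 0,
	 * and 0x0 (or a failed devcmd) falls back to level 0
	 */
	return known ? fls(known) - 1 : 0;
}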
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 8f27df3207bc..1841ad45d215 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1247,3 +1247,37 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
return ret;
}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
+ u64 *supported_versions)
+{
+ u64 a0 = feature;
+ int wait = 1000;
+ u64 a1 = 0;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ if (!ret)
+ *supported_versions = a0;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 54156c484424..9d43d6bb9907 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -179,5 +179,10 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter *data);
int vnic_devcmd_init(struct vnic_dev *vdev);
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number);
+int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
+ u64 *supported_versions);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 2a812880b884..d83880b0d468 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -406,6 +406,31 @@ enum vnic_devcmd_cmd {
* in: (u32) a0=Queue Pair number
*/
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
+
+ /* Use this devcmd for agreeing on the highest common version supported
+	 * by both driver and fw for features that need such a facility.
+	 * in: (u64) a0 = feature (driver requests the supported versions
+	 *	of this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /* Control (Enable/Disable) overlay offloads on the given vnic
+ * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+ * a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
+ */
+ CMD_OVERLAY_OFFLOAD_CTRL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+
+ /* Configuration of overlay offloads feature on a given vNIC
+ * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
+ * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
+ * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a2 = unsigned short int port information
+ */
+ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
};
/* CMD_ENABLE2 flags */
@@ -657,4 +682,30 @@ struct devcmd2_result {
#define DEVCMD2_RING_SIZE 32
#define DEVCMD2_DESC_SIZE 128
+enum overlay_feature_t {
+ OVERLAY_FEATURE_NVGRE = 1,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_FEATURE_MAX,
+};
+
+enum overlay_ofld_cmd {
+ OVERLAY_OFFLOAD_ENABLE,
+ OVERLAY_OFFLOAD_DISABLE,
+ OVERLAY_OFFLOAD_ENABLE_P2,
+ OVERLAY_OFFLOAD_MAX,
+};
+
+#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/* Use this enum to get the supported versions for each of these features.
+ * If you need to use devcmd_get_supported_feature_version(), add
+ * the new feature to this enum and install a function handler in devcmd.c.
+ */
+enum vic_feature_t {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_VXLAN_PATCH,
+ VIC_FEATURE_MAX,
+};
+
#endif /* _VNIC_DEVCMD_H_ */
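Taken together, the new devcmds are meant to be driven roughly as sketched below, spread across enic_probe() and enic_udp_tunnel_add() in the hunks above; vdev and port are assumed to come from the caller, and error handling and locking are trimmed:

	u64 versions = 0;
	u8 level = 0;

	/* 1. ask fw which VxLAN offload versions it supports */
	if (!vnic_dev_get_supported_feature_ver(vdev, VIC_FEATURE_VXLAN,
						&versions))
		level = (versions & BIT_ULL(2)) ? 2 : 0;

	/* 2. tell fw which UDP destination port carries VxLAN traffic */
	vnic_dev_overlay_offload_cfg(vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE,
				     ntohs(port));

	/* 3. enable the offload at the negotiated level */
	vnic_dev_overlay_offload_ctrl(vdev, OVERLAY_FEATURE_VXLAN, level);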
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 75aced2de869..7d6fbb5635a4 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -48,6 +48,7 @@ struct vnic_enet_config {
#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
#define VENETF_LOOP 0x800 /* Loopback enabled */
+#define VENETF_VXLAN 0x10000 /* VxLAN offload */
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index b9c82f143d7e..0413103ebe94 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -92,9 +92,6 @@ struct vnic_rq {
struct vnic_rq_buf *to_clean;
void *os_buf_head;
unsigned int pkts_outstanding;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- atomic_t bpoll_state;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -207,81 +204,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
return 0;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
-{
- atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
-{
- int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
- ENIC_POLL_STATE_NAPI);
-
- return (rc == ENIC_POLL_STATE_IDLE);
-}
-
-static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
- struct napi_struct *napi)
-{
- WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
- napi_gro_flush(napi, false);
- atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
-{
- int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
- ENIC_POLL_STATE_POLL);
-
- return (rc == ENIC_POLL_STATE_IDLE);
-}
-
-
-static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
-{
- WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
- atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
-{
- return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
-}
-
-#else
-
-static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
-{
-}
-
-static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
-{
- return true;
-}
-
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
- struct napi_struct *napi)
-{
- return false;
-}
-
-static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
-{
- return false;
-}
-
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
-{
- return false;
-}
-
-static inline bool enic_poll_ll_polling(struct vnic_rq *rq)
-{
- return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 57c17e797ae3..127ce9707378 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1485,95 +1485,104 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
de_rx_missed(de, rbuf[8]);
}
-static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+static int __de_get_link_ksettings(struct de_private *de,
+ struct ethtool_link_ksettings *cmd)
{
- ecmd->supported = de->media_supported;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = 0;
- ecmd->advertising = de->media_advertise;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ de->media_supported);
+ cmd->base.phy_address = 0;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ de->media_advertise);
switch (de->media_type) {
case DE_MEDIA_AUI:
- ecmd->port = PORT_AUI;
+ cmd->base.port = PORT_AUI;
break;
case DE_MEDIA_BNC:
- ecmd->port = PORT_BNC;
+ cmd->base.port = PORT_BNC;
break;
default:
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
}
- ethtool_cmd_speed_set(ecmd, 10);
+ cmd->base.speed = 10;
if (dr32(MacMode) & FullDuplex)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
if (de->media_lock)
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
else
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
/* ignore maxtxpkt, maxrxpkt for now */
return 0;
}
-static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+static int __de_set_link_ksettings(struct de_private *de,
+ const struct ethtool_link_ksettings *cmd)
{
u32 new_media;
unsigned int media_lock;
+ u8 duplex = cmd->base.duplex;
+ u8 port = cmd->base.port;
+ u8 autoneg = cmd->base.autoneg;
+ u32 advertising;
- if (ethtool_cmd_speed(ecmd) != 10)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.speed != 10)
return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
+ if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
return -EINVAL;
- if (de->de21040 && ecmd->port == PORT_BNC)
+ if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
+ if (de->de21040 && port == PORT_BNC)
return -EINVAL;
- if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
return -EINVAL;
- if (ecmd->advertising & ~de->media_supported)
+ if (advertising & ~de->media_supported)
return -EINVAL;
- if (ecmd->autoneg == AUTONEG_ENABLE &&
- (!(ecmd->advertising & ADVERTISED_Autoneg)))
+ if (autoneg == AUTONEG_ENABLE &&
+ (!(advertising & ADVERTISED_Autoneg)))
return -EINVAL;
- switch (ecmd->port) {
+ switch (port) {
case PORT_AUI:
new_media = DE_MEDIA_AUI;
- if (!(ecmd->advertising & ADVERTISED_AUI))
+ if (!(advertising & ADVERTISED_AUI))
return -EINVAL;
break;
case PORT_BNC:
new_media = DE_MEDIA_BNC;
- if (!(ecmd->advertising & ADVERTISED_BNC))
+ if (!(advertising & ADVERTISED_BNC))
return -EINVAL;
break;
default:
- if (ecmd->autoneg == AUTONEG_ENABLE)
+ if (autoneg == AUTONEG_ENABLE)
new_media = DE_MEDIA_TP_AUTO;
- else if (ecmd->duplex == DUPLEX_FULL)
+ else if (duplex == DUPLEX_FULL)
new_media = DE_MEDIA_TP_FD;
else
new_media = DE_MEDIA_TP;
- if (!(ecmd->advertising & ADVERTISED_TP))
+ if (!(advertising & ADVERTISED_TP))
return -EINVAL;
- if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
+ if (!(advertising & (ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half)))
return -EINVAL;
break;
}
- media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
+ media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;
if ((new_media == de->media_type) &&
(media_lock == de->media_lock) &&
- (ecmd->advertising == de->media_advertise))
+ (advertising == de->media_advertise))
return 0; /* nothing to change */
de_link_down(de);
@@ -1582,7 +1591,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
de->media_type = new_media;
de->media_lock = media_lock;
- de->media_advertise = ecmd->advertising;
+ de->media_advertise = advertising;
de_set_media(de);
if (netif_running(de->dev))
de_start_rxtx(de);
@@ -1604,25 +1613,27 @@ static int de_get_regs_len(struct net_device *dev)
return DE_REGS_SIZE;
}
-static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int de_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct de_private *de = netdev_priv(dev);
int rc;
spin_lock_irq(&de->lock);
- rc = __de_get_settings(de, ecmd);
+ rc = __de_get_link_ksettings(de, cmd);
spin_unlock_irq(&de->lock);
return rc;
}
-static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int de_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct de_private *de = netdev_priv(dev);
int rc;
spin_lock_irq(&de->lock);
- rc = __de_set_settings(de, ecmd);
+ rc = __de_set_link_ksettings(de, cmd);
spin_unlock_irq(&de->lock);
return rc;
@@ -1690,13 +1701,13 @@ static const struct ethtool_ops de_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_drvinfo = de_get_drvinfo,
.get_regs_len = de_get_regs_len,
- .get_settings = de_get_settings,
- .set_settings = de_set_settings,
.get_msglevel = de_get_msglevel,
.set_msglevel = de_set_msglevel,
.get_eeprom = de_get_eeprom,
.nway_reset = de_nway_reset,
.get_regs = de_get_regs,
+ .get_link_ksettings = de_get_link_ksettings,
+ .set_link_ksettings = de_set_link_ksettings,
};
static void de21040_get_mac_address(struct de_private *de)
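The tulip, dlink, sundance and benet updates that follow apply the same get_settings → get_link_ksettings recipe used above: scalar fields move into cmd->base, and the legacy u32 SUPPORTED_ and ADVERTISED_ masks are translated into link-mode bitmaps. A hedged skeleton of that recipe, with struct example_priv and its fields as placeholders for whatever state a given driver already tracks:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct example_priv *p = netdev_priv(dev);

	cmd->base.speed   = p->speed;       /* was ethtool_cmd_speed_set() */
	cmd->base.duplex  = p->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port    = PORT_MII;
	cmd->base.autoneg = p->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* legacy u32 masks become link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->advertising);
	return 0;
}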
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 92306b320840..ba6ae24acf62 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Remove us from polling list and enable RX intr. */
- napi_complete(napi);
- iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+ napi_complete_done(napi, work_done);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
/* The last op happens after poll completion. Which means the following:
* 1. it can race with disabling irqs in irq handler
@@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
* before we did napi_complete(). See? We would lose it. */
/* remove ourselves from the polling list */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
return work_done;
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index f82ebe5d89ee..8d98b259d1ba 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -926,48 +926,53 @@ static void uli526x_set_filter_mode(struct net_device * dev)
}
static void
-ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db,
+ struct ethtool_link_ksettings *cmd)
{
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ u32 supported, advertising;
+
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_MII);
- ecmd->advertising = (ADVERTISED_10baseT_Half |
+ advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_Autoneg |
ADVERTISED_MII);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- ecmd->port = PORT_MII;
- ecmd->phy_address = db->phy_addr;
-
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = db->phy_addr;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
{
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
}
if(db->link_failed)
{
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
if (db->media_mode & ULI526X_AUTO)
{
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
}
}
@@ -981,10 +986,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
struct uli526x_board_info *np = netdev_priv(dev);
- ULi_ethtool_gset(np, cmd);
+ ULi_ethtool_get_link_ksettings(np, cmd);
return 0;
}
@@ -1006,9 +1013,9 @@ static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
.get_link = netdev_get_link,
.get_wol = uli526x_get_wol,
+ .get_link_ksettings = netdev_get_link_ksettings,
};
/*
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index bc9bf88e5831..d1f2f3cc7cfa 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1391,25 +1391,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_gset(&np->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return rc;
@@ -1439,12 +1441,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 8c95a8a81e3c..1e350135f11d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1256,52 +1256,63 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
-static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rio_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
+ u32 supported, advertising;
+
if (np->phy_media) {
/* fiber device */
- cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
+ supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
} else {
/* copper device */
- cmd->supported = SUPPORTED_10baseT_Half |
+ supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_MII;
- cmd->advertising = ADVERTISED_10baseT_Half |
+ advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
+ ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
ADVERTISED_Autoneg | ADVERTISED_MII;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_MII;
}
- if ( np->link_status ) {
- ethtool_cmd_speed_set(cmd, np->speed);
- cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ if (np->link_status) {
+ cmd->base.speed = np->speed;
+ cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- if ( np->an_enable)
- cmd->autoneg = AUTONEG_ENABLE;
+ if (np->an_enable)
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ cmd->base.phy_address = np->phy_addr;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- cmd->phy_address = np->phy_addr;
return 0;
}
-static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rio_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
+ u32 speed = cmd->base.speed;
+ u8 duplex = cmd->base.duplex;
+
netif_carrier_off(dev);
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (np->an_enable)
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (np->an_enable) {
return 0;
- else {
+ } else {
np->an_enable = 1;
mii_set_media(dev);
return 0;
@@ -1309,18 +1320,18 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
np->an_enable = 0;
if (np->speed == 1000) {
- ethtool_cmd_speed_set(cmd, SPEED_100);
- cmd->duplex = DUPLEX_FULL;
+ speed = SPEED_100;
+ duplex = DUPLEX_FULL;
printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
}
- switch (ethtool_cmd_speed(cmd)) {
+ switch (speed) {
case SPEED_10:
np->speed = 10;
- np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+ np->full_duplex = (duplex == DUPLEX_FULL);
break;
case SPEED_100:
np->speed = 100;
- np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+ np->full_duplex = (duplex == DUPLEX_FULL);
break;
case SPEED_1000: /* not supported */
default:
@@ -1339,9 +1350,9 @@ static u32 rio_get_link(struct net_device *dev)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = rio_get_drvinfo,
- .get_settings = rio_get_settings,
- .set_settings = rio_set_settings,
.get_link = rio_get_link,
+ .get_link_ksettings = rio_get_link_ksettings,
+ .set_link_ksettings = rio_set_link_ksettings,
};
static int
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 2e5b66762e15..2704bcf023be 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1664,21 +1664,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
- mii_ethtool_gset(&np->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return 0;
}
-static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int res;
spin_lock_irq(&np->lock);
- res = mii_ethtool_sset(&np->mii_if, ecmd);
+ res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return res;
}
@@ -1800,8 +1802,6 @@ static int sundance_set_wol(struct net_device *dev,
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = get_drvinfo,
- .get_settings = get_settings,
- .set_settings = set_settings,
.nway_reset = nway_reset,
.get_link = get_link,
.get_wol = sundance_get_wol,
@@ -1811,6 +1811,8 @@ static const struct ethtool_ops ethtool_ops = {
.get_strings = get_strings,
.get_sset_count = get_sset_count,
.get_ethtool_stats = get_ethtool_stats,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2a17c59f69f9..3e77dd863175 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
/* We processed all packets available. Tell NAPI it can
* stop polling then re-enable rx interrupts.
*/
- napi_complete(napi);
+ napi_complete_done(napi, npackets);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 7bf78a0d322c..278f139f2a22 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -457,7 +457,7 @@ static int ec_bhf_stop(struct net_device *net_dev)
return 0;
}
-static struct rtnl_link_stats64 *
+static void
ec_bhf_get_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
@@ -472,8 +472,6 @@ ec_bhf_get_stats(struct net_device *net_dev,
stats->tx_bytes = priv->stat_tx_bytes;
stats->rx_bytes = priv->stat_rx_bytes;
-
- return stats;
}
static const struct net_device_ops ec_bhf_netdev_ops = {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4c30c44b242e..d49528ad7821 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -226,11 +226,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
u64 tx_reqs_prev; /* Used to calculate TX pps */
};
-enum {
- NAPI_POLLING,
- BUSY_POLLING
-};
-
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529a4209..30e855004c57 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 0a48a31225e6..7d1819c9e8cc 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -606,7 +606,8 @@ bool be_pause_supported(struct be_adapter *adapter)
false : true;
}
-static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int be_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
u8 link_status;
@@ -614,13 +615,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
int status;
u32 auto_speeds;
u32 fixed_speeds;
+ u32 supported = 0, advertising = 0;
if (adapter->phy.link_speed < 0) {
status = be_cmd_link_status_query(adapter, &link_speed,
&link_status, 0);
if (!status)
be_link_status_update(adapter, link_status);
- ethtool_cmd_speed_set(ecmd, link_speed);
+ cmd->base.speed = link_speed;
status = be_cmd_get_phy_info(adapter);
if (!status) {
@@ -629,58 +631,51 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
be_cmd_query_cable_type(adapter);
- ecmd->supported =
+ supported =
convert_to_et_setting(adapter,
auto_speeds |
fixed_speeds);
- ecmd->advertising =
+ advertising =
convert_to_et_setting(adapter, auto_speeds);
- ecmd->port = be_get_port_type(adapter);
+ cmd->base.port = be_get_port_type(adapter);
if (adapter->phy.auto_speeds_supported) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |= ADVERTISED_Autoneg;
+ supported |= SUPPORTED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ advertising |= ADVERTISED_Autoneg;
}
- ecmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
if (be_pause_supported(adapter))
- ecmd->advertising |= ADVERTISED_Pause;
-
- switch (adapter->phy.interface_type) {
- case PHY_TYPE_KR_10GB:
- case PHY_TYPE_KX4_10GB:
- ecmd->transceiver = XCVR_INTERNAL;
- break;
- default:
- ecmd->transceiver = XCVR_EXTERNAL;
- break;
- }
+ advertising |= ADVERTISED_Pause;
} else {
- ecmd->port = PORT_OTHER;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.autoneg = AUTONEG_DISABLE;
}
/* Save for future use */
- adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
- adapter->phy.port_type = ecmd->port;
- adapter->phy.transceiver = ecmd->transceiver;
- adapter->phy.autoneg = ecmd->autoneg;
- adapter->phy.advertising = ecmd->advertising;
- adapter->phy.supported = ecmd->supported;
+ adapter->phy.link_speed = cmd->base.speed;
+ adapter->phy.port_type = cmd->base.port;
+ adapter->phy.autoneg = cmd->base.autoneg;
+ adapter->phy.advertising = advertising;
+ adapter->phy.supported = supported;
} else {
- ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
- ecmd->port = adapter->phy.port_type;
- ecmd->transceiver = adapter->phy.transceiver;
- ecmd->autoneg = adapter->phy.autoneg;
- ecmd->advertising = adapter->phy.advertising;
- ecmd->supported = adapter->phy.supported;
+ cmd->base.speed = adapter->phy.link_speed;
+ cmd->base.port = adapter->phy.port_type;
+ cmd->base.autoneg = adapter->phy.autoneg;
+ advertising = adapter->phy.advertising;
+ supported = adapter->phy.supported;
}
- ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
- ecmd->phy_address = adapter->port_num;
+ cmd->base.duplex = netif_carrier_ok(netdev) ?
+ DUPLEX_FULL : DUPLEX_UNKNOWN;
+ cmd->base.phy_address = adapter->port_num;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
@@ -1399,7 +1394,6 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags)
}
const struct ethtool_ops be_ethtool_ops = {
- .get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
@@ -1433,5 +1427,6 @@ const struct ethtool_ops be_ethtool_ops = {
.get_channels = be_get_channels,
.set_channels = be_set_channels,
.get_module_info = be_get_module_info,
- .get_module_eeprom = be_get_module_eeprom
+ .get_module_eeprom = be_get_module_eeprom,
+ .get_link_ksettings = be_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ec010ced6c99..6be3b9aba8ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
+ /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
+ * address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -355,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-done:
+
+ /* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -638,8 +647,8 @@ void be_parse_stats(struct be_adapter *adapter)
}
}
-static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void be_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -703,7 +712,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
drvs->rx_input_fifo_overflow_drop +
drvs->rx_drops_no_pbuf;
- return stats;
}
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
@@ -3055,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -3087,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
goto loop_continue;
}
- /* Don't do gro when we're busy_polling */
- if (do_gro(rxcp) && polling != BUSY_POLLING)
+ if (do_gro(rxcp))
be_rx_compl_process_gro(rxo, napi, rxcp);
else
be_rx_compl_process(rxo, napi, rxcp);
@@ -3186,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
}
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock(&eqo->lock); /* BH is already disabled */
- if (eqo->state & BE_EQ_LOCKED) {
- WARN_ON(eqo->state & BE_EQ_NAPI);
- eqo->state |= BE_EQ_NAPI_YIELD;
- status = false;
- } else {
- eqo->state = BE_EQ_NAPI;
- }
- spin_unlock(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
- spin_lock(&eqo->lock); /* BH is already disabled */
-
- WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock_bh(&eqo->lock);
- if (eqo->state & BE_EQ_LOCKED) {
- eqo->state |= BE_EQ_POLL_YIELD;
- status = false;
- } else {
- eqo->state |= BE_EQ_POLL;
- }
- spin_unlock_bh(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_bh(&eqo->lock);
-
- WARN_ON(eqo->state & (BE_EQ_NAPI));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_init(&eqo->lock);
- eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
- local_bh_disable();
-
- /* It's enough to just acquire napi lock on the eqo to stop
- * be_busy_poll() from processing any queueus.
- */
- while (!be_lock_napi(eqo))
- mdelay(1);
-
- local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -3300,25 +3207,20 @@ int be_poll(struct napi_struct *napi, int budget)
for_all_tx_queues_on_eq(adapter, eqo, txo, i)
be_process_tx(adapter, txo, i);
- if (be_lock_napi(eqo)) {
- /* This loop will iterate twice for EQ0 in which
- * completions of the last RXQ (default one) are also processed
- * For other EQs the loop iterates only once
- */
- for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
- work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
- max_work = max(work, max_work);
- }
- be_unlock_napi(eqo);
- } else {
- max_work = budget;
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed
+ * For other EQs the loop iterates only once
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget);
+ max_work = max(work, max_work);
}
if (is_mcc_eqo(eqo))
be_process_mcc(adapter);
if (max_work < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, max_work);
/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
* delay via a delay multiplier encoding value
@@ -3335,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int be_busy_poll(struct napi_struct *napi)
-{
- struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter = eqo->adapter;
- struct be_rx_obj *rxo;
- int i, work = 0;
-
- if (!be_lock_busy_poll(eqo))
- return LL_FLUSH_BUSY;
-
- for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
- work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
- if (work)
- break;
- }
-
- be_unlock_busy_poll(eqo);
- return work;
-}
-#endif
-
void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@ -3609,7 +3489,13 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ eth_zero_addr(adapter->dev_mac);
+ }
+
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3655,7 +3541,6 @@ static int be_close(struct net_device *netdev)
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi);
- be_disable_busy_poll(eqo);
}
adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
}
@@ -3762,11 +3647,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
- /* For BE3 VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ /* Normally this condition usually true as the ->dev_mac is zeroed.
+ * But on BE3 VFs the initial MAC is pre-programmed by PF and
+ * subsequent be_dev_mac_add() can fail (after fresh boot)
+ */
+ if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+ int old_pmac_id = -1;
+
+ /* Remember old programmed MAC if any - can happen on BE3 VF */
+ if (!is_zero_ether_addr(adapter->dev_mac))
+ old_pmac_id = adapter->pmac_id[0];
+
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
+
+ /* Delete the old programmed MAC as we successfully programmed
+ * a new MAC
+ */
+ if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+ be_dev_mac_del(adapter, old_pmac_id);
+
ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
@@ -3809,7 +3710,6 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
- be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -4540,6 +4440,10 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+ /* Initial MAC for BE3 VFs is already programmed by PF */
+ if (BEx_chip(adapter) && be_virtfn(adapter))
+ memcpy(adapter->dev_mac, mac, ETH_ALEN);
}
return 0;
@@ -5211,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = be_busy_poll,
-#endif
.ndo_udp_tunnel_add = be_add_vxlan_port,
.ndo_udp_tunnel_del = be_del_vxlan_port,
.ndo_features_check = be_features_check,
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 45abc81f6f55..23d82748f52b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -180,8 +180,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* struct ethoc - driver-private device structure
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
- * @dma_alloc: dma allocated buffer size
- * @io_region_size: I/O memory region size
* @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
@@ -199,8 +197,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
struct ethoc {
void __iomem *iobase;
void __iomem *membase;
- int dma_alloc;
- resource_size_t io_region_size;
bool big_endian;
unsigned int num_bd;
@@ -618,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget)
tx_work_done = ethoc_tx(priv->netdev, budget);
if (rx_work_done < budget && tx_work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
}
@@ -999,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev,
return 0;
}
-const struct ethtool_ops ethoc_ethtool_ops = {
+static const struct ethtool_ops ethoc_ethtool_ops = {
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
.nway_reset = phy_ethtool_nway_reset,
@@ -1035,7 +1031,6 @@ static int ethoc_probe(struct platform_device *pdev)
struct ethoc *priv = NULL;
int num_bd;
int ret = 0;
- bool random_mac = false;
struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
@@ -1096,8 +1091,6 @@ static int ethoc_probe(struct platform_device *pdev)
/* setup driver-private data */
priv = netdev_priv(netdev);
priv->netdev = netdev;
- priv->dma_alloc = 0;
- priv->io_region_size = resource_size(mmio);
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
resource_size(mmio));
@@ -1127,7 +1120,6 @@ static int ethoc_probe(struct platform_device *pdev)
goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
- priv->dma_alloc = buffer_size;
}
priv->big_endian = pdata ? pdata->big_endian :
@@ -1176,16 +1168,11 @@ static int ethoc_probe(struct platform_device *pdev)
	/* Check the MAC again for validity; if it still isn't valid, choose
	 * and program a random one.
	 */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- eth_random_addr(netdev->dev_addr);
- random_mac = true;
- }
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
ethoc_do_set_mac_address(netdev);
- if (random_mac)
- netdev->addr_assign_type = NET_ADDR_RANDOM;
-
/* Allow the platform setup code to adjust MII management bus clock. */
if (!eth_clkfreq) {
struct clk *clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 223f35cc034c..992ebe973d25 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
u32 buf_int_enable_value = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* set tx_done and rx_rdy bits */
buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index dce5f7b7f772..c0ddbbe6c226 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -825,16 +825,18 @@ static void ftmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ftmac100_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ftmac100 *priv = netdev_priv(netdev);
- return mii_ethtool_gset(&priv->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
}
-static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ftmac100_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ftmac100 *priv = netdev_priv(netdev);
- return mii_ethtool_sset(&priv->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&priv->mii, cmd);
}
static int ftmac100_nway_reset(struct net_device *netdev)
@@ -850,11 +852,11 @@ static u32 ftmac100_get_link(struct net_device *netdev)
}
static const struct ethtool_ops ftmac100_ethtool_ops = {
- .set_settings = ftmac100_set_settings,
- .get_settings = ftmac100_get_settings,
.get_drvinfo = ftmac100_get_drvinfo,
.nway_reset = ftmac100_nway_reset,
.get_link = ftmac100_get_link,
+ .get_link_ksettings = ftmac100_get_link_ksettings,
+ .set_link_ksettings = ftmac100_set_link_ksettings,
};
/******************************************************************************
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 9cb436cb3745..766636a7c25e 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1817,25 +1817,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_gset(&np->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
@@ -1865,12 +1867,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index c9b7ad65e563..e2ca107f9d94 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -313,8 +313,8 @@ static void dpaa_tx_timeout(struct net_device *net_dev)
/* Calculates the statistics for the given device by adding the statistics
* collected by each CPU.
*/
-static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *s)
+static void dpaa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *s)
{
int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
struct dpaa_priv *priv = netdev_priv(net_dev);
@@ -332,8 +332,6 @@ static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
for (j = 0; j < numstats; j++)
netstats[j] += cpustats[j];
}
-
- return s;
}
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
@@ -1668,7 +1666,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
free_buffers:
/* compensate sw bpool counter changes */
- for (i--; i > 0; i--) {
+ for (i--; i >= 0; i--) {
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -2003,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
int cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
} else if (np->down) {
@@ -2335,6 +2333,13 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ if (!net_dev->phydev)
+ return -EINVAL;
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+}
+
static const struct net_device_ops dpaa_ops = {
.ndo_open = dpaa_open,
.ndo_start_xmit = dpaa_start_xmit,
@@ -2344,6 +2349,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
+ .ndo_do_ioctl = dpaa_ioctl,
};
static int dpaa_napi_add(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 27e7044667d1..15571e251fb9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -72,8 +72,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
-static int dpaa_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *et_cmd)
+static int dpaa_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
int err;
@@ -82,13 +82,13 @@ static int dpaa_get_settings(struct net_device *net_dev,
return 0;
}
- err = phy_ethtool_gset(net_dev->phydev, et_cmd);
+ err = phy_ethtool_ksettings_get(net_dev->phydev, cmd);
return err;
}
-static int dpaa_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *et_cmd)
+static int dpaa_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
int err;
@@ -97,9 +97,9 @@ static int dpaa_set_settings(struct net_device *net_dev,
return -ENODEV;
}
- err = phy_ethtool_sset(net_dev->phydev, et_cmd);
+ err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if (err < 0)
- netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err);
+ netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
return err;
}
@@ -402,8 +402,6 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
}
const struct ethtool_ops dpaa_ethtool_ops = {
- .get_settings = dpaa_get_settings,
- .set_settings = dpaa_set_settings,
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
.set_msglevel = dpaa_set_msglevel,
@@ -414,4 +412,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_sset_count = dpaa_get_sset_count,
.get_ethtool_stats = dpaa_get_ethtool_stats,
.get_strings = dpaa_get_strings,
+ .get_link_ksettings = dpaa_get_link_ksettings,
+ .set_link_ksettings = dpaa_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38160c2bebcb..91a16641e851 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
fec_enet_tx(ndev);
if (pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
return pkts;
@@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev)
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
+ unsigned int hash_high = 0, hash_low = 0;
if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev)
return;
}
- /* Clear filter and add the addresses in hash register
- */
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+ /* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
crc = 0xffffffff;
@@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev)
*/
hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
- if (hash > 31) {
- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- tmp |= 1 << (hash - 32);
- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- } else {
- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- tmp |= 1 << hash;
- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- }
+ if (hash > 31)
+ hash_high |= 1 << (hash - 32);
+ else
+ hash_low |= 1 << hash;
}
+
+ writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
/* Set a MAC change in hardware. */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index c88918c4c5f3..84ea130eed36 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -337,7 +337,7 @@ struct fman_mac {
u8 mac_id;
u32 exceptions;
bool ptp_tsu_enabled;
- bool en_tsu_err_exeption;
+ bool en_tsu_err_exception;
struct dtsec_cfg *dtsec_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
@@ -1247,12 +1247,12 @@ int dtsec_set_exception(struct fman_mac *dtsec,
switch (exception) {
case FM_MAC_EX_1G_1588_TS_RX_ERR:
if (enable) {
- dtsec->en_tsu_err_exeption = true;
+ dtsec->en_tsu_err_exception = true;
iowrite32be(ioread32be(&regs->tmr_pemask) |
TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
} else {
- dtsec->en_tsu_err_exeption = false;
+ dtsec->en_tsu_err_exception = false;
iowrite32be(ioread32be(&regs->tmr_pemask) &
~TMR_PEMASK_TSREEN,
&regs->tmr_pemask);
@@ -1420,7 +1420,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
dtsec->event_cb = params->event_cb;
dtsec->dev_id = params->dev_id;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
- dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
+ dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 71a5ded9d1de..cd6a53eaf161 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
/* PCS registers */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 1f98838f32b7..753259091b22 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (received < budget && tx_left) {
/* done */
- napi_complete(napi);
+ napi_complete_done(napi, received);
(*fep->ops->napi_enable)(dev);
return received;
@@ -964,11 +964,10 @@ static int fs_enet_probe(struct platform_device *ofdev)
*/
clk = devm_clk_get(&ofdev->dev, "per");
if (!IS_ERR(clk)) {
- err = clk_prepare_enable(clk);
- if (err) {
- ret = err;
+ ret = clk_prepare_enable(clk);
+ if (ret)
goto out_deregister_fixed_link;
- }
+
fpi->clk_per = clk;
}
@@ -1045,10 +1044,10 @@ out_cleanup_data:
out_free_dev:
free_netdev(ndev);
out_put:
- of_node_put(fpi->phy_node);
if (fpi->clk_per)
clk_disable_unprepare(fpi->clk_per);
out_deregister_fixed_link:
+ of_node_put(fpi->phy_node);
if (of_phy_is_fixed_link(ofdev->dev.of_node))
of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa878be..0ff166ec3e7e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
if (!rxb->page)
continue;
- dma_unmap_single(rx_queue->dev, rxb->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
}
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
@@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
if (work_done < budget) {
u32 imask;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
@@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget)
if (!num_act_queues) {
u32 imask;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9d660888510f..3f7ae9f64cd8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, howmany);
setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 97b184774784..0cec06bec63e 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -555,7 +555,7 @@ refill:
priv->reg_inten |= RCV_INT;
writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}
- napi_complete(napi);
+ napi_complete_done(napi, rx);
done:
/* clean up tx descriptors and start a new timer if necessary */
tx_remaining = hip04_tx_reclaim(ndev, false);
@@ -701,11 +701,6 @@ static void hip04_tx_timeout_task(struct work_struct *work)
hip04_mac_open(priv->ndev);
}
-static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
-{
- return &ndev->stats;
-}
-
static int hip04_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -764,7 +759,6 @@ static const struct ethtool_ops hip04_ethtool_ops = {
static const struct net_device_ops hip04_netdev_ops = {
.ndo_open = hip04_mac_open,
.ndo_stop = hip04_mac_stop,
- .ndo_get_stats = hip04_get_stats,
.ndo_start_xmit = hip04_mac_start_xmit,
.ndo_set_mac_address = hip04_set_mac_address,
.ndo_tx_timeout = hip04_timeout,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 979852d56f31..2c2808830e95 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget)
} while (ints & DEF_INT_MASK);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
hisi_femac_irq_enable(priv, DEF_INT_MASK &
(~IRQ_INT_TX_PER_PACKET));
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 418ca1f3774a..25a6c8722eca 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget)
} while (ints & DEF_INT_MASK);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
hix5hd2_irq_enable(priv);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 87226685f742..8fa18fc17cd2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1014,9 +1014,7 @@
static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
- writel(value, reg_addr + reg);
+ writel(value, base + reg);
}
#define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
- return readl(reg_addr + reg);
+ return readl(base + reg);
}
static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 672b64606321..fca37e2c7f01 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;
@@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
skb->protocol = eth_type_trans(skb, ndev);
(void)napi_gro_receive(&ring_data->napi, skb);
- ndev->last_rx = jiffies;
}
static int hns_desc_unused(struct hnae_ring *ring)
@@ -1203,43 +1202,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv)
struct hns_nic_ring_data *rd;
int i;
int cpu;
- cpumask_t mask;
+ cpumask_var_t mask;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return;
	/* different irq balance for 16-core and 32-core */
if (h->q_num == num_possible_cpus()) {
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index)) {
- cpumask_clear(&mask);
+ cpumask_clear(mask);
cpu = rd->queue_index;
- cpumask_set_cpu(cpu, &mask);
+ cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
- &mask);
+ mask);
}
}
} else {
for (i = 0; i < h->q_num; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index * 2)) {
- cpumask_clear(&mask);
+ cpumask_clear(mask);
cpu = rd->queue_index * 2;
- cpumask_set_cpu(cpu, &mask);
+ cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
- &mask);
+ mask);
}
}
for (i = h->q_num; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
if (cpu_online(rd->queue_index * 2 + 1)) {
- cpumask_clear(&mask);
+ cpumask_clear(mask);
cpu = rd->queue_index * 2 + 1;
- cpumask_set_cpu(cpu, &mask);
+ cpumask_set_cpu(cpu, mask);
(void)irq_set_affinity_hint(rd->ring->irq,
- &mask);
+ mask);
}
}
}
+
+ free_cpumask_var(mask);
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
@@ -1625,8 +1629,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev)
netdev_err(ndev, "sync uc address fail\n");
}
-struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
+static void hns_nic_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
{
int idx = 0;
u64 tx_bytes = 0;
@@ -1668,8 +1672,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
stats->tx_window_errors = ndev->stats.tx_window_errors;
stats->rx_compressed = ndev->stats.rx_compressed;
stats->tx_compressed = ndev->stats.tx_compressed;
-
- return stats;
}
static u16
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 85a3866459cf..4f58d338d739 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -31,9 +31,11 @@
#include "ehea.h"
#include "ehea_phyp.h"
-static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ehea_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ehea_port *port = netdev_priv(dev);
+ u32 supported, advertising;
u32 speed;
int ret;
@@ -60,68 +62,75 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed = -1;
break; /* BUG */
}
- cmd->duplex = port->full_duplex == 1 ?
+ cmd->base.duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
speed = SPEED_UNKNOWN;
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_cmd_speed_set(cmd, speed);
+ cmd->base.speed = speed;
- if (cmd->speed == SPEED_10000) {
- cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- cmd->port = PORT_FIBRE;
+ if (cmd->base.speed == SPEED_10000) {
+ supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ cmd->base.port = PORT_FIBRE;
} else {
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+ supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
| SUPPORTED_TP);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
+ advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
| ADVERTISED_TP);
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
}
- cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.autoneg = port->autoneg == 1 ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ehea_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ehea_port *port = netdev_priv(dev);
int ret = 0;
u32 sp;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
sp = EHEA_SPEED_AUTONEG;
goto doit;
}
- switch (cmd->speed) {
+ switch (cmd->base.speed) {
case SPEED_10:
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
sp = H_SPEED_10M_F;
else
sp = H_SPEED_10M_H;
break;
case SPEED_100:
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
sp = H_SPEED_100M_F;
else
sp = H_SPEED_100M_H;
break;
case SPEED_1000:
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
sp = H_SPEED_1G_F;
else
ret = -EINVAL;
break;
case SPEED_10000:
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
sp = H_SPEED_10G_F;
else
ret = -EINVAL;
@@ -264,7 +273,6 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops ehea_ethtool_ops = {
- .get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
.set_msglevel = ehea_set_msglevel,
@@ -272,8 +280,9 @@ static const struct ethtool_ops ehea_ethtool_ops = {
.get_strings = ehea_get_strings,
.get_sset_count = ehea_get_sset_count,
.get_ethtool_stats = ehea_get_ethtool_stats,
- .set_settings = ehea_set_settings,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
+ .get_link_ksettings = ehea_get_link_ksettings,
+ .set_link_ksettings = ehea_set_link_ksettings,
};
void ehea_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 702446a93697..1e53d7a82675 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -328,8 +328,8 @@ out:
spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
-static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void ehea_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct ehea_port *port = netdev_priv(dev);
u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
@@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
stats->multicast = port->stats.multicast;
stats->rx_errors = port->stats.rx_errors;
- return stats;
}
static void ehea_update_stats(struct work_struct *work)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5909615c27f7..6ead2335a169 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1991,69 +1991,79 @@ static struct mal_commac_ops emac_commac_sg_ops = {
};
/* Ethtool support */
-static int emac_ethtool_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct emac_instance *dev = netdev_priv(ndev);
+ u32 supported, advertising;
- cmd->supported = dev->phy.features;
- cmd->port = PORT_MII;
- cmd->phy_address = dev->phy.address;
- cmd->transceiver =
- dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
+ supported = dev->phy.features;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = dev->phy.address;
mutex_lock(&dev->link_lock);
- cmd->advertising = dev->phy.advertising;
- cmd->autoneg = dev->phy.autoneg;
- cmd->speed = dev->phy.speed;
- cmd->duplex = dev->phy.duplex;
+ advertising = dev->phy.advertising;
+ cmd->base.autoneg = dev->phy.autoneg;
+ cmd->base.speed = dev->phy.speed;
+ cmd->base.duplex = dev->phy.duplex;
mutex_unlock(&dev->link_lock);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int emac_ethtool_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int
+emac_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct emac_instance *dev = netdev_priv(ndev);
u32 f = dev->phy.features;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
- cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
/* Basic sanity checks */
if (dev->phy.address < 0)
return -EOPNOTSUPP;
- if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
return -EINVAL;
- if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE) {
- switch (cmd->speed) {
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ switch (cmd->base.speed) {
case SPEED_10:
- if (cmd->duplex == DUPLEX_HALF &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
!(f & SUPPORTED_10baseT_Half))
return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_FULL &&
!(f & SUPPORTED_10baseT_Full))
return -EINVAL;
break;
case SPEED_100:
- if (cmd->duplex == DUPLEX_HALF &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
!(f & SUPPORTED_100baseT_Half))
return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_FULL &&
!(f & SUPPORTED_100baseT_Full))
return -EINVAL;
break;
case SPEED_1000:
- if (cmd->duplex == DUPLEX_HALF &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
!(f & SUPPORTED_1000baseT_Half))
return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_FULL &&
!(f & SUPPORTED_1000baseT_Full))
return -EINVAL;
break;
@@ -2062,8 +2072,8 @@ static int emac_ethtool_set_settings(struct net_device *ndev,
}
mutex_lock(&dev->link_lock);
- dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
- cmd->duplex);
+ dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
+ cmd->base.duplex);
mutex_unlock(&dev->link_lock);
} else {
@@ -2072,7 +2082,7 @@ static int emac_ethtool_set_settings(struct net_device *ndev,
mutex_lock(&dev->link_lock);
dev->phy.def->ops->setup_aneg(&dev->phy,
- (cmd->advertising & f) |
+ (advertising & f) |
(dev->phy.advertising &
(ADVERTISED_Pause |
ADVERTISED_Asym_Pause)));
@@ -2234,8 +2244,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
}
static const struct ethtool_ops emac_ethtool_ops = {
- .get_settings = emac_ethtool_get_settings,
- .set_settings = emac_ethtool_set_settings,
.get_drvinfo = emac_ethtool_get_drvinfo,
.get_regs_len = emac_ethtool_get_regs_len,
@@ -2251,6 +2259,8 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = emac_ethtool_get_link_ksettings,
+ .set_link_ksettings = emac_ethtool_set_link_ksettings,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index aaf6fec566b5..cd3227b088b7 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
int n;
if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
continue;
- n = mc->ops->poll_rx(mc->dev, budget);
+ n = mc->ops->poll_rx(mc->dev, budget - received);
if (n) {
received += n;
- budget -= n;
- if (budget <= 0)
- goto more_work; // XXX What if this is the last one ?
+ if (received >= budget)
+ return budget;
}
}
- /* We need to disable IRQs to protect from RXDE IRQ here */
- spin_lock_irqsave(&mal->lock, flags);
- __napi_complete(napi);
- mal_enable_eob_irq(mal);
- spin_unlock_irqrestore(&mal->lock, flags);
+ if (napi_complete_done(napi, received)) {
+ /* We need to disable IRQs to protect from RXDE IRQ here */
+ spin_lock_irqsave(&mal->lock, flags);
+ mal_enable_eob_irq(mal);
+ spin_unlock_irqrestore(&mal->lock, flags);
+ }
/* Check for "rotting" packet(s) */
list_for_each(l, &mal->poll_list) {
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a831f947ca8c..72ab7b6bf20b 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -729,20 +729,26 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ u32 supported, advertising;
+
+ supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
ADVERTISED_FIBRE);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -978,11 +984,11 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
.get_link = ethtool_op_get_link,
.get_strings = ibmveth_get_strings,
.get_sset_count = ibmveth_get_sset_count,
.get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .get_link_ksettings = netdev_get_link_ksettings,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1320,7 +1326,7 @@ restart_poll:
ibmveth_replenish_task(adapter);
if (frames_processed < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, frames_processed);
/* We think we are done - reenable interrupts,
* then check once more to make sure we are done.
@@ -1601,8 +1607,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c12596676bbb..9198e6bd5160 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
}
ltb->map_id = adapter->map_id;
adapter->map_id++;
+
+ init_completion(&adapter->fw_done);
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
- init_completion(&adapter->fw_done);
wait_for_completion(&adapter->fw_done);
return 0;
}
@@ -505,7 +506,7 @@ rx_pool_alloc_failed:
adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ napi_disable(&adapter->napi[i]);
alloc_napi_failed:
return -ENOMEM;
}
@@ -987,7 +988,7 @@ restart_poll:
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- napi_complete(napi);
+ napi_complete_done(napi, frames_processed);
if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
napi_reschedule(napi)) {
disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1025,21 +1026,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
/* ethtool functions */
-static int ibmvnic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int ibmvnic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ u32 supported, advertising;
+
+ supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
ADVERTISED_FIBRE);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -1121,10 +1127,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
crq.request_statistics.len =
cpu_to_be32(sizeof(struct ibmvnic_statistics));
- ibmvnic_send_crq(adapter, &crq);
/* Wait for data to be written */
init_completion(&adapter->stats_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->stats_done);
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -1132,7 +1138,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
- .get_settings = ibmvnic_get_settings,
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
.set_msglevel = ibmvnic_set_msglevel,
@@ -1141,6 +1146,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_strings = ibmvnic_get_strings,
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
+ .get_link_ksettings = ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs */
@@ -1254,8 +1260,6 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
}
adapter->rx_scrq = NULL;
}
-
- adapter->requested_caps = 0;
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
@@ -1277,8 +1281,6 @@ static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
adapter->rx_scrq[i]);
adapter->rx_scrq = NULL;
}
-
- adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
@@ -1496,7 +1498,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->req_rx_queues = adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
- adapter->req_mtu = adapter->max_mtu;
+ adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
}
total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
@@ -1566,30 +1568,36 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -1597,12 +1605,14 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
@@ -1954,112 +1964,112 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
- atomic_set(&adapter->running_cap_queries, 0);
+ atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
@@ -2185,12 +2195,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
if (!found) {
dev_err(dev, "Couldn't find error id %x\n",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
return;
}
dev_err(dev, "Detailed info for error id %x:",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
for (i = 0; i < error_buff->len; i++) {
pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2269,8 +2279,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
crq->error_indication.
flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- crq->error_indication.error_id,
- crq->error_indication.error_cause);
+ be32_to_cpu(crq->error_indication.error_id),
+ be16_to_cpu(crq->error_indication.error_cause));
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
@@ -2347,6 +2357,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
u64 *req_value;
char *name;
+ atomic_dec(&adapter->running_cap_crqs);
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -2388,10 +2399,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be32_to_cpu(crq->request_capability_rsp.
+ (long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
release_sub_crqs_no_irqs(adapter);
- *req_value = be32_to_cpu(crq->request_capability_rsp.number);
+ *req_value = be64_to_cpu(crq->request_capability_rsp.number);
init_sub_crqs(adapter, 1);
return;
default:
@@ -2401,12 +2412,13 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (++adapter->requested_caps == 7) {
+ if (atomic_read(&adapter->running_cap_crqs) == 0) {
union ibmvnic_crq newcrq;
int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
&adapter->ip_offload_buf;
+ adapter->wait_capability = false;
adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
buf_sz,
DMA_FROM_DEVICE);
@@ -2542,9 +2554,9 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
struct device *dev = &adapter->vdev->dev;
long rc;
- atomic_dec(&adapter->running_cap_queries);
+ atomic_dec(&adapter->running_cap_crqs);
netdev_dbg(netdev, "Outstanding queries: %d\n",
- atomic_read(&adapter->running_cap_queries));
+ atomic_read(&adapter->running_cap_crqs));
rc = crq->query_capability.rc.code;
if (rc) {
dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
@@ -2626,12 +2638,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
break;
case MIN_MTU:
adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
- netdev->min_mtu = adapter->min_mtu;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
break;
case MAX_MTU:
adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
- netdev->max_mtu = adapter->max_mtu;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
break;
case MAX_MULTICAST_FILTERS:
@@ -2702,9 +2714,11 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_queries) == 0)
+ if (atomic_read(&adapter->running_cap_crqs) == 0) {
+ adapter->wait_capability = false;
init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
+ }
}
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
@@ -2799,9 +2813,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3414,6 +3428,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
struct ibmvnic_adapter *adapter = instance;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->crq.lock, flags);
+ vio_disable_interrupts(adapter->vdev);
+ tasklet_schedule(&adapter->tasklet);
+ spin_unlock_irqrestore(&adapter->crq.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void ibmvnic_tasklet(void *data)
+{
+ struct ibmvnic_adapter *adapter = data;
struct ibmvnic_crq_queue *queue = &adapter->crq;
struct vio_dev *vdev = adapter->vdev;
union ibmvnic_crq *crq;
@@ -3435,11 +3461,19 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
} else {
- done = true;
+ /* remain in tasklet until all
+ * capabilities responses are received
+ */
+ if (!adapter->wait_capability)
+ done = true;
}
}
+	/* if capability CRQs were sent in this tasklet, the following
+ * tasklet must wait until all responses are received
+ */
+ if (atomic_read(&adapter->running_cap_crqs) != 0)
+ adapter->wait_capability = true;
spin_unlock_irqrestore(&queue->lock, flags);
- return IRQ_HANDLED;
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
@@ -3494,6 +3528,7 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Releasing CRQ\n");
free_irq(vdev->irq, adapter);
+ tasklet_kill(&adapter->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -3539,6 +3574,9 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
+ tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
+ (unsigned long)adapter);
+
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
adapter);
@@ -3560,6 +3598,7 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
return retrc;
req_irq_failed:
+ tasklet_kill(&adapter->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -3581,9 +3620,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
memset(&crq, 0, sizeof(crq));
crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3629,8 +3668,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
}
}
- send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
+ send_version_xchg(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
@@ -3640,9 +3679,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Passive init timeout\n");
@@ -3656,9 +3695,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
- netdev->mtu = adapter->req_mtu;
- netdev->min_mtu = adapter->min_mtu;
- netdev->max_mtu = adapter->max_mtu;
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
if (adapter->failover) {
adapter->failover = false;
@@ -3772,9 +3809,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->debugfs_dump = ent;
}
}
- ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
+ ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout))
return 0;
@@ -3782,9 +3819,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout))
return 0;
@@ -3798,7 +3835,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
- netdev->mtu = adapter->req_mtu;
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index dd775d951b73..422824f1f42a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -976,11 +976,11 @@ struct ibmvnic_adapter {
dma_addr_t login_rsp_buf_token;
int login_rsp_buf_sz;
- atomic_t running_cap_queries;
+ atomic_t running_cap_crqs;
+ bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
- int requested_caps;
bool renegotiate;
/* rx structs */
@@ -1049,5 +1049,6 @@ struct ibmvnic_adapter {
struct work_struct vnic_crq_init;
struct work_struct ibmvnic_xport;
+ struct tasklet_struct tasklet;
bool failover;
};
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 25c6dfd500b4..2b7323d392dc 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
e100_enable_irq(nic);
}
@@ -2426,19 +2426,21 @@ err_clean_rx:
#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
-static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
- return mii_ethtool_gset(&nic->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&nic->mii, cmd);
}
-static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
int err;
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
- err = mii_ethtool_sset(&nic->mii, cmd);
+ err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
e100_exec_cb(nic, NULL, e100_configure);
return err;
@@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static const struct ethtool_ops e100_ethtool_ops = {
- .get_settings = e100_get_settings,
- .set_settings = e100_set_settings,
.get_drvinfo = e100_get_drvinfo,
.get_regs_len = e100_get_regs_len,
.get_regs = e100_get_regs,
@@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
.get_ethtool_stats = e100_get_ethtool_stats,
.get_sset_count = e100_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = e100_get_link_ksettings,
+ .set_link_ksettings = e100_set_link_ksettings,
};
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
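The e100 change above replaces the legacy `get_settings`/`set_settings` ethtool hooks with the `link_ksettings` pair. Below is a minimal sketch of what a `get_link_ksettings` implementation looks like in the new API, assuming a hypothetical fixed 100 Mb/s full-duplex MII device; real drivers such as e100 usually delegate to library helpers (here, the mii library) instead of filling the fields by hand.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical: report a fixed 100 Mb/s full-duplex MII link. */
static int my_get_link_ksettings(struct net_device *netdev,
				 struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);

	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_MII;
	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
	/* the legacy .get_settings/.set_settings hooks are gone */
	.get_link_ksettings = my_get_link_ksettings,
};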
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 879cca47b021..a29b12e80855 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring);
int e1000e_setup_tx_resources(struct e1000_ring *ring);
void e1000e_free_rx_resources(struct e1000_ring *ring);
void e1000e_free_tx_resources(struct e1000_ring *ring);
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats);
+void e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index eccf1da9356b..2175cced402f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
@@ -5920,12 +5920,11 @@ static void e1000_reset_task(struct work_struct *work)
*
* Returns the address of the device statistics structure.
**/
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+void e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
/* Fill out the OS statistics structure */
@@ -5958,7 +5957,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
/* Tx Dropped needs to be maintained elsewhere */
spin_unlock(&adapter->stats64_lock);
- return stats;
}
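This hunk is part of a tree-wide change in which `ndo_get_stats64` implementations (and helpers such as `e1000e_get_stats64`) stop returning the stats pointer: the core zeroes the structure before the callback runs and ignores any return value, so the driver only fills in fields. A minimal sketch of the new shape, with a hypothetical private structure holding the counters:

#include <linux/netdevice.h>

struct my_priv {
	u64 rx_packets;
	u64 tx_packets;
};

/* New-style stats callback: void return, @stats arrives already zeroed. */
static void my_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct my_priv *priv = netdev_priv(netdev);

	stats->rx_packets = priv->rx_packets;
	stats->tx_packets = priv->tx_packets;
	/* no memset() and no return value needed any more */
}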
/**
@@ -6276,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
/* Quiesce the device without resetting the hardware */
e1000e_down(adapter, false);
e1000_free_irq(adapter);
+ e1000e_reset_interrupt_capability(adapter);
}
- e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 4d19e46f7c55..52b979443cde 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -260,9 +260,7 @@ struct fm10k_intfc {
#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
-#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3))
int xcast_mode;
/* Tx fast path data */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index dd95ac4f4c64..62a6ad9b3eed 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* if we somehow dropped the Tx enable we should reset */
- if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
@@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
/* interface cannot receive traffic without logical ports */
if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
- if (hw->mac.ops.request_lport_map)
- ret_val = hw->mac.ops.request_lport_map(hw);
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
goto out;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 5241e0873397..0c84fef750f4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -148,7 +148,7 @@ enum {
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};
-static void fm10k_add_stat_strings(char **p, const char *prefix,
+static void fm10k_add_stat_strings(u8 **p, const char *prefix,
const struct fm10k_stats stats[],
const unsigned int size)
{
@@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix,
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- char *p = (char *)data;
unsigned int i;
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
FM10K_NETDEV_STATS_LEN);
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
FM10K_GLOBAL_STATS_LEN);
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
FM10K_MBX_STATS_LEN);
if (interface->hw.mac.type != fm10k_mac_vf)
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
FM10K_PF_STATS_LEN);
for (i = 0; i < interface->hw.mac.max_queues; i++) {
char prefix[ETH_GSTRING_LEN];
snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
- fm10k_add_stat_strings(&p, prefix,
+ fm10k_add_stat_strings(&data, prefix,
fm10k_gstrings_queue_stats,
FM10K_QUEUE_STATS_LEN);
snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
- fm10k_add_stat_strings(&p, prefix,
+ fm10k_add_stat_strings(&data, prefix,
fm10k_gstrings_queue_stats,
FM10K_QUEUE_STATS_LEN);
}
@@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
static void fm10k_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
- char *p = (char *)data;
-
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *fm10k_gstrings_test,
+ memcpy(data, fm10k_gstrings_test,
FM10K_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
fm10k_get_stat_strings(dev, data);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(p, fm10k_prv_flags,
+ memcpy(data, fm10k_prv_flags,
FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
break;
}
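The fm10k string helpers above now take the ethtool `u8 *data` buffer directly (as `u8 **`) instead of casting through a local `char *`. A minimal sketch of such a helper, with a hypothetical stat descriptor; each name occupies a fixed ETH_GSTRING_LEN slot and the caller's cursor is advanced in place:

#include <linux/ethtool.h>
#include <linux/kernel.h>

/* Hypothetical stat descriptor, similar in spirit to fm10k_stats. */
struct my_stat {
	char name[ETH_GSTRING_LEN];
};

static void my_add_stat_strings(u8 **p, const char *prefix,
				const struct my_stat stats[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		snprintf((char *)*p, ETH_GSTRING_LEN, "%s%s",
			 prefix, stats[i].name);
		*p += ETH_GSTRING_LEN;	/* advance the caller's cursor */
	}
}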
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5de937852436..5bb233a9614c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.21.2-k"
+#define DRV_VERSION "0.21.7-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
@@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/**
* fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_buffer: buffer containing page to add
+ * @size: packet size from rx_desc
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
@@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
* true if the buffer can be reused by the interface.
**/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
+ unsigned int size,
union fm10k_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
@@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
union fm10k_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ unsigned int size = le16_to_cpu(rx_desc->w.length);
struct fm10k_rx_buffer *rx_buffer;
struct page *page;
@@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- FM10K_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
+ if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
fm10k_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -473,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
+ FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
+
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
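In the fm10k receive path above, the descriptor length is now read once in `fm10k_fetch_rx_buffer()` and passed down, so the CPU sync covers only the bytes the hardware actually wrote rather than the whole FM10K_RX_BUFSZ buffer. A minimal sketch of that idea with hypothetical names (`my_rx_buffer`, `MY_BUFSZ`):

#include <linux/dma-mapping.h>

#define MY_BUFSZ 2048	/* hypothetical receive buffer size */

struct my_rx_buffer {
	dma_addr_t dma;
	unsigned int page_offset;
};

/* Sync only @size bytes (what the NIC wrote) instead of MY_BUFSZ. */
static void my_sync_rx_buffer(struct device *dev, struct my_rx_buffer *buf,
			      unsigned int size)
{
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->page_offset,
				      size, DMA_FROM_DEVICE);
}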
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c9dfa6564fcf..334088a101c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
* function can also be used to respond to an error as the connection
* resetting would also be a means of dealing with errors.
**/
-static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
- struct fm10k_mbx_info *mbx)
+static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
{
+ s32 err = 0;
const enum fm10k_mbx_state state = mbx->state;
switch (state) {
@@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_OPEN:
/* flush any incomplete work */
fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
break;
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
@@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
}
fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
}
/**
@@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
case 0:
- fm10k_sm_mbx_process_reset(hw, mbx);
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
break;
case FM10K_SM_MBX_VERSION:
err = fm10k_sm_mbx_process_version_1(hw, mbx);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index bc5ef6eb3dd6..01db688cf539 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
* Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
* function replaces fm10k_get_stats for kernels which support it.
*/
-static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void fm10k_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *ring;
@@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
/* following stats updated by fm10k_service_task() */
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-
- return stats;
}
int fm10k_setup_tc(struct net_device *dev, u8 tc)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b1a2f8437d59..e372a5823480 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
struct fm10k_hw *hw = &interface->hw;
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 eicr;
+ s32 err = 0;
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
- mbx->ops.process(hw, mbx);
+ err = mbx->ops.process(hw, mbx);
/* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
+ if (err == FM10K_ERR_RESET_REQUESTED)
+ interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+
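Together with the `fm10k_sm_mbx_process_reset()` change earlier in this patch, the mailbox `process` callback can now report that the switch asked for a reset, and the PF interrupt handler above translates that into the reset-request flag for the service task. A minimal sketch of the propagation pattern, with hypothetical error/flag values standing in for the fm10k ones:

/* Hypothetical error code and flag, standing in for the fm10k ones. */
#define MY_ERR_RESET_REQUESTED	(-10)
#define MY_FLAG_RESET_REQUESTED	(1u << 0)

struct my_interface {
	unsigned int flags;
};

/* Stand-in for mbx->ops.process(): returns 0 or a negative error. */
static int my_mbx_process(void)
{
	return MY_ERR_RESET_REQUESTED;	/* e.g. the peer asked for a reset */
}

/* Interrupt-handler side: convert the mailbox error into a flag that the
 * service task acts on later, outside interrupt context.
 */
static void my_service_mbx(struct my_interface *iface)
{
	int err = my_mbx_process();

	if (err == MY_ERR_RESET_REQUESTED)
		iface->flags |= MY_FLAG_RESET_REQUESTED;
}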
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 23fb319fd2a0..40ee0242a80a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -72,10 +72,6 @@ force_reset:
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
- /* Reset mailbox global interrupts */
- reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
- fm10k_write_reg(hw, FM10K_GMBX, reg);
-
/* Verify we made it out of reset */
reg = fm10k_read_reg(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ba8d30984bee..82d8040fa418 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -134,19 +134,6 @@
/* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
-/**
- * i40e_is_mac_710 - Return true if MAC is X710/XL710
- * @hw: ptr to the hardware info
- **/
-static inline bool i40e_is_mac_710(struct i40e_hw *hw)
-{
- if ((hw->mac.type == I40E_MAC_X710) ||
- (hw->mac.type == I40E_MAC_XL710))
- return true;
-
- return false;
-}
-
/* driver state flags */
enum i40e_state_t {
__I40E_TESTING,
@@ -361,6 +348,8 @@ struct i40e_pf {
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -480,6 +469,22 @@ struct i40e_mac_filter {
enum i40e_filter_state state;
};
+/* Wrapper structure to keep track of filters while we are preparing to send
+ * firmware commands. We cannot send firmware commands while holding a
+ * spinlock, since it might sleep. To avoid this, we wrap the added filters in
+ * a separate structure, which will track the state change and update the real
+ * filter while under lock. We can't simply hold the filters in a separate
+ * list, as this opens a window for a race condition when adding new MAC
+ * addresses to all VLANs, or when adding new VLANs to all MAC addresses.
+ */
+struct i40e_new_mac_filter {
+ struct hlist_node hlist;
+ struct i40e_mac_filter *f;
+
+ /* Track future changes to state separately */
+ enum i40e_filter_state state;
+};
+
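The comment above gives the motivation for `i40e_new_mac_filter`: filters headed to firmware are wrapped so the hash spinlock can be dropped while the (potentially sleeping) admin-queue commands run, and the resulting state is written back to the real filter under the lock afterwards. A minimal sketch of the wrapping step, assuming hypothetical filter/list names rather than the i40e ones:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

enum my_filter_state { MY_FILTER_NEW, MY_FILTER_ACTIVE, MY_FILTER_FAILED };

struct my_filter {
	struct hlist_node hlist;
	enum my_filter_state state;
};

/* Wrapper that is safe to hand to the firmware path without the lock. */
struct my_new_filter {
	struct hlist_node hlist;
	struct my_filter *f;
	enum my_filter_state state;	/* tracked outside the lock */
};

/* Called under the hash lock: wrap each NEW filter instead of moving it. */
static int my_stage_new_filter(struct my_filter *f,
			       struct hlist_head *tmp_add_list)
{
	struct my_new_filter *new;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->f = f;
	new->state = f->state;
	hlist_add_head(&new->hlist, tmp_add_list);
	return 0;
}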
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
@@ -762,6 +767,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
@@ -804,7 +810,6 @@ int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
@@ -834,9 +839,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *storage);
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage);
int i40e_set_mac(struct net_device *netdev, void *p);
void i40e_set_rx_mode(struct net_device *netdev);
#endif
@@ -853,12 +857,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
- const u8 *macaddr);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b2101a51534c..451f48b7540a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -538,6 +538,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 7fe72abc0b4a..d570219efd9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -174,8 +174,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
if (!vsi)
return;
- memset(&params, 0, sizeof(params));
- i40e_client_get_params(vsi, &params);
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == vsi->back) {
@@ -186,6 +184,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change routine\n");
continue;
}
+ memset(&params, 0, sizeof(params));
+ i40e_client_get_params(vsi, &params);
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state)) {
dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
@@ -201,41 +201,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
}
/**
- * i40e_notify_client_of_netdev_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
-{
- struct i40e_client_instance *cdev;
- int ret = 0;
-
- if (!vsi)
- return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.netdev == vsi->netdev) {
- if (!cdev->client ||
- !cdev->client->ops || !cdev->client->ops->open) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance open routine\n");
- continue;
- }
- if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state))) {
- ret = cdev->client->ops->open(&cdev->lan_info,
- cdev->client);
- if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state);
- }
- }
- }
- mutex_unlock(&i40e_client_instance_mutex);
-}
-
-/**
* i40e_client_release_qvlist
* @ldev: pointer to L2 context.
*
@@ -545,9 +510,10 @@ void i40e_client_subtask(struct i40e_pf *pf)
continue;
if (!existing) {
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ pf->hw.bus.bus_id, pf->hw.bus.device,
+ pf->hw.bus.func);
}
mutex_lock(&i40e_client_instance_mutex);
@@ -596,8 +562,9 @@ int i40e_lan_add_device(struct i40e_pf *pf)
ldev->pf = pf;
INIT_LIST_HEAD(&ldev->list);
list_add(&ldev->list, &i40e_devices);
- dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
- pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+ dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients if
@@ -625,9 +592,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
mutex_lock(&i40e_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->pf == pf) {
- dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
- pf->hw.pf_id, pf->hw.bus.device,
- pf->hw.bus.func);
+ dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
list_del(&ldev->list);
kfree(ldev);
ret = 0;
@@ -688,13 +655,11 @@ static int i40e_client_release(struct i40e_client *client)
* i40e_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
- * Return 0 on success or < 0 on error
**/
-static int i40e_client_prepare(struct i40e_client *client)
+static void i40e_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
struct i40e_pf *pf;
- int ret = 0;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
@@ -704,7 +669,6 @@ static int i40e_client_prepare(struct i40e_client *client)
i40e_service_event_schedule(pf);
}
mutex_unlock(&i40e_device_mutex);
- return ret;
}
/**
@@ -961,13 +925,9 @@ int i40e_register_client(struct i40e_client *client)
set_bit(__I40E_CLIENT_REGISTERED, &client->state);
mutex_unlock(&i40e_client_mutex);
- if (i40e_client_prepare(client)) {
- ret = -EIO;
- goto out;
- }
+ i40e_client_prepare(client);
- pr_info("i40e: Registered client %s with return code %d\n",
- client->name, ret);
+ pr_info("i40e: Registered client %s\n", client->name);
out:
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 128735975caa..ece57d6a6e23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -300,7 +300,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len;
u8 *buf = (u8 *)buffer;
- u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
@@ -328,12 +327,18 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if (buf_len < len)
len = buf_len;
/* write the full 16-byte chunks */
- for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
- /* write whatever's left over without overrunning the buffer */
- if (i < len)
- i40e_debug(hw, mask, "\t0x%04X %*ph\n",
- i, len - i, buf + i);
+ if (hw->debug_mask & mask) {
+ char prefix[20];
+
+ snprintf(prefix, 20,
+ "i40e %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
}
}
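The rewritten buffer dump in `i40e_debug_aq()` above replaces the hand-rolled 16-byte loop with `print_hex_dump()`, which handles the partial last line by itself. A minimal sketch of that call with a hypothetical prefix string; `DUMP_PREFIX_OFFSET` reproduces the per-line offset the old loop printed:

#include <linux/printk.h>

static void my_dump_buffer(const void *buf, size_t len)
{
	/* 16 bytes per line, 1-byte groups, offsets instead of addresses,
	 * no ASCII column - matching the formatting used above.
	 */
	print_hex_dump(KERN_INFO, "my-drv: \t0x", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}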
@@ -1838,6 +1843,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f1f41f12902f..267ad2588255 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -974,7 +974,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
- u32 switch_id;
+ u16 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -986,7 +986,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = pf->vsi[pf->lan_vsi];
switch_id =
- vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+ le16_to_cpu(vsi->info.switch_id) &
+ I40E_AQ_VSI_SW_ID_MASK;
ret = i40e_aq_query_port_ets_config(&pf->hw,
switch_id,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index cc1465aac2ef..a22e26200bcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -803,9 +803,12 @@ static int i40e_set_settings(struct net_device *netdev,
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
/* save the requested speeds */
hw->phy.link_info.requested_speeds = config.link_speed;
@@ -2072,7 +2075,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector;
u16 vector, intrl;
- intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+ intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -2116,6 +2119,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 intrl_reg;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
@@ -2127,8 +2131,9 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
- netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+ if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
+ INTRL_REG_TO_USEC(I40E_MAX_INTRL));
return -EINVAL;
}
@@ -2141,7 +2146,12 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+ intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
+ vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
+ if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
+ netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
+ vsi->int_rate_limit);
+ }
if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ad4cf639430e..e8a8351c8ea9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -77,7 +77,6 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -409,15 +408,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
-#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-#else
-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+#ifndef I40E_FCOE
+static
#endif
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_ring *tx_ring, *rx_ring;
@@ -426,10 +421,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
int i;
if (test_bit(__I40E_DOWN, &vsi->state))
- return stats;
+ return;
if (!vsi->tx_rings)
- return stats;
+ return;
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -469,8 +464,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
-
- return stats;
}
/**
@@ -1260,7 +1253,9 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
struct hlist_head *tmp_del_list,
int vlan_filters)
{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new;
struct hlist_node *h;
int bkt, new_vlan;
@@ -1279,13 +1274,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
*/
/* Update the filters about to be added in place */
- hlist_for_each_entry(f, tmp_add_list, hlist) {
- if (vsi->info.pvid && f->vlan != vsi->info.pvid)
- f->vlan = vsi->info.pvid;
- else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
- f->vlan = 0;
- else if (!vlan_filters && f->vlan == 0)
- f->vlan = I40E_VLAN_ANY;
+ hlist_for_each_entry(new, tmp_add_list, hlist) {
+ if (pvid && new->f->vlan != pvid)
+ new->f->vlan = pvid;
+ else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
+ new->f->vlan = 0;
+ else if (!vlan_filters && new->f->vlan == 0)
+ new->f->vlan = I40E_VLAN_ANY;
}
/* Update the remaining active filters */
@@ -1295,12 +1290,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
* order to avoid duplicating code for adding the new filter
* then deleting the old filter.
*/
- if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ if ((pvid && f->vlan != pvid) ||
(vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!vlan_filters && f->vlan == 0)) {
/* Determine the new vlan we will be adding */
- if (vsi->info.pvid)
- new_vlan = vsi->info.pvid;
+ if (pvid)
+ new_vlan = pvid;
else if (vlan_filters)
new_vlan = 0;
else
@@ -1311,9 +1306,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
if (!add_head)
return -ENOMEM;
- /* Put the replacement filter into the add list */
- hash_del(&add_head->hlist);
- hlist_add_head(&add_head->hlist, tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
+ new->f = add_head;
+ new->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new->hlist, tmp_add_list);
/* Put the original filter into the delete list */
f->state = I40E_FILTER_REMOVE;
@@ -1440,23 +1442,25 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
-static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
if (!f)
return;
+ /* If the filter was never added to firmware then we can just delete it
+ * directly and we don't want to set the status to remove or else an
+ * admin queue command will unnecessarily fire.
+ */
if ((f->state == I40E_FILTER_FAILED) ||
(f->state == I40E_FILTER_NEW)) {
- /* this one never got added by the FW. Just remove it,
- * no need to sync anything.
- */
hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
/**
@@ -1483,18 +1487,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
}
/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
*
- * Goes through all the macvlan filters and adds a macvlan filter for each
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
* unique vlan that already exists. If a PVID has been assigned, instead only
* add the macaddr to that VLAN.
*
* Returns last filter added on success, else NULL
**/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
- const u8 *macaddr)
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr)
{
struct i40e_mac_filter *f, *add = NULL;
struct hlist_node *h;
@@ -1504,6 +1509,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
+ if (!i40e_is_vsi_in_vlan(vsi))
+ return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
@@ -1516,15 +1524,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
}
/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be removed
*
- * Removes a given MAC address from a VSI, regardless of VLAN
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
*
* Returns 0 for success, or error
**/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1585,8 +1594,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
- i40e_put_mac_in_vlan(vsi, addr->sa_data);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
@@ -1762,14 +1771,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_mac_filter *f;
-
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, addr);
- else
- f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
- if (f)
+ if (i40e_add_mac_filter(vsi, addr))
return 0;
else
return -ENOMEM;
@@ -1788,10 +1791,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_del_mac_all_vlan(vsi, addr);
- else
- i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+ i40e_del_mac_filter(vsi, addr);
return 0;
}
@@ -1829,16 +1829,15 @@ static void i40e_set_rx_mode(struct net_device *netdev)
}
/**
- * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
* @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to
* those entries needs to be undone.
*
- * MAC filter entries from list were slated to be sent to firmware, either for
- * addition or deletion.
+ * MAC filter entries from this list were slated for deletion.
**/
-static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
- struct hlist_head *from)
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1853,6 +1852,53 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
}
/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries needs to be undone.
+ *
+ * MAC filter entries from this list were slated for addition.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
+{
+ struct i40e_new_mac_filter *new;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(new, h, from, hlist) {
+ /* We can simply free the wrapper structure */
+ hlist_del(&new->hlist);
+ kfree(new);
+ }
+}
+
+/**
+ * i40e_next_entry - Get the next non-broadcast filter from a list
+ * i40e_next_filter - Get the next non-broadcast filter from a list
+ * @next: pointer to filter in list
+ *
+ * Returns the next non-broadcast filter in the list. Required so that we
+ * ignore broadcast filters within the list, since these are not handled via
+ * the normal firmware update path.
+ */
+static
+struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
+{
+ while (next) {
+ next = hlist_entry(next->hlist.next,
+ typeof(struct i40e_new_mac_filter),
+ hlist);
+
+ /* keep going if we found a broadcast filter */
+ if (next && is_broadcast_ether_addr(next->f->macaddr))
+ continue;
+
+ break;
+ }
+
+ return next;
+}
+
+/**
* i40e_update_filter_state - Update filter state based on return data
* from firmware
* @count: Number of filters added
@@ -1865,7 +1911,7 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
static int
i40e_update_filter_state(int count,
struct i40e_aqc_add_macvlan_element_data *add_list,
- struct i40e_mac_filter *add_head)
+ struct i40e_new_mac_filter *add_head)
{
int retval = 0;
int i;
@@ -1884,9 +1930,9 @@ i40e_update_filter_state(int count,
retval++;
}
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
+ add_head = i40e_next_filter(add_head);
+ if (!add_head)
+ break;
}
return retval;
@@ -1943,7 +1989,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
- struct i40e_mac_filter *add_head,
+ struct i40e_new_mac_filter *add_head,
int num_add, bool *promisc_changed)
{
struct i40e_hw *hw = &vsi->back->hw;
@@ -1971,10 +2017,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
* This function sets or clears the promiscuous broadcast flags for VLAN
* filters in order to properly receive broadcast frames. Assumes that only
* broadcast filters are passed.
+ *
+ * Returns status indicating success or failure;
**/
-static
-void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
- struct i40e_mac_filter *f)
+static i40e_status
+i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
@@ -1993,15 +2041,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
NULL);
}
- if (aq_ret) {
+ if (aq_ret)
dev_warn(&vsi->back->pdev->dev,
"Error %s setting broadcast promiscuous mode on %s\n",
i40e_aq_str(hw, hw->aq.asq_last_status),
vsi_name);
- f->state = I40E_FILTER_FAILED;
- } else if (enable) {
- f->state = I40E_FILTER_ACTIVE;
- }
+
+ return aq_ret;
}
/**
@@ -2015,7 +2061,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
struct hlist_head tmp_add_list, tmp_del_list;
- struct i40e_mac_filter *f, *add_head = NULL;
+ struct i40e_mac_filter *f;
+ struct i40e_new_mac_filter *new, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
@@ -2069,8 +2116,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
continue;
}
if (f->state == I40E_FILTER_NEW) {
- hash_del(&f->hlist);
- hlist_add_head(&f->hlist, &tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto err_no_memory_locked;
+
+ /* Store pointer to the real filter */
+ new->f = f;
+ new->state = f->state;
+
+ /* Add it to the hash list */
+ hlist_add_head(&new->hlist, &tmp_add_list);
}
/* Count the number of active (current and new) VLAN
@@ -2105,7 +2161,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* handle broadcast filters by updating the broadcast
- * promiscuous flag instead of deleting a MAC filter.
+ * promiscuous flag and release filter list.
*/
if (is_broadcast_ether_addr(f->macaddr)) {
i40e_aqc_broadcast_filter(vsi, vsi_name, f);
@@ -2163,36 +2219,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
goto err_no_memory;
num_add = 0;
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
- f->state = I40E_FILTER_FAILED;
+ new->state = I40E_FILTER_FAILED;
continue;
}
/* handle broadcast filters by updating the broadcast
* promiscuous flag instead of adding a MAC filter.
*/
- if (is_broadcast_ether_addr(f->macaddr)) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
- i40e_aqc_broadcast_filter(vsi, vsi_name, f);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ if (is_broadcast_ether_addr(new->f->macaddr)) {
+ if (i40e_aqc_broadcast_filter(vsi, vsi_name,
+ new->f))
+ new->state = I40E_FILTER_FAILED;
+ else
+ new->state = I40E_FILTER_ACTIVE;
continue;
}
/* add to add array */
if (num_add == 0)
- add_head = f;
+ add_head = new;
cmd_flags = 0;
- ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- if (f->vlan == I40E_VLAN_ANY) {
+ ether_addr_copy(add_list[num_add].mac_addr,
+ new->f->macaddr);
+ if (new->f->vlan == I40E_VLAN_ANY) {
add_list[num_add].vlan_tag = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
} else {
add_list[num_add].vlan_tag =
- cpu_to_le16((u16)(f->vlan));
+ cpu_to_le16((u16)(new->f->vlan));
}
add_list[num_add].queue_number = 0;
/* set invalid match method for later detection */
@@ -2218,11 +2275,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* the VSI's list.
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ /* Only update the state if we're still NEW */
+ if (new->f->state == I40E_FILTER_NEW)
+ new->f->state = new->state;
+ hlist_del(&new->hlist);
+ kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
@@ -2383,8 +2441,8 @@ err_no_memory:
/* Restore elements on the temporary add and delete lists */
spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
- i40e_undo_filter_entries(vsi, &tmp_del_list);
- i40e_undo_filter_entries(vsi, &tmp_add_list);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi, &tmp_add_list);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -2574,12 +2632,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be added
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
int err;
+ if (!vid || vsi->info.pvid)
+ return -EINVAL;
+
/* Locked once because all functions invoked below iterates list*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
err = i40e_add_vlan_all_mac(vsi, vid);
@@ -2622,10 +2683,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be removed
**/
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
+ if (!vid || vsi->info.pvid)
+ return;
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_rm_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3272,7 +3336,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
wr32(hw, I40E_PFINT_RATEN(vector - 1),
- INTRL_USEC_TO_REG(vsi->int_rate_limit));
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
@@ -4621,8 +4685,10 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
*/
if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
(!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+ local_bh_disable();
if (napi_reschedule(&tx_ring->q_vector->napi))
tx_ring->tx_stats.tx_lost_interrupt++;
+ local_bh_enable();
}
}
@@ -5276,6 +5342,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
enum i40e_aq_link_speed new_speed;
char *speed = "Unknown";
char *fc = "Unknown";
+ char *fec = "";
+ char *an = "";
new_speed = vsi->back->hw.phy.link_info.link_speed;
@@ -5335,8 +5403,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ fec = ", FEC: None";
+ an = ", Autoneg: False";
+
+ if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = ", Autoneg: True";
+
+ if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = ", FEC: CL74 FC-FEC/BASE-R";
+ else if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_RS_ENA)
+ fec = ", FEC: CL108 RS-FEC";
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
+ speed, fec, an, fc);
}
/**
@@ -6271,7 +6354,16 @@ static void i40e_link_event(struct i40e_pf *pf)
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
status = i40e_get_link_status(&pf->hw, &new_link);
- if (status) {
+
+ /* On success, disable temp link polling */
+ if (status == I40E_SUCCESS) {
+ if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
+ pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
+ } else {
+ /* Enable link polling temporarily until i40e_get_link_status
+ * returns I40E_SUCCESS
+ */
+ pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
@@ -6323,7 +6415,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
@@ -8688,7 +8781,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.fd_filters_best_effort;
}
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))) {
pf->flags |= I40E_FLAG_RESTART_AUTONEG;
@@ -8697,13 +8790,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* Disable FW LLDP if FW < v4.3 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4)))
pf->flags |= I40E_FLAG_STOP_FW_LLDP;
/* Use the FW Set LLDP MIB API if FW > v4.40 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
(pf->hw.aq.fw_maj_ver >= 5)))
pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
@@ -8734,16 +8827,17 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
#endif /* CONFIG_PCI_IOV */
if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
- I40E_FLAG_128_QP_RSS_CAPABLE |
- I40E_FLAG_HW_ATR_EVICT_CAPABLE |
- I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
- I40E_FLAG_WB_ON_ITR_CAPABLE |
- I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_USE_SET_LLDP_MIB |
- I40E_FLAG_GENEVE_OFFLOAD_CAPABLE |
- I40E_FLAG_PTP_L4_CAPABLE;
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
+ | I40E_FLAG_128_QP_RSS_CAPABLE
+ | I40E_FLAG_HW_ATR_EVICT_CAPABLE
+ | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE
+ | I40E_FLAG_WB_ON_ITR_CAPABLE
+ | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
+ | I40E_FLAG_NO_PCI_LINK_CHECK
+ | I40E_FLAG_USE_SET_LLDP_MIB
+ | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
+ | I40E_FLAG_PTP_L4_CAPABLE
+ | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
((pf->hw.aq.api_maj_ver == 1) &&
(pf->hw.aq.api_min_ver > 4))) {
@@ -9345,7 +9439,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
@@ -9354,7 +9448,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
@@ -9373,7 +9467,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/
eth_broadcast_addr(broadcast);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
@@ -10679,7 +10773,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -10994,6 +11087,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
/* set up the locks for the AQ, do this only once in probe
@@ -11659,6 +11753,53 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 mac_addr[6];
+ u16 flags = 0;
+
+ /* Get current MAC address in case it's an LAA */
+ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+ ether_addr_copy(mac_addr,
+ pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
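The new `i40e_enable_mc_magic_wake()` above encodes a firmware requirement: the MAC-address-write admin command must first be issued with a plain write type before it is issued again with the multicast-magic/WoL-preserve flags. A minimal sketch of that two-step sequence, with a hypothetical `my_aq_mac_write()` and hypothetical flag values standing in for the admin-queue call and the I40E_AQC_* bits:

#include <linux/types.h>

#define MY_WRITE_TYPE_LAA_WOL	0x4000	/* hypothetical flag values */
#define MY_MC_MAG_EN		0x0100
#define MY_WOL_PRESERVE_ON_PFR	0x0200

/* Hypothetical stand-in for the admin-queue MAC address write command. */
static int my_aq_mac_write(u16 flags, const u8 *mac)
{
	(void)flags;
	(void)mac;
	return 0;
}

static int my_enable_mc_magic_wake(const u8 *mac)
{
	int ret;

	/* Step 1: plain address write so firmware latches the address. */
	ret = my_aq_mac_write(MY_WRITE_TYPE_LAA_WOL, mac);
	if (ret)
		return ret;

	/* Step 2: the same command again, now with the wake-up flags set. */
	return my_aq_mac_write(MY_MC_MAG_EN | MY_WOL_PRESERVE_ON_PFR, mac);
}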
+/**
* i40e_shutdown - PCI callback for shutting down
* @pdev: PCI device information struct
**/
@@ -11680,6 +11821,9 @@ static void i40e_shutdown(struct pci_dev *pdev)
cancel_work_sync(&pf->service_task);
i40e_fdir_teardown(pf);
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
@@ -11711,6 +11855,9 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 5b6feb7edeb1..fea81ed065db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -55,7 +55,7 @@ struct i40e_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
-} __packed;
+};
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40e_allocate_dma_mem_d(h, m, s, a)
@@ -64,17 +64,17 @@ struct i40e_dma_mem {
struct i40e_virt_mem {
void *va;
u32 size;
-} __packed;
+};
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
-#define i40e_debug(h, m, s, ...) \
-do { \
- if (((m) & (h)->debug_mask)) \
- pr_info("i40e %02x.%x " s, \
- (h)->bus.device, (h)->bus.func, \
- ##__VA_ARGS__); \
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
} while (0)
typedef enum i40e_status_code i40e_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 9e49ffafce28..2caee35528fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -280,7 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int i;
+ unsigned int i, cleared = 0;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
@@ -306,14 +306,25 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
time_is_before_jiffies(pf->latch_events[i] + HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
pf->latch_event_flags &= ~BIT(i);
- pf->rx_hwtstamp_cleared++;
- dev_warn(&pf->pdev->dev,
- "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
- i);
+ cleared++;
}
}
spin_unlock_bh(&pf->ptp_rx_lock);
+
+ /* Log a warning if more than 2 timestamps got dropped in the same
+ * check. We don't want to warn about all drops because it can occur
+ * in normal scenarios such as PTP frames on multicast addresses we
+ * aren't listening to. However, the administrator should know if this is
+ * the reason packets aren't receiving timestamps.
+ */
+ if (cleared > 2)
+ dev_dbg(&pf->pdev->dev,
+ "Dropped %d missed RXTIME timestamp events\n",
+ cleared);
+
+ /* Finally, update the rx_hwtstamp_cleared counter */
+ pf->rx_hwtstamp_cleared += cleared;
}
/**
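
The hunk above follows a count-then-report pattern: each stale latch is cleared silently, only the per-check total is reported, and only when it exceeds a small threshold. A stand-alone sketch of the same idea, with invented latch states:

#include <stdio.h>

int main(void)
{
	int stale[4] = { 1, 0, 1, 1 };	/* pretend three latches timed out */
	unsigned int cleared = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (stale[i])
			cleared++;	/* clear quietly, just keep a count */

	if (cleared > 2)		/* at most one message per check */
		printf("Dropped %u missed RXTIME timestamp events\n", cleared);
	return 0;
}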
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 352cf7cd2ef4..97d46058d71d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -432,7 +432,12 @@ unsupported_flow:
ret = -EINVAL;
}
- /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
+ /* The buffer allocated here will normally be freed by
+ * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
+ * completion. In the event of an error adding the buffer to the FDIR
+ * ring, it will immediately be freed. It may also be freed by
+ * i40e_clean_tx_ring() when closing the VSI.
+ */
return ret;
}
@@ -1013,14 +1018,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->skb) {
- dev_kfree_skb(rx_bi->skb);
- rx_bi->skb = NULL;
- }
if (!rx_bi->page)
continue;
@@ -1425,45 +1431,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
@@ -1478,10 +1445,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
**/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- i40e_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1513,19 +1476,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ return (page_to_nid(page) == numa_mem_id()) &&
+ !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page. We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack. We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet. If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size). This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer. Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+ /* Is any reuse possible? */
+ if (unlikely(!i40e_page_is_reusable(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Inc ref count on page before passing it up to the stack */
+ get_page(page);
+
+ return true;
}
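
For orientation, not part of the patch: on systems with 4 KiB pages the truesize is the fixed 2 KiB buffer size, so the XOR simply ping-pongs between the two halves of the page, while on larger pages the offset walks forward until the final 2 KiB region is consumed. A small sketch of the small-page case with assumed sizes:

#include <stdio.h>

#define TRUESIZE 2048u	/* assumed, matches I40E_RXBUFFER_2048 */

int main(void)
{
	unsigned int page_offset = 0;
	int pass;

	/* page_offset ^= truesize alternates between 0 and 2048 */
	for (pass = 0; pass < 4; pass++) {
		printf("pass %d: hw half at %4u, stack half at %4u\n",
		       pass, page_offset, page_offset ^ TRUESIZE);
		page_offset ^= TRUESIZE;
	}
	return 0;
}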
/**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1538,30 +1567,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
**/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
+ unsigned int size,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
+ unsigned int pull_len;
+
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
- if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+ if (size <= I40E_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!i40e_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (likely(i40e_page_is_reusable(page)))
return true;
/* this page cannot be reused so discard it */
@@ -1569,34 +1597,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return false;
}
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(i40e_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ /* we need the header to contain the greater of either
+ * ETH_HLEN or 60 bytes if the skb->len is less than
+ * 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+ /* align pull length to size of long to optimize
+ * memcpy performance
+ */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
- if (rx_buffer->page_offset > last_offset)
- return false;
-#endif
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- get_page(rx_buffer->page);
+add_tail_frag:
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
- return true;
+ return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
}
/**
@@ -1611,18 +1631,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
*/
static inline
struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
+ u64 local_status_error_len =
+ le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size =
+ (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
struct i40e_rx_buffer *rx_buffer;
- struct sk_buff *skb;
struct page *page;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
- skb = rx_buffer->skb;
-
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
@@ -1646,19 +1669,17 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
* it now to avoid a possible cache miss
*/
prefetchw(skb->data);
- } else {
- rx_buffer->skb = NULL;
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
@@ -1700,7 +1721,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
#define staterrlen rx_desc->wb.qword1.status_error_len
if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
i40e_clean_programming_status(rx_ring, rx_desc);
- rx_ring->rx_bi[ntc].skb = skb;
return true;
}
/* if we are the last buffer then there is nothing else to do */
@@ -1708,8 +1728,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_bi[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1730,12 +1748,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
- struct sk_buff *skb;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1764,7 +1782,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
if (!skb)
break;
@@ -1783,8 +1801,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb))
+ if (i40e_cleanup_headers(rx_ring, skb)) {
+ skb = NULL;
continue;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1809,11 +1829,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_packets++;
}
+ rx_ring->skb = skb;
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1841,14 +1864,14 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
-static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
- return !!(vsi->rx_rings[idx]->rx_itr_setting);
+ return vsi->rx_rings[idx]->rx_itr_setting;
}
-static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
- return !!(vsi->tx_rings[idx]->tx_itr_setting);
+ return vsi->tx_rings[idx]->tx_itr_setting;
}
/**
@@ -1874,8 +1897,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
- rx_itr_setting = get_rx_itr_enabled(vsi, idx);
- tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
@@ -2251,14 +2274,16 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
@@ -2271,6 +2296,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2309,7 +2335,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from outer checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.udp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2330,15 +2357,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
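
A rough worked example of the bytecount fix-up (all numbers assumed, not taken from the patch): the xmit path now presets first->bytecount to skb->len, and i40e_tso() adds one extra copy of the headers for every segment after the first, so the counter matches what actually hits the wire.

#include <stdio.h>

int main(void)
{
	/* assumed example values */
	unsigned int hdr_len = 66;		/* Ethernet + IP + TCP headers */
	unsigned int gso_size = 1440;		/* MSS */
	unsigned int payload = 5 * 1440;
	unsigned int skb_len = hdr_len + payload;
	unsigned int gso_segs = payload / gso_size;

	unsigned int bytecount = skb_len;	/* preset at xmit time */
	bytecount += (gso_segs - 1) * hdr_len;	/* TSO header adjustment */

	/* each of the 5 wire segments carries its own 66-byte header */
	printf("wire bytes %u, expected %u\n",
	       bytecount, gso_segs * (gso_size + hdr_len));
	return 0;
}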
@@ -2699,7 +2734,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -2708,15 +2742,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2902,8 +2927,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2919,6 +2946,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
@@ -2926,16 +2959,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
@@ -2973,7 +3003,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e065321ce8ed..f80979025c01 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -52,7 +52,20 @@
*/
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+/**
+ * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
+ * @intrl: interrupt rate limit to convert
+ *
+ * This function converts an interrupt rate limit expressed in microseconds
+ * to the register format expected by the firmware when setting the
+ * interrupt rate limit.
+ */
+static inline u16 i40e_intrl_usec_to_reg(int intrl)
+{
+ if (intrl >> 2)
+ return ((intrl >> 2) | INTRL_ENA);
+ else
+ return 0;
+}
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */
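
As a sanity check on the helper above (constants from the surrounding defines, arithmetic worked out here): the register stores the limit in 4-microsecond units with bit 6 as the enable flag, so the 125-microsecond 8K limit encodes to 0x5f and decodes back to 124 microseconds because of the granularity.

#include <stdio.h>

#define INTRL_ENA	(1u << 6)
#define REG_TO_USEC(r)	(((r) & ~INTRL_ENA) << 2)

/* same shape as i40e_intrl_usec_to_reg() above */
static unsigned int usec_to_reg(unsigned int intrl)
{
	return (intrl >> 2) ? ((intrl >> 2) | INTRL_ENA) : 0;
}

int main(void)
{
	unsigned int reg = usec_to_reg(125);	/* I40E_INTRL_8K */

	/* prints reg=0x5f decoded=124: the low two bits are rounded away */
	printf("reg=0x%02x decoded=%u usec\n", reg, REG_TO_USEC(reg));
	return 0;
}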
@@ -240,7 +253,6 @@ struct i40e_tx_buffer {
};
struct i40e_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -341,6 +353,14 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40e_clean_rx_ring_irq() is called
+ * for this ring.
+ */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
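
A schematic of what the new ring->skb member buys (toy types, not the driver's): a packet spanning several descriptors can survive a budget-limited poll, because the half-built skb is parked on the ring rather than on the per-buffer field this series removes.

#include <stdio.h>
#include <stddef.h>

struct toy_skb  { int frags; };
struct toy_ring { struct toy_skb *skb; };

/* consume one descriptor; is_eop marks the end of the packet */
static void rx_descriptor(struct toy_ring *ring, int is_eop)
{
	static struct toy_skb storage;

	if (!ring->skb) {		/* first fragment: start a new skb */
		storage.frags = 0;
		ring->skb = &storage;
	}
	ring->skb->frags++;

	if (!is_eop)
		return;			/* keep the skb parked on the ring */

	printf("delivered packet with %d fragments\n", ring->skb->frags);
	ring->skb = NULL;		/* handed off to the stack */
}

int main(void)
{
	struct toy_ring ring = { .skb = NULL };

	rx_descriptor(&ring, 0);	/* poll 1 runs out of budget here */
	rx_descriptor(&ring, 0);	/* poll 2 resumes the same packet */
	rx_descriptor(&ring, 1);	/* EOP finally seen */
	return 0;
}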
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index edc0abdf4783..939f9fdc8f85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -125,7 +125,6 @@ enum i40e_debug_mask {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -185,6 +184,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -470,6 +470,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a6198b727e24..cbbf8648307a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ?
- vf->port_vlan_id : -1);
+ f = i40e_add_mac_filter(vsi,
+ vf->default_lan_addr.addr);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
eth_broadcast_addr(broadcast);
- f = i40e_add_filter(vsi, broadcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1);
+ f = i40e_add_mac_filter(vsi, broadcast);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
@@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f) {
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
- else
- f = i40e_add_filter(vsi, al->list[i].addr, -1);
- }
+ if (!f)
+ f = i40e_add_mac_filter(vsi, al->list[i].addr);
if (!f) {
dev_err(&pf->pdev->dev,
@@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
- i40e_del_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1);
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
- i40e_del_filter(vsi, f->macaddr, f->vlan);
+ __i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index aa63b7fb993d..89dfdbca13db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
@@ -305,7 +304,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
- u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
@@ -333,12 +331,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if (buf_len < len)
len = buf_len;
/* write the full 16-byte chunks */
- for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
- /* write whatever's left over without overrunning the buffer */
- if (i < len)
- i40e_debug(hw, mask, "\t0x%04X %*ph\n",
- i, len - i, buf + i);
+ if (hw->debug_mask & mask) {
+ char prefix[20];
+
+ snprintf(prefix, 20,
+ "i40evf %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
}
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 21dcaee1ad1d..d76393c95056 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -48,7 +48,6 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index df67ef37b7f3..c91fcf43ccbc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -501,14 +501,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->skb) {
- dev_kfree_skb(rx_bi->skb);
- rx_bi->skb = NULL;
- }
if (!rx_bi->page)
continue;
@@ -903,45 +904,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
@@ -956,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
**/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- i40e_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -991,19 +949,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ return (page_to_nid(page) == numa_mem_id()) &&
+ !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page. We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack. We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet. If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size). This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer. Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+ /* Is any reuse possible? */
+ if (unlikely(!i40e_page_is_reusable(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Inc ref count on page before passing it up to the stack */
+ get_page(page);
+
+ return true;
}
/**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1016,30 +1040,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
**/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
+ unsigned int size,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
+ unsigned int pull_len;
+
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
- if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+ if (size <= I40E_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!i40e_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (likely(i40e_page_is_reusable(page)))
return true;
/* this page cannot be reused so discard it */
@@ -1047,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return false;
}
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(i40e_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ /* we need the header to contain the greater of either
+ * ETH_HLEN or 60 bytes if the skb->len is less than
+ * 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+ /* align pull length to size of long to optimize
+ * memcpy performance
+ */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
- if (rx_buffer->page_offset > last_offset)
- return false;
-#endif
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- get_page(rx_buffer->page);
+add_tail_frag:
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
- return true;
+ return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
}
/**
@@ -1089,18 +1104,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
*/
static inline
struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
+ u64 local_status_error_len =
+ le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size =
+ (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
struct i40e_rx_buffer *rx_buffer;
- struct sk_buff *skb;
struct page *page;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
- skb = rx_buffer->skb;
-
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
@@ -1124,19 +1142,17 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
* it now to avoid a possible cache miss
*/
prefetchw(skb->data);
- } else {
- rx_buffer->skb = NULL;
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
@@ -1180,8 +1196,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_bi[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1202,12 +1216,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
- struct sk_buff *skb;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1236,7 +1250,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
if (!skb)
break;
@@ -1255,8 +1269,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb))
+ if (i40e_cleanup_headers(rx_ring, skb)) {
+ skb = NULL;
continue;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1273,11 +1289,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_packets++;
}
+ rx_ring->skb = skb;
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1305,18 +1324,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
-static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->rx_rings[idx].rx_itr_setting);
+ return adapter->rx_rings[idx].rx_itr_setting;
}
-static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->tx_rings[idx].tx_itr_setting);
+ return adapter->tx_rings[idx].tx_itr_setting;
}
/**
@@ -1342,8 +1361,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
- rx_itr_setting = get_rx_itr_enabled(vsi, idx);
- tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
@@ -1549,14 +1568,16 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
@@ -1569,6 +1590,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1607,7 +1629,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from outer checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.udp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1628,15 +1651,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
@@ -1949,7 +1980,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -1958,15 +1988,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2151,8 +2172,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2168,6 +2191,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
@@ -2175,16 +2204,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
@@ -2211,7 +2237,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index a5fc789f78eb..8274ba68bd32 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -239,7 +239,6 @@ struct i40e_tx_buffer {
};
struct i40e_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -340,6 +339,14 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40evf_clean_rx_ring_irq() is called
+ * for this ring.
+ */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c85e8a31c072..16bb88084bb9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -100,7 +100,6 @@ enum i40e_debug_mask {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -159,6 +158,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -443,6 +443,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index fc374f833aa9..d38a2b2aea2b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,6 +81,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fffe4cf2c20b..00c42d803276 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -195,6 +195,7 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ u32 client_pending;
struct msix_entry *msix_entries;
u32 flags;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c0fc53361800..f35dcaac5bb7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
/* required last entry */
{0, }
};
@@ -2154,6 +2153,11 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ /* We explicitly don't free resources here because the hardware is
+ * still active and can DMA into memory. Resources are cleared in
+ * i40evf_virtchnl_completion() after we get confirmation from the PF
+ * driver that the rings have been stopped.
+ */
return 0;
}
@@ -2727,6 +2731,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -2871,7 +2876,8 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_request_reset(adapter);
msleep(50);
}
-
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
i40evf_misc_irq_disable(adapter);
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 2059a8e88908..bee58af390e1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -999,6 +999,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ adapter->client_pending &=
+ ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ break;
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct i40e_virtchnl_rss_hena *vrh =
(struct i40e_virtchnl_rss_hena *)msg;
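
For context, a generic sketch of how a pending-operation bitmask like client_pending can be used (the send side is not shown in this hunk, so that half is an assumption): a bit indexed by the virtchnl opcode is set when the request goes out and cleared when the PF acknowledges it.

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define OP_CONFIG_IWARP_IRQ_MAP	21	/* value from the enum above */

int main(void)
{
	unsigned int client_pending = 0;

	client_pending |= BIT(OP_CONFIG_IWARP_IRQ_MAP);		/* request sent */
	printf("iwarp map pending: %d\n",
	       !!(client_pending & BIT(OP_CONFIG_IWARP_IRQ_MAP)));

	client_pending &= ~BIT(OP_CONFIG_IWARP_IRQ_MAP);	/* PF completion */
	printf("iwarp map pending: %d\n",
	       !!(client_pending & BIT(OP_CONFIG_IWARP_IRQ_MAP)));
	return 0;
}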
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a61447fd778e..ee443985581f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 8aa798737d4d..07d48f2e3369 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
ret_val = igb_pool_flash_update_done_i210(hw);
if (ret_val)
- hw_dbg("Flash update complete\n");
- else
hw_dbg("Flash update time out\n");
+ else
+ hw_dbg("Flash update complete\n");
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5010e2232c50..5eff82678f0b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- if (hw->mac.type == e1000_i350) {
+ if (hw->mac.type == e1000_i350)
lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
- + lan_offset, 1, &nvm_data);
- } else {
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
- 1, &nvm_data);
- }
+ else
+ lan_offset = 0;
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
+ 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
@@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
hw->fc.requested_mode = e1000_fc_none;
- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
- NVM_WORD0F_ASM_DIR)
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
hw->fc.requested_mode = e1000_fc_tx_pause;
else
hw->fc.requested_mode = e1000_fc_full;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5b54254aed4f..2788a5409023 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_id;
+ /* ensure PHY page selection to fix misconfigured i210 */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index d84afdd83e53..58adbf234e07 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -320,7 +320,7 @@
#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */
#define E1000_MANC 0x05820 /* Management Control - RW */
#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
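
A note on the R/W1C annotation (general register semantics, not something this patch changes functionally): a write-1-to-clear status register latches events in hardware and clears only the bits that are written back as 1, which is why the resume and error-recovery paths write ~0 to E1000_WUS. A toy model:

#include <stdio.h>

static unsigned int wus = 0x05;	/* pretend two wake events are latched */

/* write-1-to-clear: only the bits written as 1 are cleared */
static void wus_write(unsigned int val)
{
	wus &= ~val;
}

int main(void)
{
	printf("before: 0x%02x\n", wus);
	wus_write(~0u);			/* mirrors wr32(E1000_WUS, ~0) */
	printf("after:  0x%02x\n", wus);
	return 0;
}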
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1515abaa5ac9..be456bae8169 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static void igb_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
@@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
@@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending)
int igb_close(struct net_device *netdev)
{
- return __igb_close(netdev, false);
+ if (netif_device_present(netdev))
+ return __igb_close(netdev, false);
+ return 0;
}
/**
@@ -3394,7 +3396,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(reg_idx), tdba >> 32);
- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
wr32(E1000_TDH(reg_idx), 0);
writel(0, ring->tail);
@@ -3733,7 +3735,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
wr32(E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
@@ -5402,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work)
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
**/
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void igb_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -5411,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
igb_update_stats(adapter, &adapter->stats64);
memcpy(stats, &adapter->stats64, sizeof(*stats));
spin_unlock(&adapter->stats64_lock);
-
- return stats;
}
/**
@@ -7564,6 +7564,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
@@ -7572,6 +7573,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_ptp_suspend(adapter);
igb_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -7690,16 +7692,15 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- if (netdev->flags & IFF_UP) {
- rtnl_lock();
+ rtnl_lock();
+ if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
- rtnl_unlock();
- if (err)
- return err;
- }
- netif_device_attach(netdev);
- return 0;
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
}
static int igb_runtime_idle(struct device *dev)
@@ -7898,6 +7899,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ /* In case of PCI error, the adapter loses its HW address,
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5826b1ddedcf..fbd220d137b3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__IXGB_DOWN, &adapter->flags))
ixgb_irq_enable(adapter);
}
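The one-line ixgb change above is the same napi_complete() -> napi_complete_done() conversion applied elsewhere in this series: passing work_done lets the NAPI core account the amount of work actually completed when it decides how to finish the poll. As a shape sketch only (the clean and IRQ-enable helpers are placeholders, not ixgb's functions), the poll routine keeps its familiar form:

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done = my_clean_rx(napi, budget);  /* hypothetical helper */

            if (work_done < budget) {
                    /* report how much work was actually done */
                    napi_complete_done(napi, work_done);
                    my_irq_enable(napi);                /* hypothetical helper */
            }
            return work_done;
    }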
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ef81c3d8c295..a2cc43d28888 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,9 +55,6 @@
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BP_EXTENDED_STATS
-#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -94,6 +91,14 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBE_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#else
+#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#endif
+
/*
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
@@ -107,6 +112,9 @@
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IXGBE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
enum ixgbe_tx_flags {
/* cmd_type flags */
IXGBE_TX_FLAGS_HW_VLAN = 0x01,
@@ -159,6 +167,7 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
IXGBEVF_XCAST_MODE_MULTI,
IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
};
struct vf_macvlans {
@@ -194,17 +203,17 @@ struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef BP_EXTENDED_STATS
- u64 yields;
- u64 misses;
- u64 cleaned;
-#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -225,15 +234,20 @@ struct ixgbe_rx_queue_stats {
#define IXGBE_TS_HDR_LEN 8
enum ixgbe_ring_state_t {
+ __IXGBE_RX_3K_BUFFER,
+ __IXGBE_RX_BUILD_SKB_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+ __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+ __IXGBE_RX_FCOE,
__IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
- __IXGBE_RX_RSC_ENABLED,
- __IXGBE_RX_CSUM_UDP_ZERO_ERR,
- __IXGBE_RX_FCOE,
};
+#define ring_uses_build_skb(ring) \
+ test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
@@ -343,19 +357,20 @@ struct ixgbe_ring_feature {
*/
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
-#ifdef IXGBE_FCOE
- if (test_bit(__IXGBE_RX_FCOE, &ring->state))
- return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
- IXGBE_RXBUFFER_3K;
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ return IXGBE_RXBUFFER_3K;
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_build_skb(ring))
+ return IXGBE_MAX_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
-#ifdef IXGBE_FCOE
- if (test_bit(__IXGBE_RX_FCOE, &ring->state))
- return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ return 1;
#endif
return 0;
}
@@ -398,127 +413,10 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- atomic_t state;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum ixgbe_qv_state_t {
- IXGBE_QV_STATE_IDLE = 0,
- IXGBE_QV_STATE_NAPI,
- IXGBE_QV_STATE_POLL,
- IXGBE_QV_STATE_DISABLE
-};
-
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_NAPI);
-#ifdef BP_EXTENDED_STATS
- if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
-#endif
-
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true is someone tried to get the qv while napi had it */
-static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
- WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
-
- /* flush any outstanding Rx frames */
- if (q_vector->napi.gro_list)
- napi_gro_flush(&q_vector->napi, false);
-
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from ixgbe_low_latency_poll() */
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_POLL);
-#ifdef BP_EXTENDED_STATS
- if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->rx.ring->stats.yields++;
-#endif
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
- WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
-
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
- return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_DISABLE);
-
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
-}
-
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
- return true;
-}
-
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
- return true;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
#ifdef CONFIG_IXGBE_HWMON
#define IXGBE_HWMON_TYPE_LOC 0
@@ -661,6 +559,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
+#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
+#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
+#define IXGBE_FLAG2_RX_LEGACY BIT(16)
/* Tx fast path data */
int num_tx_queues;
@@ -862,6 +763,7 @@ enum ixgbe_boards {
board_X550,
board_X550EM_x,
board_x550em_a,
+ board_x550em_a_fw,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -870,8 +772,9 @@ extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
+extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
-extern const struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
extern char ixgbe_driver_name[];
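IXGBE_RX_DMA_ATTR above combines DMA_ATTR_SKIP_CPU_SYNC with DMA_ATTR_WEAK_ORDERING: receive pages are mapped once and kept across reuse, with explicit partial syncs around each hand-off instead of a full sync at map and unmap time, and the mapping may be weakly ordered where the platform supports it. The ixgbe_main.c hunks later in this diff follow roughly this pattern (sketch only, not compilable on its own, error handling elided):

    dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                        DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);

    /* before handing the buffer to the hardware */
    dma_sync_single_range_for_device(dev, dma, offset, bufsz, DMA_FROM_DEVICE);

    /* before the CPU looks at the received frame */
    dma_sync_single_range_for_cpu(dev, dma, offset, size, DMA_FROM_DEVICE);

    /* only when the page finally leaves the ring */
    dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);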
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 805ab319e578..523f9d05a810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
case ixgbe_phy_tn:
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e00aaeb91827..30535e6b68f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 8832df3eba25..c38d50c1fcf7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
supported = true;
break;
default:
@@ -348,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_SPARC
+#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
u32 regval;
@@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ }
+ break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
@@ -3578,7 +3587,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
u32 i;
u8 sum = 0;
@@ -3593,43 +3602,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
}
/**
- * ixgbe_host_interface_command - Issue command to manageability block
+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked
* @hw: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
+ * @buffer: command to write and where the return status will be placed
* @length: length of buffer, must be multiple of 4 bytes
* @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
*
- * Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * Communicates with the manageability block. Returns 0 on success, or
+ * IXGBE_ERR_INVALID_ARGUMENT / IXGBE_ERR_HOST_INTERFACE_COMMAND when the
+ * command fails.
+ *
+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ * by the caller.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
- u32 length, u32 timeout,
- bool return_data)
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ u32 timeout)
{
- u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u32 hicr, i, bi, fwsts;
- u16 buf_len, dword_len;
- union {
- struct ixgbe_hic_hdr hdr;
- u32 u32arr[1];
- } *bp = buffer;
- s32 status;
+ u32 hicr, i, fwsts;
+ u16 dword_len;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
- /* Take management host interface semaphore */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
- if (status)
- return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3639,15 +3634,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto rel_out;
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto rel_out;
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
dword_len = length >> 2;
@@ -3657,7 +3650,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(bp->u32arr[i]));
+ i, cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3671,11 +3664,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* Check command successful completion. */
if ((timeout && i == timeout) ||
- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
- hw_dbg(hw, "Command has failed with no status valid.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto rel_out;
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ return 0;
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return 0
+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ u32 length, u32 timeout,
+ bool return_data)
+{
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ u16 buf_len, dword_len;
+ s32 status;
+ u32 bi;
+
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+ if (status)
+ goto rel_out;
if (!return_data)
goto rel_out;
@@ -3722,6 +3758,8 @@ rel_out:
* @min: driver version minor number
* @build: driver version build number
* @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
@@ -3729,7 +3767,8 @@ rel_out:
* semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
+ u8 build, u8 sub, __always_unused u16 len,
+ __always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
int i;
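The ixgbe_common.c rework above splits the host interface path in two: ixgbe_hic_unlocked() performs the register sequence and assumes the IXGBE_GSSR_SW_MNG_SM semaphore is already held, while ixgbe_host_interface_command() keeps its old contract by acquiring the semaphore, calling the unlocked helper, and releasing it on the way out, so callers that already hold the semaphore can reuse the command path without re-acquiring it. The generic shape of the split, with a pthread mutex standing in for the SW/FW semaphore and the command body elided (names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mng_lock = PTHREAD_MUTEX_INITIALIZER;

    static int hic_unlocked(const unsigned int *buf, unsigned int len)
    {
            /* caller must hold mng_lock; write buf to the mailbox here */
            return len ? 0 : -1;
    }

    static int hic_command(const unsigned int *buf, unsigned int len)
    {
            int rc;

            pthread_mutex_lock(&mng_lock);
            rc = hic_unlocked(buf, len);
            pthread_mutex_unlock(&mng_lock);
            return rc;
    }

    int main(void)
    {
            unsigned int cmd[4] = { 0 };

            printf("rc=%d\n", hic_command(cmd, 4));
            return 0;
    }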
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 5b3e3c65927e..e083732adf64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -111,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
+ u8 build, u8 ver, u16 len, const char *str);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index b8fc3cfec831..78c52375acc6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return err ? 1 : 0;
}
-const struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
.ieee_getets = ixgbe_dcbnl_ieee_getets,
.ieee_setets = ixgbe_dcbnl_ieee_setets,
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index fd192bf29b26..a7574c7b12af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
+static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
+
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
SUPPORTED_10000baseKX4_Full | \
@@ -197,15 +204,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
- SUPPORTED_1000baseKX_Full :
- SUPPORTED_1000baseT_Full;
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_10_FULL)
+ ecmd->supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
ecmd->advertising = ecmd->supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
ecmd->advertising = 0;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -237,6 +246,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_x550em_ext_t:
+ case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -337,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_10GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_2500);
break;
@@ -346,6 +359,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_100_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_100);
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ break;
default:
break;
}
@@ -394,6 +410,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (ecmd->advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_10_FULL;
+
if (old == advertised)
return err;
/* this sets the link speed and restarts auto-neg */
@@ -989,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -1128,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IXGBE_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1170,12 +1193,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = 0;
- data[i+1] = 0;
- data[i+2] = 0;
- i += 3;
-#endif
continue;
}
@@ -1185,12 +1202,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i+1] = ring->stats.misses;
- data[i+2] = ring->stats.cleaned;
- i += 3;
-#endif
}
for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
@@ -1198,12 +1209,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = 0;
- data[i+1] = 0;
- data[i+2] = 0;
- i += 3;
-#endif
continue;
}
@@ -1213,12 +1218,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i+1] = ring->stats.misses;
- data[i+2] = ring->stats.cleaned;
- i += 3;
-#endif
}
for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
@@ -1255,28 +1254,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_bp_napi_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_bp_poll_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -1292,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ixgbe_priv_flags_strings,
+ IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
}
}
@@ -1896,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
- while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
+ while (rx_desc->wb.upper.length) {
/* check Rx buffer */
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
@@ -1918,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
/* unmap buffer on Tx side */
tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -3173,6 +3168,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ if (hw->phy.type == ixgbe_phy_fw)
+ return -ENXIO;
+
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
@@ -3218,6 +3216,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
if (ee->len == 0)
return -EINVAL;
+ if (hw->phy.type == ixgbe_phy_fw)
+ return -ENXIO;
+
for (i = ee->offset; i < ee->offset + ee->len; i++) {
/* I2C reads can take long time */
if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
@@ -3237,6 +3238,167 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
return 0;
}
+static const struct {
+ ixgbe_link_speed mac_speed;
+ u32 supported;
+} ixgbe_ls_map[] = {
+ { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
+ { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
+ { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
+ { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+};
+
+static const struct {
+ u32 lp_advertised;
+ u32 mac_speed;
+} ixgbe_lp_map[] = {
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+};
+
+static int
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
+ if (rc)
+ return rc;
+
+ edata->lp_advertised = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
+ if (info[0] & ixgbe_lp_map[i].lp_advertised)
+ edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ }
+
+ edata->supported = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
+ edata->supported |= ixgbe_ls_map[i].supported;
+ }
+
+ edata->advertised = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
+ edata->advertised |= ixgbe_ls_map[i].supported;
+ }
+
+ edata->eee_enabled = !!edata->advertised;
+ edata->tx_lpi_enabled = edata->eee_enabled;
+ if (edata->advertised & edata->lp_advertised)
+ edata->eee_active = true;
+
+ return 0;
+}
+
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+ return -EOPNOTSUPP;
+
+ if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
+ return ixgbe_get_eee_fw(adapter, edata);
+
+ return -EOPNOTSUPP;
+}
+
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_data;
+ s32 ret_val;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+ return -EOPNOTSUPP;
+
+ memset(&eee_data, 0, sizeof(struct ethtool_eee));
+
+ ret_val = ixgbe_get_eee(netdev, &eee_data);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_data.eee_enabled && !edata->eee_enabled) {
+ if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err(drv, "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err(drv,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_data.advertised != edata->advertised) {
+ e_err(drv,
+ "Setting EEE advertised speeds is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if (eee_data.eee_enabled != edata->eee_enabled) {
+ if (edata->eee_enabled) {
+ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+ hw->phy.eee_speeds_advertised =
+ hw->phy.eee_speeds_supported;
+ } else {
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+ hw->phy.eee_speeds_advertised = 0;
+ }
+
+ /* reset link */
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
+ return 0;
+}
+
+static u32 ixgbe_get_priv_flags(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
+ if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
+ flags2 |= IXGBE_FLAG2_RX_LEGACY;
+
+ if (flags2 != adapter->flags2) {
+ adapter->flags2 = flags2;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
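The ethtool additions above expose EEE control plus a single "legacy-rx" private flag: get_priv_flags reports driver state as a u32 bitmap whose bit positions follow the strings table, and set_priv_flags applies the bitmap and reinitializes the interface only when something actually changed (from user space this surfaces through ethtool's --show-priv-flags / --set-priv-flags). A stand-alone sketch of the bitmap round-trip, with made-up flag and variable names:

    #include <stdio.h>

    #define PRIV_FLAG_LEGACY_RX  (1u << 0)   /* bit 0 == first string in the table */

    static unsigned int driver_flags;        /* stand-in for adapter->flags2 */

    static unsigned int get_priv_flags(void)
    {
            return (driver_flags & PRIV_FLAG_LEGACY_RX) ? PRIV_FLAG_LEGACY_RX : 0;
    }

    static int set_priv_flags(unsigned int flags)
    {
            unsigned int next = driver_flags & ~PRIV_FLAG_LEGACY_RX;

            if (flags & PRIV_FLAG_LEGACY_RX)
                    next |= PRIV_FLAG_LEGACY_RX;
            if (next != driver_flags) {
                    driver_flags = next;
                    /* a real driver would reinitialize its rings here */
            }
            return 0;
    }

    int main(void)
    {
            set_priv_flags(PRIV_FLAG_LEGACY_RX);
            printf("legacy-rx: %u\n", !!(get_priv_flags() & PRIV_FLAG_LEGACY_RX));
            return 0;
    }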
@@ -3269,8 +3431,12 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 15ab337fd7ad..1b8be7d813bd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_rss(adapter);
}
+#define IXGBE_RSS_64Q_MASK 0x3F
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
#define IXGBE_RSS_4Q_MASK 0x3
@@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
**/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_ring_feature *f;
u16 rss_i;
@@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
rss_i = f->limit;
f->indices = rss_i;
- f->mask = IXGBE_RSS_16Q_MASK;
+
+ if (hw->mac.type < ixgbe_mac_X550)
+ f->mask = IXGBE_RSS_16Q_MASK;
+ else
+ f->mask = IXGBE_RSS_64Q_MASK;
/* disable ATR by default, it will be configured below */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -847,11 +853,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- /* initialize busy poll */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
-
-#endif
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1e2f39ebd824..060cdce8058f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
+ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
/* required last entry */
{0, }
};
@@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
@@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state "
- "trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
+ "trans_start\n");
+ pr_info("%-15s %016lX %016lX\n",
netdev->name,
netdev->state,
- dev_trans_start(netdev),
- netdev->last_rx);
+ dev_trans_start(netdev));
}
/* Print Registers */
@@ -942,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
- struct ixgbe_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1195,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -1549,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
}
}
+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
@@ -1567,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
/*
* if mapping failed free memory back to system since
@@ -1583,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ixgbe_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
return true;
}
@@ -1598,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -1607,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = ixgbe_rx_bufsz(rx_ring);
+
do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
/*
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
@@ -1626,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -1717,11 +1713,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
- skb_mark_napi_id(skb, &q_vector->napi);
- if (ixgbe_qv_busy_polling(q_vector))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -1833,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
{
/* if the page was released unmap it, else just sync our portion */
if (unlikely(IXGBE_CB(skb)->page_released)) {
- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
+ dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
frag->page_offset,
- ixgbe_rx_bufsz(rx_ring),
+ skb_frag_size(frag),
DMA_FROM_DEVICE);
}
- IXGBE_CB(skb)->dma = 0;
}
/**
@@ -1881,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
}
/* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
+ if (!skb_headlen(skb))
ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
@@ -1916,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1931,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbe_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+ /* The last offset is a bit aggressive in that we assume the
+ * worst case of FCoE being enabled and using a 3K buffer.
+ * However this should have minimal impact as the 1K extra is
+ * still less than one buffer in size.
+ */
+#define IXGBE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
+ if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
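The pagecnt_bias scheme added above replaces the old per-frame page_ref_inc(): each received frame "spends" one of the driver's pre-paid references by decrementing a local bias, the page stays reusable as long as page_ref_count() minus the bias shows at most the frame just handed out, and only when the bias runs dry are both counters restocked with a single page_ref_add(page, USHRT_MAX), keeping atomics off the hot path. A toy user-space model of the bookkeeping (plain integers instead of struct page; values illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_ref = 1;   /* page_ref_count() right after allocation */
            unsigned int bias = 1;       /* pagecnt_bias set when the page is mapped */

            bias--;                      /* one frame handed to the stack */

            /* same test as ixgbe_can_reuse_rx_page(): reuse unless more than the
             * frame just handed out is still outstanding */
            printf("reusable: %d\n", !((page_ref - bias) > 1));

            if (!bias) {                 /* pool drained: restock in one shot */
                    page_ref += 65535;   /* page_ref_add(page, USHRT_MAX) */
                    bias = 65535;
            }
            printf("refs=%u bias=%u\n", page_ref, bias);
            return 0;
    }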
@@ -1946,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/
-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
- ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
#endif
-
- if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ixgbe_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, ixgbe_rx_pg_order(rx_ring));
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(ixgbe_page_is_reserved(page)))
- return false;
-
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
- return false;
#endif
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
}
-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc)
+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ const unsigned int size)
{
struct ixgbe_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
- skb = rx_buffer->skb;
+ /* Delay unmapping of the first packet. It carries the header
+ * information; HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached.
+ */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ ixgbe_dma_sync_frag(rx_ring, *skb);
+ }
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ return rx_buffer;
+}
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi,
- IXGBE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- return NULL;
+static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ struct sk_buff *skb)
+{
+ if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
}
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
- /*
- * Delay unmapping of the first packet. It carries the
- * header information, HW may still access the header
- * after the writeback. Only unmap it when EOP is
- * reached
- */
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
- goto dma_sync;
+static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
- IXGBE_CB(skb)->dma = rx_buffer->dma;
- } else {
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
- ixgbe_dma_sync_frag(rx_ring, skb);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
-dma_sync:
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
- rx_buffer->skb = NULL;
- }
+ if (size > IXGBE_RX_HDR_SIZE) {
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
- /* pull page into skb */
- if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ixgbe_reuse_rx_page(rx_ring, rx_buffer);
- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
- /* the page has been released from the ring */
- IXGBE_CB(skb)->page_released = true;
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
+ return skb;
+}
+
+static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IXGBE_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* record DMA address if this is the start of a chain of buffers */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
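ixgbe_build_skb() above wraps an sk_buff directly around the half-page receive buffer instead of copying the header out: the buffer holds IXGBE_SKB_PAD of headroom, then the frame, then room for the shared info, which is why IXGBE_MAX_FRAME_BUILD_SKB caps the frame size and why page_offset can simply be XORed with the half-page truesize to flip between the two buffers in a page. Rough numbers for 4 KiB pages, assuming a 64-byte NET_SKB_PAD, a NET_IP_ALIGN of 2 and the roughly 320-byte skb_shared_info mentioned in the ixgbe.h comment:

    #include <stdio.h>

    int main(void)
    {
            unsigned int truesize = 4096 / 2;   /* half-page buffers */
            unsigned int pad = 64 + 2;          /* NET_SKB_PAD + NET_IP_ALIGN (assumed) */
            unsigned int shinfo = 320;          /* approx. skb_shared_info overhead */
            unsigned int max_frame = truesize - shinfo - pad;

            printf("usable frame per buffer: %u bytes\n", max_frame);
            printf("page_offset alternates between %u and %u\n", pad, pad ^ truesize);
            return 0;
    }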
@@ -2115,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -2124,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -2134,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
/* retrieve a buffer from the ring */
- skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+ if (skb)
+ ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = ixgbe_build_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+ else
+ skb = ixgbe_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* place incomplete frames back on ring for completion */
@@ -2198,40 +2270,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbe_low_latency_recv(struct napi_struct *napi)
-{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring;
- int found = 0;
-
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return LL_FLUSH_FAILED;
-
- if (!ixgbe_qv_lock_poll(q_vector))
- return LL_FLUSH_BUSY;
-
- ixgbe_for_each_ring(ring, q_vector->rx) {
- found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
- if (found)
- ring->stats.cleaned += found;
- else
- ring->stats.misses++;
-#endif
- if (found)
- break;
- }
-
- ixgbe_qv_unlock_poll(q_vector);
-
- return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* ixgbe_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -2447,6 +2485,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
+ s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2485,6 +2524,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ rc = hw->phy.ops.check_overtemp(hw);
+ if (rc != IXGBE_ERR_OVERTEMP)
+ return;
+ break;
default:
if (adapter->hw.mac.type >= ixgbe_mac_X540)
return;
@@ -2531,6 +2576,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
return;
+ case ixgbe_mac_x550em_a:
+ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ }
+ return;
+ case ixgbe_mac_X550:
case ixgbe_mac_X540:
if (!(eicr & IXGBE_EICR_TS))
return;
@@ -2856,8 +2913,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- /* Exit if we are called by netpoll or busy polling is active */
- if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -2876,7 +2933,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- ixgbe_qv_unlock_napi(q_vector);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -3214,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbe_tx_buffer) * ring->count);
+
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
@@ -3384,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3685,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -3717,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
*/
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
+#if (PAGE_SIZE < 8192)
+ } else {
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+
+ /* Limit the maximum frame size so we don't overrun the skb */
+ if (ring_uses_build_skb(ring) &&
+ !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ IXGBE_RXDCTL_RLPML_EN;
+#endif
}
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbe_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBE_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor ring */
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
@@ -3855,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
+
+ clear_ring_rsc_enabled(rx_ring);
+ clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_ring_rsc_enabled(rx_ring);
- else
- clear_ring_rsc_enabled(rx_ring);
+
+ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ continue;
+
+ set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+ if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
}
}
@@ -4559,23 +4662,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
- ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_enable(&adapter->q_vector[q_idx]->napi);
- }
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
- pr_info("QV %d locked\n", q_idx);
- usleep_range(1000, 20000);
- }
- }
}
static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
@@ -4879,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
**/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
+ while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
if (IXGBE_CB(skb)->page_released)
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
dev_kfree_skb(skb);
- rx_buffer->skb = NULL;
}
- if (!rx_buffer->page)
- continue;
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- rx_buffer->page = NULL;
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
}
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
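The cleanup loop above no longer touches every slot; it walks the ring from next_to_clean up to next_to_alloc, wrapping the index at the end of the ring. A standalone sketch of that index arithmetic:

#include <stdio.h>

#define RING_SIZE 8

/* Visit every slot from ntc up to (but not including) nta, wrapping
 * at the end of the ring -- the same walk the cleanup loop relies on.
 */
static void walk(unsigned int ntc, unsigned int nta)
{
	unsigned int i = ntc;

	while (i != nta) {
		printf("visit slot %u\n", i);
		if (++i == RING_SIZE)
			i = 0;
	}
}

int main(void)
{
	walk(6, 2);	/* visits 6, 7, 0, 1 */
	return 0;
}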
@@ -5294,6 +5392,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ if (adapter->hw.phy.type == ixgbe_phy_fw)
+ ixgbe_watchdog_link_is_down(adapter);
ixgbe_down(adapter);
/*
* If SR-IOV enabled then wait a bit before bringing the adapter
@@ -5384,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
**/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
- struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- /* ring already cleared, nothing to do */
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
@@ -5554,6 +5683,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
+ * @adapter: board private structure
+ */
+static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ if (!hw->phy.eee_speeds_supported)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
+ if (!hw->phy.eee_speeds_advertised)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ default:
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ }
+}
+
+/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
@@ -5717,6 +5871,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_x550em_a:
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ break;
+ default:
+ break;
+ }
/* fall through */
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
@@ -5730,6 +5892,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* Fall Through */
case ixgbe_mac_X550:
+ if (hw->mac.type == ixgbe_mac_X550)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -5816,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (tx_ring->q_vector)
ring_node = tx_ring->q_vector->numa_node;
- tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
+ tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -5900,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
if (rx_ring->q_vector)
ring_node = rx_ring->q_vector->numa_node;
- rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -6200,7 +6364,8 @@ int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_close_suspend(adapter);
+ if (netif_device_present(netdev))
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
@@ -6245,14 +6410,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- rtnl_unlock();
- if (err)
- return err;
-
- netif_device_attach(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
- return 0;
+ return err;
}
#endif /* CONFIG_PM */
@@ -6267,14 +6430,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
ixgbe_close_suspend(adapter);
- rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -6808,6 +6971,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_100_FULL:
speed_str = "100 Mbps";
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ speed_str = "10 Mbps";
+ break;
default:
speed_str = "unknown speed";
break;
@@ -7615,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
tx_ring->next_to_use = i;
}
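The unwind loop above steps the index backwards through the ring, and the wrap only applies when the pre-decrement value was zero; any other test would walk off the buffer array. A standalone sketch of that step, assuming an unsigned index:

#include <stdio.h>

#define RING_SIZE 8

/* Step an index backwards through a ring, wrapping from 0 to
 * RING_SIZE - 1. Testing "i-- == 0" adds the ring size only when the
 * old value was 0, which is the pattern used in the error path above.
 */
static unsigned int step_back(unsigned int i)
{
	if (i-- == 0)
		i += RING_SIZE;
	return i;
}

int main(void)
{
	unsigned int a = step_back(3);	/* -> 2 */
	unsigned int b = step_back(0);	/* -> 7 (wrapped) */

	printf("%u %u\n", a, b);
	return 0;
}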
@@ -8111,8 +8291,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
}
#endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+
+static void ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
@@ -8150,13 +8331,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
}
rcu_read_unlock();
+
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
stats->rx_length_errors = netdev->stats.rx_length_errors;
stats->rx_crc_errors = netdev->stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- return stats;
}
#ifdef CONFIG_IXGBE_DCB
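With the new signature the callback fills the caller's stats block and returns nothing. A standalone sketch of that shape (per-ring counters are summed directly; the driver's u64_stats retry loop and RCU protection are omitted):

#include <stdint.h>
#include <stdio.h>

struct ring_stats { uint64_t packets, bytes; };
struct link_stats { uint64_t rx_packets, rx_bytes; };

/* Fill the caller-provided stats block in place and return nothing,
 * mirroring the void-returning form the callback now has.
 */
static void get_stats(const struct ring_stats *rings, int n,
		      struct link_stats *stats)
{
	for (int i = 0; i < n; i++) {
		stats->rx_packets += rings[i].packets;
		stats->rx_bytes += rings[i].bytes;
	}
}

int main(void)
{
	struct ring_stats r[2] = { { 10, 1000 }, { 5, 700 } };
	struct link_stats s = { 0, 0 };

	get_stats(r, 2, &s);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)s.rx_packets,
	       (unsigned long long)s.rx_bytes);
	return 0;
}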
@@ -9290,9 +9471,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ixgbe_low_latency_recv,
-#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
@@ -9596,6 +9774,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
+ ixgbe_set_eee_capable(adapter);
if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -9673,7 +9852,7 @@ skip_sriov:
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
- netdev->dcbnl_ops = &dcbnl_ops;
+ netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9833,8 +10012,9 @@ skip_sriov:
* since os does not support feature
*/
if (hw->mac.ops.set_fw_drv_ver)
- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
- 0xFF);
+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
+ sizeof(ixgbe_driver_version) - 1,
+ ixgbe_driver_version);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -10082,7 +10262,7 @@ skip_bad_vf_detection:
}
if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_close_suspend(adapter);
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -10152,10 +10332,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
}
#endif
+ rtnl_lock();
if (netif_running(netdev))
- ixgbe_up(adapter);
+ ixgbe_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
static const struct pci_error_handlers ixgbe_err_handler = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 01c2667c0f92..811cb4f64a5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -74,6 +74,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
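API negotiation on the PF side is a whitelist: any version the PF does not recognize is rejected and the VF stays on its old API. A standalone sketch, with a local enum standing in for the mailbox values:

#include <stdio.h>

/* Local stand-in for the mailbox API enum; values are positional
 * only, not the on-wire numbers.
 */
enum mbox_api { API_10, API_11, API_12, API_13, API_UNKNOWN };

/* Accept a VF-requested API version only if the PF knows it, the
 * same whitelist-style switch used on the PF side.
 */
static int negotiate_api(enum mbox_api requested, enum mbox_api *current)
{
	switch (requested) {
	case API_10:
	case API_11:
	case API_12:
	case API_13:
		*current = requested;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	enum mbox_api cur = API_UNKNOWN;
	int err = negotiate_api(API_13, &cur);

	printf("err=%d current=%d\n", err, cur);
	return 0;
}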
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 3b8362085f57..e55b2602f371 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msleep(100);
- hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
- if (!(ctrl & MDIO_CTRL1_RESET)) {
- udelay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ MDIO_MMD_PMAPMD, &ctrl);
+ if (status)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ udelay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS, &ctrl);
+ if (status)
+ return status;
+
+ if (!(ctrl & MDIO_CTRL1_RESET)) {
+ udelay(2);
+ break;
+ }
}
}
@@ -751,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
-
- /*
- * Clear autoneg_advertised and set new values based on input link
+ /* Clear autoneg_advertised and set new values based on input link
* speed.
*/
hw->phy.autoneg_advertised = 0;
@@ -761,12 +776,21 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
if (speed & IXGBE_LINK_SPEED_100_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed & IXGBE_LINK_SPEED_10_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
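The speed accumulation above sets one advertised bit per requested bit, which amounts to masking the request down to the speed bits the helper knows about. A standalone sketch using the speed values visible later in this patch:

#include <stdint.h>
#include <stdio.h>

/* Link-speed bits as they appear in ixgbe_type.h later in this patch. */
#define SPEED_10_FULL    0x0002u
#define SPEED_100_FULL   0x0008u
#define SPEED_1GB_FULL   0x0020u
#define SPEED_2_5GB_FULL 0x0400u

/* Keep only the speed bits this helper knows about -- the same effect
 * as the per-bit accumulation in ixgbe_setup_phy_link_speed_generic.
 */
static uint32_t build_advertised(uint32_t requested)
{
	return requested & (SPEED_10_FULL | SPEED_100_FULL |
			    SPEED_1GB_FULL | SPEED_2_5GB_FULL);
}

int main(void)
{
	uint32_t requested = SPEED_1GB_FULL | SPEED_2_5GB_FULL;

	printf("0x%x\n", build_advertised(requested));	/* 0x420 */
	return 0;
}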
@@ -960,40 +984,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status;
-
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- MDIO_MMD_VEND1,
- firmware_version);
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status;
-
- status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
- MDIO_MMD_VEND1,
- firmware_version);
-
- return status;
-}
-
-/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
@@ -1738,6 +1728,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
max_retry = IXGBE_SFP_DETECT_RETRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ecf05f838fc5..5aa2c3cf7aec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 1efb404431e9..ef0635e0918c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7e5d9850e4b2..044cb44747cf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
/*
* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
@@ -934,7 +935,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
IXGBE_VT_MSGINFO_SHIFT;
int err;
- if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
+ index > 0) {
e_warn(drv,
"VF %d requested MACVLAN filter but is administratively denied\n",
vf);
@@ -978,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_10:
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -1002,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_20:
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
break;
default:
return -1;
@@ -1041,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
return -EPERM;
/* verify the PF is supporting the correct API */
- if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
/* This mailbox command is supported (required) only for 82599 and x540
* VFs which support up to 4 RSS queues. Therefore we will compress the
@@ -1068,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return -EPERM;
/* verify the PF is supporting the correct API */
- if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
@@ -1081,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
{
struct ixgbe_hw *hw = &adapter->hw;
int xcast_mode = msgbuf[1];
- u32 vmolr, disable, enable;
+ u32 vmolr, fctrl, disable, enable;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_12:
+ /* promiscuous mode was introduced with API version 1.3 */
+ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ /* fall through */
+ case ixgbe_mbox_api_13:
break;
default:
return -EOPNOTSUPP;
@@ -1101,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = 0;
break;
case IXGBEVF_XCAST_MODE_MULTI:
- disable = IXGBE_VMOLR_MPE;
+ disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
break;
case IXGBEVF_XCAST_MODE_ALLMULTI:
- disable = 0;
+ disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
break;
+ case IXGBEVF_XCAST_MODE_PROMISC:
+ if (hw->mac.type <= ixgbe_mac_82599EB)
+ return -EOPNOTSUPP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ if (!(fctrl & IXGBE_FCTRL_UPE)) {
+ /* VF promisc requires PF in promisc */
+ e_warn(drv,
+ "Enabling VF promisc requires PF in promisc\n");
+ return -EPERM;
+ }
+
+ disable = 0;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ break;
default:
return -EOPNOTSUPP;
}
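Each xcast mode maps to a fixed set of VMOLR bits to enable, with the rest of the promiscuous family cleared. A standalone sketch of that mapping (the UPE/VPE values come from this patch; BAM/ROMPE/MPE are the existing ixgbe_type.h values, not shown here):

#include <stdint.h>
#include <stdio.h>

#define VMOLR_UPE   0x00400000u	/* unicast promiscuous (this patch) */
#define VMOLR_VPE   0x00800000u	/* VLAN promiscuous (this patch) */
#define VMOLR_ROMPE 0x02000000u	/* accept packets in MTA tbl */
#define VMOLR_BAM   0x08000000u	/* accept broadcast packets */
#define VMOLR_MPE   0x10000000u	/* multicast promiscuous */

enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI, XCAST_PROMISC };

/* Return the VMOLR bits to enable for a mode; everything else in the
 * promiscuous family is cleared, as the disable/enable pair above does.
 */
static uint32_t xcast_enable_bits(enum xcast_mode mode)
{
	switch (mode) {
	case XCAST_MULTI:
		return VMOLR_BAM | VMOLR_ROMPE;
	case XCAST_ALLMULTI:
		return VMOLR_BAM | VMOLR_ROMPE | VMOLR_MPE;
	case XCAST_PROMISC:
		return VMOLR_BAM | VMOLR_ROMPE | VMOLR_MPE |
		       VMOLR_UPE | VMOLR_VPE;
	case XCAST_NONE:
	default:
		return 0;
	}
}

int main(void)
{
	printf("0x%08x\n", xcast_enable_bits(XCAST_PROMISC));
	return 0;
}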
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index cf21273db201..1d07f2ead914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -92,6 +92,8 @@
#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -1499,6 +1501,8 @@ enum {
#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
@@ -1914,6 +1918,7 @@ enum {
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -2619,6 +2624,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -2644,6 +2650,59 @@ enum ixgbe_fdir_pballoc_type {
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 BIT(0)
+#define FW_PHY_ACT_LINK_SPEED_100 BIT(1)
+#define FW_PHY_ACT_LINK_SPEED_1G BIT(2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3)
+#define FW_PHY_ACT_LINK_SPEED_5G BIT(4)
+#define FW_PHY_ACT_LINK_SPEED_10G BIT(5)
+#define FW_PHY_ACT_LINK_SPEED_20G BIT(6)
+#define FW_PHY_ACT_LINK_SPEED_25G BIT(7)
+#define FW_PHY_ACT_LINK_SPEED_40G BIT(8)
+#define FW_PHY_ACT_LINK_SPEED_50G BIT(9)
+#define FW_PHY_ACT_LINK_SPEED_100G BIT(10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP BIT(18)
+#define FW_PHY_ACT_SETUP_LINK_HP BIT(19)
+#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20)
+#define FW_PHY_ACT_SETUP_LINK_AN BIT(22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2686,6 +2745,16 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -2734,6 +2803,19 @@ struct ixgbe_hic_internal_phy_resp {
__be32 read_data;
};
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2849,6 +2931,7 @@ typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0002
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -3064,6 +3147,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
ixgbe_phy_sgmii,
+ ixgbe_phy_fw,
ixgbe_phy_generic
};
@@ -3362,7 +3446,8 @@ struct ixgbe_mac_operations {
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
@@ -3392,7 +3477,6 @@ struct ixgbe_phy_operations {
s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
@@ -3478,6 +3562,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e2ff823ee202..84a467a8ed3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
ixgbe_link_speed speed;
bool link_up;
- /*
- * Link should be up in order for the blink bit in the LED control
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
* This will be reversed when we stop the blinking.
*/
@@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
u32 macc_reg;
u32 ledctl_reg;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -913,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = {
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
.set_phy_power = &ixgbe_set_copper_phy_power,
- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 11fb433eb924..200f847fd8f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
+static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with X540 invariants, since so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
* @hw: pointer to hardware structure
**/
@@ -402,6 +414,204 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u32 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = cpu_to_le16(activity);
+ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i)
+ hic.cmd.data[i] = cpu_to_be32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = be32_to_cpu(hic.rsp.data[i]);
+ return 0;
+ }
+ usleep_range(20, 30);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.id)
+ return 0;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ return 0;
+}
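The firmware returns the PHY identity packed into two 32-bit words; the masks added in ixgbe_type.h split out the supported-speed bitmap and the two halves of the PHY ID. A standalone sketch of that field extraction:

#include <stdint.h>
#include <stdio.h>

/* Masks as defined earlier in this patch (ixgbe_type.h). */
#define PHY_INFO_SPEED_MASK 0xFFFu
#define PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define PHY_INFO_ID_LO_MASK 0x0000FFFFu

/* Split the two firmware info words into the supported-speed bitmap
 * and the two halves of the PHY ID, the same field extraction done
 * in ixgbe_get_phy_id_fw above.
 */
static void decode_phy_info(uint32_t info0, uint32_t info1,
			    uint16_t *speeds, uint32_t *id_hi,
			    uint16_t *id_lo)
{
	*speeds = info0 & PHY_INFO_SPEED_MASK;
	*id_hi = info0 & PHY_INFO_ID_HI_MASK;
	*id_lo = info1 & PHY_INFO_ID_LO_MASK;
}

int main(void)
{
	uint16_t speeds, id_lo;
	uint32_t id_hi;

	decode_phy_info(0x03450027u, 0x0000BEEFu, &speeds, &id_hi, &id_lo);
	printf("speeds=0x%03x id_hi=0x%08x id_lo=0x%04x\n",
	       speeds, (unsigned)id_hi, id_lo);
	return 0;
}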
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
+/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return 0;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
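ixgbe_setup_fw_link packs the pause mode, advertised speeds and option flags into the first data word of the SETUP_LINK activity. A standalone sketch of that encoding, using the bit positions from the FW_PHY_ACT_SETUP_LINK_* defines above:

#include <stdint.h>
#include <stdio.h>

/* Bit layout copied from the FW_PHY_ACT_* defines earlier in this patch. */
#define LINK_SPEED_1G  (1u << 2)
#define LINK_SPEED_10G (1u << 5)
#define PAUSE_SHIFT    16
#define PAUSE_RXTX     3u
#define SETUP_LINK_HP  (1u << 19)
#define SETUP_LINK_EEE (1u << 20)
#define SETUP_LINK_AN  (1u << 22)

/* Pack advertised speeds, pause mode and option flags into the first
 * data word of a SETUP_LINK activity, as ixgbe_setup_fw_link does.
 */
static uint32_t build_setup_word(uint32_t speeds, uint32_t pause,
				 int eee, int autoneg)
{
	uint32_t w = speeds;

	w |= pause << PAUSE_SHIFT;
	w |= SETUP_LINK_HP;
	if (eee)
		w |= SETUP_LINK_EEE;
	if (autoneg)
		w |= SETUP_LINK_AN;
	return w;
}

int main(void)
{
	uint32_t w = build_setup_word(LINK_SPEED_1G | LINK_SPEED_10G,
				      PAUSE_RXTX, 1, 1);

	printf("0x%08x\n", (unsigned)w);
	return 0;
}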
+
/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
@@ -624,41 +834,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
-/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
- * command assuming that the semaphore is already obtained.
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the hostif.
- **/
-static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
-{
- s32 status;
- struct ixgbe_hic_read_shadow_ram buffer;
-
- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
- buffer.hdr.req.buf_lenh = 0;
- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
- /* convert offset from words to bytes */
- buffer.address = cpu_to_be32(offset * 2);
- /* one word */
- buffer.length = cpu_to_be16(sizeof(u16));
-
- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT, false);
- if (status)
- return status;
-
- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- FW_NVM_DATA_OFFSET);
-
- return 0;
-}
-
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
@@ -670,6 +845,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
@@ -677,7 +853,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u32 i;
/* Take semaphore for the entire operation. */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status) {
hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
return status;
@@ -698,10 +874,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, &buffer,
- sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT,
- false);
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
if (status) {
hw_dbg(hw, "Host interface command failed\n");
goto out;
@@ -725,7 +899,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
}
out:
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
@@ -896,15 +1070,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
**/
static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status = 0;
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
+ struct ixgbe_hic_read_shadow_ram buffer;
+ s32 status;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32(offset * 2);
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
+ if (!status) {
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
}
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
@@ -1768,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
return rc;
}
+/**
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ * @speed: link speed to apply
+ * @autoneg_wait: true when waiting for autoneg to complete
+ */
+static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval, flx_val;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ */
+static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up)
+ goto out;
+
+ /* Check if auto-negotiation has completed */
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Negotiate the flow control */
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
+
+out:
+ if (!status) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1780,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
mac->ops.setup_fc = NULL;
mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
break;
+ case ixgbe_media_type_copper:
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ break;
+ }
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+ break;
case ixgbe_media_type_backplane:
mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
@@ -1827,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
- return;
+ break;
case ixgbe_media_type_backplane:
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
@@ -1870,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
/* CS4227 SFP must not enable auto-negotiation */
@@ -2108,8 +2435,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return status;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
@@ -2189,12 +2514,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- if (hw->mac.type != ixgbe_mac_X550EM_x)
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
@@ -2356,6 +2680,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
return 0;
}
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return 0
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val;
+ int i;
+
+ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status !=
+ FW_CEM_RESP_STATUS_SUCCESS)
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return 0;
+ }
+
+ return ret_val;
+}
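The new handler refuses version strings that do not fit the fixed 39-byte field before copying them into the command. A standalone sketch of that length check (header, checksum and retry handling omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DRV_VER_SIZE 39		/* FW_CEM_DRIVER_VERSION_SIZE in this patch */

struct drv_info_cmd {
	uint8_t buf_len;
	char driver_string[DRV_VER_SIZE];
};

/* Validate and copy a driver-version string into the command buffer,
 * rejecting strings that would not fit -- the same length check done
 * at the top of ixgbe_set_fw_drv_ver_x550.
 */
static int fill_drv_info(struct drv_info_cmd *cmd, const char *ver, size_t len)
{
	if (!len || !ver || len > sizeof(cmd->driver_string))
		return -1;
	memcpy(cmd->driver_string, ver, len);
	cmd->buf_len = (uint8_t)len;	/* header length math omitted */
	return 0;
}

int main(void)
{
	struct drv_info_cmd cmd = { 0 };
	const char ver[] = "5.0.0-k";

	printf("%d\n", fill_drv_info(&cmd, ver, sizeof(ver) - 1));
	return 0;
}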
+
/** ixgbe_get_lcd_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
@@ -2655,6 +3035,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+ if (rc)
+ return rc;
+ memset(store, 0, sizeof(store));
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+ if (rc)
+ return rc;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ if (rc)
+ return rc;
+
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
+ }
+ return 0;
+}
+
+/**
* ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
* @hw: pointer to hardware structure
*
@@ -2740,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
+ break;
default:
break;
}
@@ -2777,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
media_type = ixgbe_media_type_copper;
break;
default:
@@ -2844,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
default:
break;
}
@@ -3275,7 +3712,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.clear_vfta = &ixgbe_clear_vfta_generic, \
.set_vfta = &ixgbe_set_vfta_generic, \
.fc_enable = &ixgbe_fc_enable_generic, \
- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \
.init_uta_tables = &ixgbe_init_uta_tables_generic, \
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
@@ -3355,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
+static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+ X550_COMMON_MAC
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .fc_autoneg = ixgbe_fc_autoneg,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
+};
+
#define X550_COMMON_EEP \
.read = &ixgbe_read_ee_hostif_X550, \
.read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
@@ -3384,12 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
- .set_phy_power = NULL, \
- .check_overtemp = &ixgbe_tn_check_overtemp, \
- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
+ .set_phy_power = NULL,
static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
@@ -3398,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = {
static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_generic,
@@ -3406,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
static const struct ixgbe_phy_operations phy_ops_x550em_a = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
@@ -3414,6 +3873,17 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = {
+ X550_COMMON_PHY
+ .check_overtemp = ixgbe_check_overtemp_fw,
+ .init = ixgbe_init_phy_ops_X550em,
+ .identify = ixgbe_identify_phy_fw,
+ .read_reg = NULL,
+ .write_reg = NULL,
+ .read_reg_mdi = NULL,
+ .write_reg_mdi = NULL,
+};
+
static const struct ixgbe_link_operations link_ops_x550em_x = {
.read_link = &ixgbe_read_i2c_combined_generic,
.read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -3463,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_x550em_a,
};
+
+const struct ixgbe_info ixgbe_x550em_a_fw_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = ixgbe_get_invariants_X550_a_fw,
+ .mac_ops = &mac_ops_x550em_a_fw,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a_fw,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 508e72c5f1c2..1f6c0ecd50bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
if (!ring) {
data[i++] = 0;
data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
- data[i++] = 0;
- data[i++] = 0;
- data[i++] = 0;
-#endif
continue;
}
@@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i + 1] = ring->stats.misses;
- data[i + 2] = ring->stats.cleaned;
- i += 3;
-#endif
}
/* populate Rx queue data */
@@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
if (!ring) {
data[i++] = 0;
data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
- data[i++] = 0;
- data[i++] = 0;
- data[i++] = 0;
-#endif
continue;
}
@@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i + 1] = ring->stats.misses;
- data[i + 2] = ring->stats.cleaned;
- i += 3;
-#endif
}
}
@@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_bp_napi_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_bp_poll_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 5639fbe294d0..a8cbc2dda0dd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -37,11 +37,6 @@
#include "vf.h"
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#define BP_EXTENDED_STATS
-#endif
-
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
@@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer {
struct ixgbevf_stats {
u64 packets;
u64 bytes;
-#ifdef BP_EXTENDED_STATS
- u64 yields;
- u64 misses;
- u64 cleaned;
-#endif
};
struct ixgbevf_tx_queue_stats {
@@ -217,109 +207,6 @@ struct ixgbevf_q_vector {
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
-{
- spin_lock_init(&q_vector->lock);
- q_vector->state = IXGBEVF_QV_STATE_IDLE;
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBEVF_QV_LOCKED) {
- WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
- q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
- rc = false;
-#ifdef BP_EXTENDED_STATS
- q_vector->tx.ring->stats.yields++;
-#endif
- } else {
- /* we don't care if someone yielded */
- q_vector->state = IXGBEVF_QV_STATE_NAPI;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* returns true if someone tried to get the qv while napi had it */
-static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
-{
- int rc = false;
-
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
- IXGBEVF_QV_STATE_NAPI_YIELD));
-
- if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
- rc = true;
- /* reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* called from ixgbevf_low_latency_poll() */
-static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
- q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
- rc = false;
-#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->stats.yields++;
-#endif
- } else {
- /* preserve yield marks */
- q_vector->state |= IXGBEVF_QV_STATE_POLL;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
-{
- int rc = false;
-
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
-
- if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
- rc = true;
- /* reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
-{
- WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
- return q_vector->state & IXGBEVF_QV_USER_PEND;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBEVF_QV_OWNED)
- rc = false;
- q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0
*/
@@ -464,6 +351,7 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
IXGBEVF_XCAST_MODE_MULTI,
IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
};
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 6d4bef5803f2..80bab261a0ec 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb)
{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- skb_mark_napi_id(skb, &q_vector->napi);
-
- if (ixgbevf_qv_busy_polling(q_vector)) {
- netif_receive_skb(skb);
- /* exit early if we busy polled */
- return;
- }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!ixgbevf_qv_lock_napi(q_vector))
- return budget;
-#endif
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling
@@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
- ixgbevf_qv_unlock_napi(q_vector);
-#endif
-
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
-{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *ring;
- int found = 0;
-
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- return LL_FLUSH_FAILED;
-
- if (!ixgbevf_qv_lock_poll(q_vector))
- return LL_FLUSH_BUSY;
-
- ixgbevf_for_each_ring(ring, q_vector->rx) {
- found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
- if (found)
- ring->stats.cleaned += found;
- else
- ring->stats.misses++;
-#endif
- if (found)
- break;
- }
-
- ixgbevf_qv_unlock_poll(q_vector);
-
- return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -1930,6 +1878,16 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
(flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
+ /* request the most inclusive mode we need */
+ if (flags & IFF_PROMISC)
+ xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
+ else if (flags & IFF_ALLMULTI)
+ xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+ else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = IXGBEVF_XCAST_MODE_NONE;
+
spin_lock_bh(&adapter->mbx_lock);
hw->mac.ops.update_xcast_mode(hw, xcast_mode);
@@ -1950,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
-#endif
napi_enable(&q_vector->napi);
}
}
@@ -1966,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
- pr_info("QV %d locked\n", q_idx);
- usleep_range(1000, 20000);
- }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
@@ -2071,7 +2020,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_12,
+ int api[] = { ixgbe_mbox_api_13,
+ ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
@@ -2373,6 +2323,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
switch (hw->api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
default:
@@ -3228,6 +3179,21 @@ err_setup_reset:
}
/**
+ * ixgbevf_close_suspend - actions necessary to both suspend and close flows
+ * @adapter: the private adapter struct
+ *
+ * This function should contain the necessary work common to both suspending
+ * and closing of the device.
+ */
+static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+}
+
+/**
* ixgbevf_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -3242,11 +3208,8 @@ int ixgbevf_close(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- ixgbevf_down(adapter);
- ixgbevf_free_irq(adapter);
-
- ixgbevf_free_all_tx_resources(adapter);
- ixgbevf_free_all_rx_resources(adapter);
+ if (netif_device_present(netdev))
+ ixgbevf_close_suspend(adapter);
return 0;
}
@@ -3268,6 +3231,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
+ rtnl_lock();
+
if (netif_running(dev))
ixgbevf_close(dev);
@@ -3276,6 +3241,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
if (netif_running(dev))
ixgbevf_open(dev);
+
+ rtnl_unlock();
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
@@ -3796,17 +3763,14 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- if (netif_running(netdev)) {
- rtnl_lock();
- ixgbevf_down(adapter);
- ixgbevf_free_irq(adapter);
- ixgbevf_free_all_tx_resources(adapter);
- ixgbevf_free_all_rx_resources(adapter);
- ixgbevf_clear_interrupt_scheme(adapter);
- rtnl_unlock();
- }
+ if (netif_running(netdev))
+ ixgbevf_close_suspend(adapter);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -3838,6 +3802,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
+
+ adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
@@ -3869,8 +3835,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
-static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void ixgbevf_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
unsigned int start;
@@ -3903,8 +3869,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
-
- return stats;
}
#define IXGBEVF_MAX_MAC_HDR_LEN 127
@@ -3953,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ixgbevf_busy_poll_recv,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
@@ -4102,6 +4063,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4244,7 +4206,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(netdev))
- ixgbevf_down(adapter);
+ ixgbevf_close_suspend(adapter);
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -4272,6 +4234,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+ adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
@@ -4292,12 +4255,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev))
- ixgbevf_up(adapter);
+ ixgbevf_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
/* PCI Error Recovery (ERS) */
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 340cdd469455..bc0442acae78 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d46ba1dabcb7..8a5db9d7219d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* Thus return an error if API doesn't support RETA querying or querying
* is not supported for this device type.
*/
- if (hw->api_version != ixgbe_mbox_api_12 ||
- hw->mac.type >= ixgbe_mac_X550_vf)
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
+ break;
+ default:
return -EOPNOTSUPP;
+ }
msgbuf[0] = IXGBE_VF_GET_RETA;
@@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* Thus return an error if API doesn't support RSS Random Key retrieval
* or if the operation is not supported for this device type.
*/
- if (hw->api_version != ixgbe_mbox_api_12 ||
- hw->mac.type >= ixgbe_mac_X550_vf)
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
+ break;
+ default:
return -EOPNOTSUPP;
+ }
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
@@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
switch (hw->api_version) {
case ixgbe_mbox_api_12:
+ /* promiscuous mode was introduced in API version 1.3 */
+ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ /* Fall through */
+ case ixgbe_mbox_api_13:
break;
default:
return -EOPNOTSUPP;
@@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
switch (hw->api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
break;
default:
return 0;
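
The RETA/RSS-key queries and the xcast update in vf.c above share one pattern: mailbox API 1.2 falls through to the 1.3 case so both use the same body, while a feature that only exists from 1.3 (promiscuous mode) is rejected explicitly for 1.2. A minimal standalone sketch of that control flow follows; the names (vf_request_xcast, MBOX_API_*, XCAST_*) are hypothetical and only the switch shape is taken from the hunks.

#include <errno.h>

enum mbox_api { MBOX_API_10, MBOX_API_11, MBOX_API_12, MBOX_API_13 };
enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI, XCAST_PROMISC };

/* Hypothetical helper mirroring the version checks in the hunks above. */
static int vf_request_xcast(enum mbox_api api, enum xcast_mode mode)
{
        switch (api) {
        case MBOX_API_12:
                /* promiscuous mode only exists from API 1.3 onward */
                if (mode == XCAST_PROMISC)
                        return -EOPNOTSUPP;
                /* fall through */
        case MBOX_API_13:
                break;  /* supported; a real driver would send the mailbox message here */
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}
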
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f9fcab54783c..f580b49e6b67 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev)
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
else
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
@@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev)
jme->phylink = 0;
jme_reset_phy_processor(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
/*
* Force to Reset the link again
@@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev,
}
static int
-jme_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+jme_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+ rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
return rc;
}
static int
-jme_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+jme_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc, fdc = 0;
- if (ethtool_cmd_speed(ecmd) == SPEED_1000
- && ecmd->autoneg != AUTONEG_ENABLE)
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
/*
@@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev,
* Hardware would not generate link change interrupt.
*/
if (jme->mii_if.force_media &&
- ecmd->autoneg != AUTONEG_ENABLE &&
- (jme->mii_if.full_duplex != ecmd->duplex))
+ cmd->base.autoneg != AUTONEG_ENABLE &&
+ (jme->mii_if.full_duplex != cmd->base.duplex))
fdc = 1;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+ rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
if (!rc) {
if (fdc)
jme_reset_link(jme);
- jme->old_ecmd = *ecmd;
+ jme->old_cmd = *cmd;
set_bit(JME_FLAG_SSET, &jme->flags);
}
@@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
if (!rc && (cmd == SIOCSMIIREG)) {
if (duplex_chg)
jme_reset_link(jme);
- jme_get_settings(netdev, &jme->old_ecmd);
+ jme_get_link_ksettings(netdev, &jme->old_cmd);
set_bit(JME_FLAG_SSET, &jme->flags);
}
@@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
.set_pauseparam = jme_set_pauseparam,
.get_wol = jme_get_wol,
.set_wol = jme_set_wol,
- .get_settings = jme_get_settings,
- .set_settings = jme_set_settings,
.get_link = jme_get_link,
.get_msglevel = jme_get_msglevel,
.set_msglevel = jme_set_msglevel,
@@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = {
.get_eeprom_len = jme_get_eeprom_len,
.get_eeprom = jme_get_eeprom,
.set_eeprom = jme_set_eeprom,
+ .get_link_ksettings = jme_get_link_ksettings,
+ .set_link_ksettings = jme_set_link_ksettings,
};
static int
@@ -3306,7 +3306,7 @@ jme_resume(struct device *dev)
jme_clear_pm_disable_wol(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
else
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 58cd67c0c8e4..89535c019f04 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -447,7 +447,7 @@ struct jme_adapter {
u8 chip_sub_rev;
u8 pcirev;
u32 msg_enable;
- struct ethtool_cmd old_ecmd;
+ struct ethtool_link_ksettings old_cmd;
unsigned int old_mtu;
struct dynpcc_info dpi;
atomic_t intr_sem;
@@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev)
/*
* Function prototypes
*/
-static int jme_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd);
+static int jme_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd);
static void jme_set_unicastaddr(struct net_device *netdev);
static void jme_set_multi(struct net_device *netdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 8037426ec50f..9fae98caf83a 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
work_done = korina_rx(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
writel(readl(&lp->rx_dma_regs->dmasm) &
~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
@@ -695,25 +695,27 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_gset(&lp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_sset(&lp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
korina_set_carrier(&lp->mii_if);
@@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.get_link = netdev_get_link,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int korina_alloc_ring(struct net_device *dev)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index faea52da8dae..afc810069440 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
struct ltq_etop_chan *ch = container_of(napi,
struct ltq_etop_chan, napi);
- int rx = 0;
- int complete = 0;
+ int work_done = 0;
- while ((rx < budget) && !complete) {
+ while (work_done < budget) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
- ltq_etop_hw_receive(ch);
- rx++;
- } else {
- complete = 1;
- }
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+ break;
+ ltq_etop_hw_receive(ch);
+ work_done++;
}
- if (complete || !rx) {
- napi_complete(&ch->napi);
+ if (work_done < budget) {
+ napi_complete_done(&ch->napi, work_done);
ltq_dma_ack_irq(&ch->dma);
}
- return rx;
+ return work_done;
}
static int
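
The ltq_etop_poll_rx rewrite above is one instance of a conversion repeated across this series (korina, mv643xx_eth, mvneta, mvpp2, pxa168, skge): track the work actually done and pass it to napi_complete_done() instead of calling napi_complete() unconditionally. Below is a kernel-style sketch of the resulting poll shape; the ring helpers (ring_has_work, ring_rx_one, reenable_irq) are hypothetical stubs, and only napi_complete_done() with its bool return is taken from the hunks.

#include <linux/netdevice.h>

/* Hypothetical ring helpers, stubbed out so the sketch stands alone. */
static bool ring_has_work(struct napi_struct *napi) { return false; }
static void ring_rx_one(struct napi_struct *napi) { }
static void reenable_irq(struct napi_struct *napi) { }

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        while (work_done < budget && ring_has_work(napi)) {
                ring_rx_one(napi);      /* receive exactly one frame */
                work_done++;
        }

        /* Re-arm the interrupt only if we stayed under budget and NAPI really
         * went idle; napi_complete_done() may refuse, e.g. while busy polling
         * keeps the instance scheduled.
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                reenable_irq(napi);

        return work_done;
}
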
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f4b7cf18fb0f..d2555e8b947e 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -83,9 +83,8 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375 network interface support"
- depends on MACH_ARMADA_375 || COMPILE_TEST
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
- depends on !64BIT
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1fa7c03edec2..25642dee49d3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1504,9 +1504,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
int err;
u32 supported, advertising;
- err = phy_read_status(dev->phydev);
- if (err == 0)
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
+ err = phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
@@ -2319,7 +2317,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrlp(mp, INT_MASK, mp->int_mask);
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e05e22705cf7..61dd4462411c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,6 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -224,6 +225,7 @@
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
@@ -525,6 +527,7 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int pending;
int tx_stop_threshold;
int tx_wake_threshold;
@@ -652,7 +655,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
}
/* Get System Network Statistics */
-static struct rtnl_link_stats64 *
+static void
mvneta_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -686,8 +689,6 @@ mvneta_get_stats64(struct net_device *dev,
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
/* Rx descriptors helper methods */
@@ -820,8 +821,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
/* Only 255 descriptors can be added at once; assume the caller
 * processes TX descriptors in quanta of less than 256
*/
- val = pend_desc;
+ val = pend_desc + txq->pending;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ txq->pending = 0;
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
@@ -1758,8 +1760,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq, int num)
+ struct mvneta_tx_queue *txq, int num,
+ struct netdev_queue *nq)
{
+ unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
for (i = 0; i < num; i++) {
@@ -1767,6 +1771,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
txq->txq_get_index;
struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+ if (skb) {
+ bytes_compl += skb->len;
+ pkts_compl++;
+ }
+
mvneta_txq_inc_get(txq);
if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1777,6 +1786,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
continue;
dev_kfree_skb_any(skb);
}
+
+ netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Handle end of transmission */
@@ -1790,7 +1801,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
txq->count -= tx_done;
@@ -2400,12 +2411,18 @@ out:
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
+ netdev_tx_sent_queue(nq, len);
+ txq->count += frags;
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -2424,9 +2441,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
/* reset txq */
txq->count = 0;
@@ -2750,11 +2768,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
- budget -= rx_done;
-
- if (budget > 0) {
+ if (rx_done < budget) {
cause_rx_tx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
if (pp->neta_armada3700) {
unsigned long flags;
@@ -2952,6 +2968,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
static void mvneta_txq_deinit(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
kfree(txq->tx_skb);
if (txq->tso_hdrs)
@@ -2963,6 +2981,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
+ netdev_tx_reset_queue(nq);
+
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
@@ -3908,6 +3928,25 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
}
+static void mvneta_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int mvneta_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_wol(dev->phydev, wol);
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3920,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_do_ioctl = mvneta_ioctl,
};
-const struct ethtool_ops mvneta_eth_tool_ops = {
+static const struct ethtool_ops mvneta_eth_tool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
@@ -3937,6 +3976,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
+ .get_wol = mvneta_ethtool_get_wol,
+ .set_wol = mvneta_ethtool_set_wol,
};
/* Initialize hw */
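
The mvneta transmit hunks above do two related things: they add BQL accounting via netdev_tx_sent_queue()/netdev_tx_completed_queue(), and they defer the doorbell write (mvneta_txq_pend_desc_add) while skb->xmit_more is set, as long as no more than MVNETA_TXQ_DEC_SENT_MASK (255) descriptors are pending. The standalone sketch below shows just that batching decision; struct txq_state and ring_doorbell are hypothetical, and only the 255 limit comes from the diff.

#include <stdbool.h>

#define TXQ_DEC_SENT_MASK 0xff  /* hardware accepts at most 255 new descriptors per update */

struct txq_state {
        int pending;            /* descriptors queued but not yet announced to hardware */
};

static void txq_queue_frags(struct txq_state *txq, int frags,
                            bool xmit_more, bool queue_stopped,
                            void (*ring_doorbell)(int count))
{
        /* Flush when the stack stops batching, the queue is stopped, or the
         * pending count would overflow the register field.
         */
        if (!xmit_more || queue_stopped ||
            txq->pending + frags > TXQ_DEC_SENT_MASK) {
                ring_doorbell(txq->pending + frags);
                txq->pending = 0;
        } else {
                txq->pending += frags;  /* defer the MMIO write */
        }
}
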
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4fe430ceb194..d00421b9ffea 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -154,6 +154,7 @@
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
@@ -252,12 +253,8 @@
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
-#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
- 0x400 + (port) * 0x400)
-#define MVPP2_MIB_LATE_COLLISION 0x7c
-#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
-#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
@@ -513,28 +510,28 @@ enum mvpp2_tag_type {
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
-#define MVPP2_PRS_RI_VLAN_MASK 0xc
-#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
-#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
-#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
-#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
-#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
-#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
@@ -822,9 +819,6 @@ struct mvpp2_tx_queue {
/* Per-CPU control of physical Tx queues */
struct mvpp2_txq_pcpu __percpu *pcpu;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
-
u32 done_pkts_coal;
/* Virtual address of the Tx DMA descriptors array */
@@ -924,6 +918,7 @@ struct mvpp2_bm_pool {
int buf_size;
/* Packet size */
int pkt_size;
+ int frag_size;
/* BPPE virtual base address */
u32 *virt_addr;
@@ -932,10 +927,6 @@ struct mvpp2_bm_pool {
/* Ports using BM pool */
u32 port_map;
-
- /* Occupied buffers indicator */
- atomic_t in_use;
- int in_use_thresh;
};
struct mvpp2_buff_hdr {
@@ -991,7 +982,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
txq_pcpu->buffs + txq_pcpu->txq_put_index;
tx_buf->skb = skb;
tx_buf->size = tx_desc->data_size;
- tx_buf->phys = tx_desc->buf_phys_addr;
+ tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3364,6 +3355,22 @@ static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pool->frag_size);
+ else
+ return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
/* Buffer Manager configuration routines */
/* Create pool */
@@ -3381,7 +3388,8 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
if (!bm_pool->virt_addr)
return -ENOMEM;
- if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+ if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
bm_pool->phys_addr);
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
@@ -3401,7 +3409,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
- atomic_set(&bm_pool->in_use, 0);
return 0;
}
@@ -3427,7 +3434,7 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
for (i = 0; i < bm_pool->buf_num; i++) {
dma_addr_t buf_phys_addr;
- u32 vaddr;
+ unsigned long vaddr;
/* Get buffer virtual address (indirect access) */
buf_phys_addr = mvpp2_read(priv,
@@ -3439,7 +3446,8 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
if (!vaddr)
break;
- dev_kfree_skb_any((struct sk_buff *)vaddr);
+
+ mvpp2_frag_free(bm_pool, (void *)vaddr);
}
/* Update BM driver with number of buffers removed from pool */
@@ -3553,29 +3561,28 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
-/* Allocate skb for BM pool */
-static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_phys_addr,
- gfp_t gfp_mask)
+static void *mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *buf_phys_addr,
+ gfp_t gfp_mask)
{
- struct sk_buff *skb;
dma_addr_t phys_addr;
+ void *data;
- skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
- if (!skb)
+ data = mvpp2_frag_alloc(bm_pool);
+ if (!data)
return NULL;
- phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
+ phys_addr = dma_map_single(port->dev->dev.parent, data,
MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
- dev_kfree_skb_any(skb);
+ mvpp2_frag_free(bm_pool, data);
return NULL;
}
*buf_phys_addr = phys_addr;
- return skb;
+ return data;
}
/* Set pool number in a BM cookie */
@@ -3590,14 +3597,15 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
}
/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- u32 buf_phys_addr, u32 buf_virt_addr)
+ dma_addr_t buf_phys_addr,
+ unsigned long buf_virt_addr)
{
mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
@@ -3605,7 +3613,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
- u32 buf_phys_addr, u32 buf_virt_addr,
+ dma_addr_t buf_phys_addr,
+ unsigned long buf_virt_addr,
int mc_id)
{
u32 val = 0;
@@ -3620,7 +3629,8 @@ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- u32 phys_addr, u32 cookie)
+ dma_addr_t phys_addr,
+ unsigned long cookie)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
@@ -3631,10 +3641,9 @@ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
- struct sk_buff *skb;
int i, buf_size, total_size;
- u32 bm;
dma_addr_t phys_addr;
+ void *buf;
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
@@ -3647,18 +3656,17 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
return 0;
}
- bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
for (i = 0; i < buf_num; i++) {
- skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
- if (!skb)
+ buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+ if (!buf)
break;
- mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+ mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
+ (unsigned long)buf);
}
/* Update BM driver with number of buffers added to pool */
bm_pool->buf_num += i;
- bm_pool->in_use_thresh = bm_pool->buf_num / 4;
netdev_dbg(port->dev,
"%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
@@ -3710,6 +3718,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
port->priv, new_pool);
new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
/* Allocate buffers for this pool */
num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
@@ -3778,6 +3789,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
}
port_pool->pkt_size = pkt_size;
+ port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
@@ -4379,27 +4392,50 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
* will be generated by HW.
*/
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq, u32 pkts)
+ struct mvpp2_rx_queue *rxq)
{
- u32 val;
+ if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
+ rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
+}
+
+static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
+{
+ u64 tmp = (u64)clk_hz * usec;
+
+ do_div(tmp, USEC_PER_SEC);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
+{
+ u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+ do_div(tmp, clk_hz);
- rxq->pkts_coal = pkts;
+ return tmp > U32_MAX ? U32_MAX : tmp;
}
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq, u32 usec)
+ struct mvpp2_rx_queue *rxq)
{
- u32 val;
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
- val = (port->priv->tclk / USEC_PER_SEC) * usec;
- mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ rxq->time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+ }
- rxq->time_coal = usec;
+ mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
/* Free Tx queue skbuffs */
@@ -4413,13 +4449,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
@@ -4543,8 +4578,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
/* Set coalescing pkts and time */
- mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
- mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -4994,23 +5029,18 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- u32 bm, int is_recycle)
+ struct mvpp2_bm_pool *bm_pool, u32 bm)
{
- struct sk_buff *skb;
dma_addr_t phys_addr;
-
- if (is_recycle &&
- (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
- return 0;
+ void *buf;
/* No recycle or too many buffers are in use, so allocate a new skb */
- skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
- if (!skb)
+ buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+ if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
- atomic_dec(&bm_pool->in_use);
+ mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+
return 0;
}
@@ -5051,10 +5081,10 @@ static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
struct mvpp2_buff_hdr *buff_hdr;
struct sk_buff *skb;
u32 rx_status = rx_desc->status;
- u32 buff_phys_addr;
- u32 buff_virt_addr;
- u32 buff_phys_addr_next;
- u32 buff_virt_addr_next;
+ dma_addr_t buff_phys_addr;
+ unsigned long buff_virt_addr;
+ dma_addr_t buff_phys_addr_next;
+ unsigned long buff_virt_addr_next;
int mc_id;
int pool_id;
@@ -5101,14 +5131,17 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
+ unsigned int frag_size;
dma_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
+ void *data;
rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
phys_addr = rx_desc->buf_phys_addr;
+ data = (void *)(uintptr_t)rx_desc->buf_cookie;
bm = mvpp2_bm_cookie_build(rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5129,14 +5162,24 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
+
mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
rx_desc->buf_cookie);
continue;
}
- skb = (struct sk_buff *)rx_desc->buf_cookie;
+ if (bm_pool->frag_size > PAGE_SIZE)
+ frag_size = 0;
+ else
+ frag_size = bm_pool->frag_size;
+
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ netdev_warn(port->dev, "skb build failed\n");
+ goto err_drop_frame;
+ }
- err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+ err = mvpp2_rx_refill(port, bm_pool, bm);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
goto err_drop_frame;
@@ -5147,9 +5190,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
rcvd_pkts++;
rcvd_bytes += rx_bytes;
- atomic_inc(&bm_pool->in_use);
- skb_reserve(skb, MVPP2_MH_SIZE);
+ skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvpp2_rx_csum(port, rx_status, skb);
@@ -5405,7 +5447,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
if (budget > 0) {
cause_rx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
mvpp2_interrupts_enable(port);
}
@@ -5739,7 +5781,7 @@ error:
return err;
}
-static struct rtnl_link_stats64 *
+static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5771,8 +5813,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -5803,8 +5843,8 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
rxq->time_coal = c->rx_coalesce_usecs;
rxq->pkts_coal = c->rx_max_coalesced_frames;
- mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
- mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
}
for (queue = 0; queue < txq_number; queue++) {
@@ -5973,8 +6013,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2_tx_queue *txq;
txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return -ENOMEM;
+ if (!txq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
if (!txq->pcpu) {
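
The mvpp2 coalescing rework above no longer writes user-supplied values straight to registers: the packet threshold is capped at MVPP2_OCCUPIED_THRESH_MASK, and the time delay is converted from microseconds to tclk cycles, clamped to MVPP2_MAX_ISR_RX_THRESHOLD, then converted back so the stored value matches what the hardware will really do. A plain-C sketch of that round trip follows; the 0xfffff0 limit is taken from the hunk, everything else is illustrative, and plain 64-bit division stands in for do_div().

#include <stdint.h>

#define USEC_PER_SEC            1000000ULL
#define MAX_ISR_RX_THRESHOLD    0xfffff0u       /* register limit from the diff */

static uint32_t usec_to_cycles(uint32_t usec, unsigned long clk_hz)
{
        uint64_t tmp = (uint64_t)clk_hz * usec / USEC_PER_SEC;

        return tmp > UINT32_MAX ? UINT32_MAX : (uint32_t)tmp;
}

static uint32_t cycles_to_usec(uint32_t cycles, unsigned long clk_hz)
{
        uint64_t tmp = (uint64_t)cycles * USEC_PER_SEC / clk_hz;

        return tmp > UINT32_MAX ? UINT32_MAX : (uint32_t)tmp;
}

/* Clamp the requested delay and return the value to program into the register. */
static uint32_t clamp_time_coal(uint32_t *usec, unsigned long clk_hz)
{
        uint32_t val = usec_to_cycles(*usec, clk_hz);

        if (val > MAX_ISR_RX_THRESHOLD) {
                *usec = cycles_to_usec(MAX_ISR_RX_THRESHOLD, clk_hz);
                val = usec_to_cycles(*usec, clk_hz);    /* re-evaluate after rounding */
        }
        return val;
}
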
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3af2814ada23..28cb36d9e50a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -274,8 +274,6 @@ enum hash_table_entry {
HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
-static int pxa168_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
@@ -987,10 +985,6 @@ static int pxa168_init_phy(struct net_device *dev)
if (err)
return err;
- err = pxa168_get_link_ksettings(dev, &cmd);
- if (err)
- return err;
-
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
@@ -1261,7 +1255,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
}
work_done = rxq_process(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrl(pep, INT_MASK, ALL_INTS);
}
@@ -1370,18 +1364,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
return -EOPNOTSUPP;
}
-static int pxa168_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- int err;
-
- err = phy_read_status(dev->phydev);
- if (err == 0)
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
-
- return err;
-}
-
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1396,7 +1378,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = pxa168_get_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9146a514fb33..edb95271a4f2 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
return supported;
}
-static int skge_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int skge_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = skge_supported_modes(hw);
+ supported = skge_supported_modes(hw);
if (hw->copper) {
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+
+ advertising = skge->advertising;
+ cmd->base.autoneg = skge->autoneg;
+ cmd->base.speed = skge->speed;
+ cmd->base.duplex = skge->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- ecmd->advertising = skge->advertising;
- ecmd->autoneg = skge->autoneg;
- ethtool_cmd_speed_set(ecmd, skge->speed);
- ecmd->duplex = skge->duplex;
return 0;
}
-static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int skge_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
const struct skge_hw *hw = skge->hw;
u32 supported = skge_supported_modes(hw);
int err = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- ecmd->advertising = supported;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ advertising = supported;
skge->duplex = -1;
skge->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
skge->speed = speed;
- skge->duplex = ecmd->duplex;
+ skge->duplex = cmd->base.duplex;
}
- skge->autoneg = ecmd->autoneg;
- skge->advertising = ecmd->advertising;
+ skge->autoneg = cmd->base.autoneg;
+ skge->advertising = advertising;
if (netif_running(dev)) {
skge_down(dev);
@@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
}
static const struct ethtool_ops skge_ethtool_ops = {
- .get_settings = skge_get_settings,
- .set_settings = skge_set_settings,
.get_drvinfo = skge_get_drvinfo,
.get_regs_len = skge_get_regs_len,
.get_regs = skge_get_regs,
@@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
.set_phys_id = skge_set_phys_id,
.get_sset_count = skge_get_sset_count,
.get_ethtool_stats = skge_get_ethtool_stats,
+ .get_link_ksettings = skge_get_link_ksettings,
+ .set_link_ksettings = skge_set_link_ksettings,
};
/*
@@ -3190,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
}
}
-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
{
struct skge_port *skge = container_of(napi, struct skge_port, napi);
struct net_device *dev = skge->netdev;
@@ -3203,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
- for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+ for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
u32 control;
@@ -3225,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
wmb();
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
- if (work_done < to_do) {
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
unsigned long flags;
- napi_gro_flush(napi, false);
spin_lock_irqsave(&hw->hw_lock, flags);
- __napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
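
The jme, korina, skge and sky2 changes above all retire the legacy get_settings/set_settings ethtool hooks in favour of get_link_ksettings/set_link_ksettings, translating any remaining SUPPORTED_*/ADVERTISED_* u32 bitmaps with ethtool_convert_legacy_u32_to_link_mode() and ethtool_convert_link_mode_to_legacy_u32(). A kernel-style sketch of the read side for a hypothetical driver follows; example_priv and example_get_link_ksettings are made-up names, while the conversion helpers are the real ones used in the skge hunk.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct example_priv {                   /* hypothetical driver state */
        u32 supported;                  /* legacy SUPPORTED_* bitmap */
        u32 advertising;                /* legacy ADVERTISED_* bitmap */
        u32 speed;
        u8 duplex;
        u8 autoneg;
};

static int example_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct example_priv *priv = netdev_priv(dev);

        cmd->base.speed = priv->speed;
        cmd->base.duplex = priv->duplex;
        cmd->base.autoneg = priv->autoneg;

        /* legacy u32 bitmaps -> new link-mode bitmaps */
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                priv->supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                priv->advertising);
        return 0;
}
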
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b60ad0e56a9f..2b2cc3f3ca10 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
sky2->rx_stats.bytes += bytes;
u64_stats_update_end(&sky2->rx_stats.syncp);
- dev->last_rx = jiffies;
+ sky2->last_rx = jiffies;
sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
}
@@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev)
u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
/* If idle and MAC or PCI is stuck */
- if (sky2->check.last == dev->last_rx &&
+ if (sky2->check.last == sky2->last_rx &&
((mac_rp == sky2->check.mac_rp &&
mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
/* Check if the PCI RX hang */
@@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev)
fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
return 1;
} else {
- sky2->check.last = dev->last_rx;
+ sky2->check.last = sky2->last_rx;
sky2->check.mac_rp = mac_rp;
sky2->check.mac_lev = mac_lev;
sky2->check.fifo_rp = fifo_rp;
@@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
| SUPPORTED_1000baseT_Full;
}
-static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = sky2_supported_modes(hw);
- ecmd->phy_address = PHY_ADDR_MARV;
+ supported = sky2_supported_modes(hw);
+ cmd->base.phy_address = PHY_ADDR_MARV;
if (sky2_is_copper(hw)) {
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, sky2->speed);
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
+ cmd->base.port = PORT_TP;
+ cmd->base.speed = sky2->speed;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- ecmd->port = PORT_FIBRE;
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
- ecmd->advertising = sky2->advertising;
- ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ advertising = sky2->advertising;
+ cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
? AUTONEG_ENABLE : AUTONEG_DISABLE;
- ecmd->duplex = sky2->duplex;
+ cmd->base.duplex = sky2->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
u32 supported = sky2_supported_modes(hw);
+ u32 new_advertising;
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if (ecmd->advertising & ~supported)
+ ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (new_advertising & ~supported)
return -EINVAL;
if (sky2_is_copper(hw))
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
else
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
@@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
sky2->speed = speed;
- sky2->duplex = ecmd->duplex;
+ sky2->duplex = cmd->base.duplex;
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
@@ -3888,8 +3900,8 @@ static void sky2_set_multicast(struct net_device *dev)
gma_write16(hw, port, GM_RX_CTRL, reg);
}
-static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void sky2_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -3929,8 +3941,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
stats->rx_dropped = dev->stats.rx_dropped;
stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
-
- return stats;
}
/* Can have one global because blinking is controlled by
@@ -4407,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops sky2_ethtool_ops = {
- .get_settings = sky2_get_settings,
- .set_settings = sky2_set_settings,
.get_drvinfo = sky2_get_drvinfo,
.get_wol = sky2_get_wol,
.set_wol = sky2_set_wol,
@@ -4431,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.set_phys_id = sky2_set_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .get_link_ksettings = sky2_get_link_ksettings,
+ .set_link_ksettings = sky2_set_link_ksettings,
};
#ifdef CONFIG_SKY2_DEBUG
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ec6dcd80152b..0fe160796842 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2247,6 +2247,7 @@ struct sky2_port {
u16 rx_data_size;
u16 rx_nfrags;
+ unsigned long last_rx;
struct {
unsigned long last;
u32 mac_rp;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3dd87889e67e..9e757684816d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_eth *eth)
}
}
-static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
+static void mtk_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hw_stats = mac->hw_stats;
@@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
storage->tx_errors = dev->stats.tx_errors;
storage->rx_dropped = dev->stats.rx_dropped;
storage->tx_dropped = dev->stats.tx_dropped;
-
- return storage;
}
static inline int mtk_max_frag_size(int mtu)
@@ -2517,7 +2515,7 @@ static int mtk_remove(struct platform_device *pdev)
}
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7623-eth" },
+ { .compatible = "mediatek,mt2701-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
return -ETIMEDOUT;
}
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..fa6d2354a0e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data)
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
- unsigned long flags;
struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+ unsigned long flags;
+ bool kick;
spin_lock_irqsave(&tasklet_ctx->lock, flags);
/* When migrating CQs between EQs will be implemented, please note
@@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
atomic_inc(&cq->refcount);
+ kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+ if (kick)
+ tasklet_schedule(&tasklet_ctx->task);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
@@ -101,13 +105,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +128,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +307,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -349,9 +355,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +376,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 504461a464c5..e7b81a305469 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -62,12 +62,13 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
- unsigned long flags;
+ unsigned int seq;
u64 nsec;
- read_lock_irqsave(&mdev->clock_lock, flags);
- nsec = timecounter_cyc2time(&mdev->clock, timestamp);
- read_unlock_irqrestore(&mdev->clock_lock, flags);
+ do {
+ seq = read_seqbegin(&mdev->clock_lock);
+ nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+ } while (read_seqretry(&mdev->clock_lock, seq));
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = ns_to_ktime(nsec);
@@ -95,9 +96,9 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
unsigned long flags;
if (timeout) {
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev->last_overflow_check = jiffies;
}
}
@@ -128,10 +129,10 @@ static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
@@ -149,9 +150,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
ptp_clock_info);
unsigned long flags;
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_adjtime(&mdev->clock, delta);
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
@@ -172,9 +173,9 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
unsigned long flags;
u64 ns;
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
ns = timecounter_read(&mdev->clock);
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -198,9 +199,9 @@ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
unsigned long flags;
/* reset the timecounter */
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles, ns);
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
@@ -266,7 +267,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
if (mdev->ptp_clock)
return;
- rwlock_init(&mdev->clock_lock);
+ seqlock_init(&mdev->clock_lock);
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
@@ -276,10 +277,10 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev->nominal_c_mult = mdev->cycles.mult;
- write_lock_irqsave(&mdev->clock_lock, flags);
+ write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
- write_unlock_irqrestore(&mdev->clock_lock, flags);
+ write_sequnlock_irqrestore(&mdev->clock_lock, flags);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86a30df..c4d714fcc7da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
__be32 proto_admin;
+ u8 cur_autoneg;
int ret;
u32 ptys_adv = ethtool2ptys_link_modes(
@@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
return 0;
}
- proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
- cpu_to_be32(ptys_adv) :
- speed_set_ptys_admin(priv, speed,
- ptys_reg.eth_proto_cap);
+ cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
+ AUTONEG_DISABLE : AUTONEG_ENABLE;
+
+ if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
+ proto_admin = speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
+ if ((be32_to_cpu(proto_admin) &
+ (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
+ MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
+ (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
+ ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
+ } else {
+ proto_admin = cpu_to_be32(ptys_adv);
+ ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
+ }
proto_admin &= ptys_reg.eth_proto_cap;
if (!proto_admin) {
@@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
return -EINVAL; /* nothing to change due to bad input */
}
- if (proto_admin == ptys_reg.eth_proto_admin)
+ if ((proto_admin == ptys_reg.eth_proto_admin) &&
+ ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
+ (link_ksettings->base.autoneg == cur_autoneg)))
return 0; /* Nothing to change */
en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
@@ -1099,7 +1113,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_size = tx_size;
new_prof.rx_ring_size = rx_size;
- err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
@@ -1732,8 +1746,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- memset(channel, 0, sizeof(*channel));
-
channel->max_rx = MAX_RX_RINGS;
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
@@ -1752,10 +1764,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
int xdp_count;
int err = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
- channel->rx_count > MAX_RX_RINGS ||
- !channel->tx_count || !channel->rx_count)
+ if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1779,7 +1788,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
- err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
@@ -1793,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- if (dev->num_tc)
+ if (netdev_get_num_tc(dev))
mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
@@ -1985,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
- return -ENOSYS;
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4910d9af1933..afe4444e5434 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1321,7 +1321,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct rtnl_link_stats64 *
+static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1330,8 +1330,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx4_en_fold_software_stats(dev);
netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
-
- return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1384,6 +1382,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+ u32 pkt_rate_high, pkt_rate_low;
struct mlx4_en_cq *cq;
unsigned long packets;
unsigned long rate;
@@ -1397,37 +1396,40 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
return;
+ pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
+ pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
+
for (ring = 0; ring < priv->rx_ring_num; ring++) {
rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
- rx_pkt_diff = ((unsigned long) (rx_packets -
- priv->last_moder_packets[ring]));
+ rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
packets = rx_pkt_diff;
rate = packets * HZ / period;
- avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
- priv->last_moder_bytes[ring])) / packets : 0;
+ avg_pkt_size = packets ? (rx_bytes -
+ priv->last_moder_bytes[ring]) / packets : 0;
/* Apply auto-moderation only when packet rate
* exceeds a rate at which it matters */
if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
- if (rate < priv->pkt_rate_low)
+ if (rate <= pkt_rate_low)
moder_time = priv->rx_usecs_low;
- else if (rate > priv->pkt_rate_high)
+ else if (rate >= pkt_rate_high)
moder_time = priv->rx_usecs_high;
else
- moder_time = (rate - priv->pkt_rate_low) *
+ moder_time = (rate - pkt_rate_low) *
(priv->rx_usecs_high - priv->rx_usecs_low) /
- (priv->pkt_rate_high - priv->pkt_rate_low) +
+ (pkt_rate_high - pkt_rate_low) +
priv->rx_usecs_low;
} else {
moder_time = priv->rx_usecs_low;
}
- if (moder_time != priv->last_moder_time[ring]) {
+ cq = priv->rx_cq[ring];
+ if (moder_time != priv->last_moder_time[ring] ||
+ cq->moder_cnt != priv->rx_frames) {
priv->last_moder_time[ring] = moder_time;
- cq = priv->rx_cq[ring];
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
@@ -1697,6 +1699,14 @@ int mlx4_en_start_port(struct net_device *dev)
priv->port, err);
goto tx_err;
}
+
+ err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
+ if (err) {
+ en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
+ dev->mtu, priv->port, err);
+ goto tx_err;
+ }
+
/* Set default qp number */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
if (err) {
@@ -1748,8 +1758,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process all completions if exist to prevent
* the queues freezing if they are full
*/
- for (i = 0; i < priv->rx_ring_num; i++)
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
+ local_bh_enable();
+ }
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2039,6 +2052,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->tx_cq[t] && priv->tx_cq[t][i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
}
+ kfree(priv->tx_ring[t]);
+ kfree(priv->tx_cq[t]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2181,9 +2196,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
- struct mlx4_en_port_profile *prof)
+ struct mlx4_en_port_profile *prof,
+ bool carry_xdp_prog)
{
- int t;
+ struct bpf_prog *xdp_prog;
+ int i, t;
mlx4_en_copy_priv(tmp, priv, prof);
@@ -2197,6 +2214,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
}
return -ENOMEM;
}
+
+ /* All rx_rings have the same xdp_prog. Pick the first one. */
+ xdp_prog = rcu_dereference_protected(
+ priv->rx_ring[0]->xdp_prog,
+ lockdep_is_held(&priv->mdev->state_lock));
+
+ if (xdp_prog && carry_xdp_prog) {
+ xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+ if (IS_ERR(xdp_prog)) {
+ mlx4_en_free_resources(tmp);
+ return PTR_ERR(xdp_prog);
+ }
+ for (i = 0; i < tmp->rx_ring_num; i++)
+ rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+ xdp_prog);
+ }
+
return 0;
}
@@ -2211,7 +2245,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int t;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2245,11 +2278,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_en_free_resources(priv);
mutex_unlock(&mdev->state_lock);
- for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
- kfree(priv->tx_ring[t]);
- kfree(priv->tx_cq[t]);
- }
-
free_netdev(dev);
}
@@ -2752,7 +2780,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
}
- err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
if (err) {
if (prog)
bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3496,7 +3524,7 @@ int mlx4_en_reset_config(struct net_device *dev,
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
- err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 040da4b16b1c..930f961fee42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -35,7 +35,6 @@
#define _MLX4_EN_PORT_H_
-#define SET_PORT_GEN_ALL_VALID 0x7
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eac527e25ec9..d85e6446f9d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -33,6 +33,7 @@
#include <net/busy_poll.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@@ -514,8 +515,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
- if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+ local_bh_disable();
napi_reschedule(&priv->rx_cq[ring]->napi);
+ local_bh_enable();
+ }
}
}
@@ -706,7 +710,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
- GFP_ATOMIC | __GFP_COLD))
+ GFP_ATOMIC | __GFP_COLD |
+ __GFP_MEMALLOC))
break;
ring->prod++;
} while (--missing);
@@ -925,10 +930,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length, cq->ring,
&doorbell_pending)))
goto consumed;
+ trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(dev, xdp_prog, act);
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 5886ad78058f..3ed42199d3f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
u8 up = 0;
- if (dev->num_tc)
+ if (netdev_get_num_tc(dev))
return skb_tx_hash(dev, skb);
if (skb_vlan_tag_present(skb))
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..39232b6a974f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
- int cqn = -1;
+ int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
@@ -835,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq_set_ci(eq, 1);
- /* cqn is 24bit wide but is initialized such that its higher bits
- * are ones too. Thus, if we got any event, cqn's high bits should be off
- * and we need to schedule the tasklet.
- */
- if (!(cqn & ~0xffffff))
- tasklet_schedule(&eq->tasklet_ctx.task);
-
return eqes_found;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 84bab9f0732e..3fe885ce1902 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
func_cap->physical_port = field;
if (func_cap->physical_port != gen_or_port) {
- err = -ENOSYS;
+ err = -EINVAL;
goto out;
}
@@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
- (ilog2(cache_line_size()) - 4) << 5;
+ ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
@@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
return PTR_ERR(mailbox);
context = mailbox->buf;
- context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ context->flags2 |= SET_PORT_GEN_PHV_VALID;
if (phv_bit)
context->phv_en |= SET_PORT_GEN_PHV_EN;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
return;
mlx4_stop_catas_poll(dev);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+ mlx4_is_slave(dev)) {
+ /* In mlx4_remove_one on a VF */
+ u32 slave_read =
+ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+ __func__);
+ mlx4_enter_error_state(dev->persist);
+ }
+ }
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index bffa6f345f2f..15ef787e71ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
*/
if (hca_param.global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
- return -ENOSYS;
+ return -EINVAL;
}
mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
@@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
PF_CONTEXT_BEHAVIOUR_MASK) {
mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
- return -ENOSYS;
+ return -EINVAL;
}
dev->caps.num_ports = func_cap.num_ports;
@@ -3492,7 +3492,7 @@ slave_start:
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
- err = -ENOSYS;
+ err = -EOPNOTSUPP;
mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..b4f1bc56cc68 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -487,6 +487,7 @@ struct mlx4_slave_state {
bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
+ u16 user_mtu[MLX4_MAX_PORTS + 1];
u16 mtu[MLX4_MAX_PORTS + 1];
__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
@@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u16 max_user_mtu[MLX4_MAX_PORTS + 1];
u8 pptx;
u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
@@ -774,7 +776,9 @@ struct mlx4_vlan_table {
int max;
};
-#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \
+ MLX4_FLAG_V_PPRX_MASK | \
+ MLX4_FLAG_V_PPTX_MASK)
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
@@ -787,7 +791,7 @@ enum {
struct mlx4_set_port_general_context {
u16 reserved1;
- u8 v_ignore_fcs;
+ u8 flags2;
u8 flags;
union {
u8 ignore_fcs;
@@ -803,7 +807,8 @@ struct mlx4_set_port_general_context {
u16 reserved4;
u32 reserved5;
u8 phv_en;
- u8 reserved6[3];
+ u8 reserved6[5];
+ __be16 user_mtu;
};
struct mlx4_set_port_rqp_calc_context {
@@ -1220,6 +1225,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ba1c6cd0cc79..4941b692e947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -102,7 +102,8 @@
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
-#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER
+#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
+ PAGE_ALLOC_COSTLY_ORDER)
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
* and 4K allocations) */
@@ -424,9 +425,9 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
- rwlock_t clock_lock;
u32 nominal_c_mult;
struct cyclecounter cycles;
+ seqlock_t clock_lock;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
@@ -679,7 +680,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
- struct mlx4_en_port_profile *prof);
+ struct mlx4_en_port_profile *prof,
+ bool carry_xdp_prog);
void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b656dd5772e5..4e36e287d605 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -50,7 +50,11 @@
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
-#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
+#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1)
+#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5)
+#define MLX4_FLAG_V_MTU_MASK BIT(0)
+#define MLX4_FLAG_V_PPRX_MASK BIT(1)
+#define MLX4_FLAG_V_PPTX_MASK BIT(2)
#define MLX4_IGNORE_FCS_MASK 0x1
#define MLX4_TC_MAX_NUMBER 8
@@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
return;
}
+static void
+mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ u16 mtu, prev_mtu;
+
+ /* Mtu is configured as the max MTU among all
+ * the functions on the port.
+ */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
+ int i;
+
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++)
+ master->max_mtu[port] =
+ max_t(u16, master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ u16 user_mtu, prev_user_mtu;
+
+ /* User Mtu is configured as the max USER_MTU among all
+ * the functions on the port.
+ */
+ user_mtu = be16_to_cpu(gen_context->user_mtu);
+ user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
+ prev_user_mtu = slave_st->user_mtu[port];
+ slave_st->user_mtu[port] = user_mtu;
+ if (user_mtu > master->max_user_mtu[port])
+ master->max_user_mtu[port] = user_mtu;
+ if (user_mtu < prev_user_mtu &&
+ prev_user_mtu == master->max_user_mtu[port]) {
+ int i;
+
+ slave_st->user_mtu[port] = user_mtu;
+ master->max_user_mtu[port] = user_mtu;
+ for (i = 0; i < dev->num_slaves; i++)
+ master->max_user_mtu[port] =
+ max_t(u16, master->max_user_mtu[port],
+ master->slave_state[i].user_mtu[port]);
+ }
+ gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ (gen_context->pptx != master->pptx ||
+ gen_context->pprx != master->pprx)) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
+}
+
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_port_info *port_info;
- struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
- struct mlx4_slave_state *slave_st = &master->slave_state[slave];
struct mlx4_set_port_rqp_calc_context *qpn_context;
struct mlx4_set_port_general_context *gen_context;
struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
@@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
int base;
u32 in_modifier;
u32 promisc;
- u16 mtu, prev_mtu;
int err;
int i, j;
int offset;
@@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
is_eth = op_mod;
port_info = &priv->port[port];
- /* Slaves cannot perform SET_PORT operations except changing MTU */
+ /* Slaves cannot perform SET_PORT operations,
+ * except for changing MTU and USER_MTU.
+ */
if (is_eth) {
if (slave != dev->caps.function &&
in_modifier != MLX4_SET_PORT_GENERAL &&
@@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
break;
case MLX4_SET_PORT_GENERAL:
gen_context = inbox->buf;
- /* Mtu is configured as the max MTU among all the
- * the functions on the port. */
- mtu = be16_to_cpu(gen_context->mtu);
- mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
- ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
- prev_mtu = slave_st->mtu[port];
- slave_st->mtu[port] = mtu;
- if (mtu > master->max_mtu[port])
- master->max_mtu[port] = mtu;
- if (mtu < prev_mtu && prev_mtu ==
- master->max_mtu[port]) {
- slave_st->mtu[port] = mtu;
- master->max_mtu[port] = mtu;
- for (i = 0; i < dev->num_slaves; i++) {
- master->max_mtu[port] =
- max(master->max_mtu[port],
- master->slave_state[i].mtu[port]);
- }
- }
- gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
- /* Slave cannot change Global Pause configuration */
- if (slave != mlx4_master_func_num(dev) &&
- ((gen_context->pptx != master->pptx) ||
- (gen_context->pprx != master->pprx))) {
- gen_context->pptx = master->pptx;
- gen_context->pprx = master->pprx;
- mlx4_warn(dev,
- "denying Global Pause change for slave:%d\n",
- slave);
- } else {
- master->pptx = gen_context->pptx;
- master->pprx = gen_context->pprx;
- }
+ if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
+ mlx4_en_set_port_mtu(dev, slave, port,
+ gen_context);
+
+ if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
+ mlx4_en_set_port_user_mtu(dev, slave, port,
+ gen_context);
+
+ if (gen_context->flags &
+ (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
+ mlx4_en_set_port_global_pause(dev, slave,
+ gen_context);
+
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
@@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
+ context->user_mtu = cpu_to_be16(user_mtu);
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
+
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+ context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
if (ignore_fcs_value)
context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
else
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 56185a0b827d..6fe9f76ae656 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -77,6 +77,7 @@ struct res_common {
int from_state;
int to_state;
int removing;
+ const char *func_name;
};
enum {
@@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
struct rb_node *node = root->rb_node;
while (node) {
- struct res_common *res = container_of(node, struct res_common,
- node);
+ struct res_common *res = rb_entry(node, struct res_common,
+ node);
if (res_id < res->res_id)
node = node->rb_left;
@@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
/* Figure out where to put new node */
while (*new) {
- struct res_common *this = container_of(*new, struct res_common,
- node);
+ struct res_common *this = rb_entry(*new, struct res_common,
+ node);
parent = *new;
if (res->res_id < this->res_id)
@@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev)
return dev->caps.num_mpts - 1;
}
+static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
+{
+ switch (t) {
+ case RES_QP:
+ return "QP";
+ case RES_CQ:
+ return "CQ";
+ case RES_SRQ:
+ return "SRQ";
+ case RES_XRCD:
+ return "XRCD";
+ case RES_MPT:
+ return "MPT";
+ case RES_MTT:
+ return "MTT";
+ case RES_MAC:
+ return "MAC";
+ case RES_VLAN:
+ return "VLAN";
+ case RES_COUNTER:
+ return "COUNTER";
+ case RES_FS_RULE:
+ return "FS_RULE";
+ case RES_EQ:
+ return "EQ";
+ default:
+ return "INVALID RESOURCE";
+ }
+}
+
static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
{
@@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id,
res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
- enum mlx4_resource type,
- void *res)
+static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
+ enum mlx4_resource type,
+ void *res, const char *func_name)
{
struct res_common *r;
int err = 0;
@@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
}
if (r->state == RES_ANY_BUSY) {
+ mlx4_warn(dev,
+ "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
+ func_name, slave, res_id, mlx4_resource_type_to_str(type),
+ r->func_name);
err = -EBUSY;
goto exit;
}
@@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
+ r->func_name = func_name;
if (res)
*((struct res_common **)res) = r;
@@ -881,6 +917,9 @@ exit:
return err;
}
+#define get_res(dev, slave, res_id, type, res) \
+ _get_res((dev), (slave), (res_id), (type), (res), __func__)
+
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
@@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
- if (r)
+ if (r) {
r->state = r->from_state;
+ r->func_name = "";
+ }
spin_unlock_irq(mlx4_tlock(dev));
}
@@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
case RES_MTT:
return remove_mtt_ok((struct res_mtt *)res, extra);
case RES_MAC:
- return -ENOSYS;
+ return -EOPNOTSUPP;
case RES_EQ:
return remove_eq_ok((struct res_eq *)res);
case RES_COUNTER:
@@ -2980,6 +3021,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3857,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (cmd->cmdif_rev > CMD_IF_REV) {
dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
CMD_IF_REV, cmd->cmdif_rev);
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto err_free_page;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 32d4af9b594d..336d4738b807 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
+ cq->uar = dev->priv.uar;
+
return 0;
err_cmd:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9dbc28f6b97..a62f4b6a21a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -71,6 +71,16 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
if (dev_ctx->context) {
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (dev_ctx->intf->pfault) {
+ if (priv->pfault) {
+ mlx5_core_err(dev, "multiple page fault handlers not supported");
+ } else {
+ priv->pfault_ctx = dev_ctx->context;
+ priv->pfault = dev_ctx->intf->pfault;
+ }
+ }
+#endif
spin_unlock_irq(&priv->ctx_lock);
} else {
kfree(dev_ctx);
@@ -97,6 +107,15 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
if (!dev_ctx)
return;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ spin_lock_irq(&priv->ctx_lock);
+ if (priv->pfault == dev_ctx->intf->pfault)
+ priv->pfault = NULL;
+ spin_unlock_irq(&priv->ctx_lock);
+
+ synchronize_srcu(&priv->pfault_srcu);
+#endif
+
spin_lock_irq(&priv->ctx_lock);
list_del(&dev_ctx->list);
spin_unlock_irq(&priv->ctx_lock);
@@ -329,6 +348,20 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&priv->pfault_srcu);
+ if (priv->pfault)
+ priv->pfault(dev, priv->pfault_ctx, pfault);
+ srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
+}
+#endif
+
void mlx5_dev_list_lock(void)
{
mutex_lock(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..95ca03c0d9f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -51,6 +51,9 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -67,8 +70,13 @@
#define MLX5_RX_HEADROOM NET_SKB_PAD
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
+ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
+ max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
+#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
@@ -98,6 +106,7 @@
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
@@ -111,8 +120,7 @@
#define MLX5E_XDP_IHS_DS_COUNT \
DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT \
- (MLX5E_XDP_IHS_DS_COUNT + \
- (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
#define MLX5E_XDP_TX_WQEBBS \
DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
@@ -259,6 +267,7 @@ struct mlx5e_tstamp {
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
+ u8 *pps_pin_caps;
};
enum {
@@ -369,6 +378,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ u16 rx_headroom;
struct mlx5e_rx_am am; /* Adaptive Moderation */
struct bpf_prog *xdp_prog;
@@ -479,7 +489,7 @@ struct mlx5e_sq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_uar uar;
+ struct mlx5_sq_bfreg bfreg;
struct mlx5e_channel *channel;
int tc;
u32 rate_limit;
@@ -568,8 +578,9 @@ struct mlx5e_vlan_table {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_vlan_rule;
- bool filter_disabled;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ bool filter_disabled;
};
struct mlx5e_l2_table {
@@ -777,9 +788,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -791,7 +804,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -806,7 +820,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
- u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+ u16 ofst = sq->bf_offset;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -832,7 +846,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
struct mlx5_core_cq *mcq;
mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
@@ -840,12 +854,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
- return min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -863,12 +871,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 746a92c13644..37e66eef6fb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -37,6 +37,22 @@ enum {
MLX5E_CYCLES_SHIFT = 23
};
+enum {
+ MLX5E_PIN_MODE_IN = 0x0,
+ MLX5E_PIN_MODE_OUT = 0x1,
+};
+
+enum {
+ MLX5E_OUT_PATTERN_PULSE = 0x0,
+ MLX5E_OUT_PATTERN_PERIODIC = 0x1,
+};
+
+enum {
+ MLX5E_EVENT_MODE_DISABLE = 0x0,
+ MLX5E_EVENT_MODE_REPETETIVE = 0x1,
+ MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+};
+
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
{
@@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
+ mutex_lock(&priv->state_lock);
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression");
- mlx5e_modify_rx_cqe_compression(priv, false);
+ mlx5e_modify_rx_cqe_compression_locked(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ mutex_unlock(&priv->state_lock);
return -ERANGE;
}
memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
@@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+
+ if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ /* For future use: add a loop to find all 1PPS out pins */
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+ MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
+
+ mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ }
if (delta < 0) {
neg_adj = 1;
@@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
return 0;
}
+static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+ if (!MLX5_CAP_GEN(priv->mdev, pps) ||
+ !MLX5_CAP_GEN(priv->mdev, pps_modify))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= tstamp->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ pattern = 1;
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+
+ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(priv->mdev, pin, 0,
+ MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u64 nsec_now, nsec_delta, time_stamp;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
+ int pin = -1;
+ s64 ns;
+
+ if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= tstamp->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ if (on)
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+ MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+
+ return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+}
+
+static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mlx5e_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5e_perout_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.owner = THIS_MODULE,
.max_adj = 100000000,
@@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.gettime64 = mlx5e_ptp_gettime,
.settime64 = mlx5e_ptp_settime,
.enable = NULL,
+ .verify = NULL,
};
static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
@@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
+{
+ int i;
+
+ tstamp->ptp_info.pin_config =
+ kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
+ tstamp->ptp_info.n_pins, GFP_KERNEL);
+ if (!tstamp->ptp_info.pin_config)
+ return -ENOMEM;
+ tstamp->ptp_info.enable = mlx5e_ptp_enable;
+ tstamp->ptp_info.verify = mlx5e_ptp_verify;
+
+ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+ snprintf(tstamp->ptp_info.pin_config[i].name,
+ sizeof(tstamp->ptp_info.pin_config[i].name),
+ "mlx5_pps%d", i);
+ tstamp->ptp_info.pin_config[i].index = i;
+ tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
+ tstamp->ptp_info.pin_config[i].chan = i;
+ }
+
+ return 0;
+}
+
+static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
+ struct mlx5e_tstamp *tstamp)
+{
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ mlx5_query_mtpps(priv->mdev, out, sizeof(out));
+
+ tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+ cap_number_of_pps_pins);
+ tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_in_pins);
+ tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+ tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ struct ptp_clock_event *event)
+{
+ struct mlx5e_tstamp *tstamp = &priv->tstamp;
+
+ ptp_clock_event(tstamp->ptp, event);
+}
+
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
@@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp_info = mlx5e_ptp_clock_info;
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
+ /* Initialize 1PPS data structures */
+#define MAX_PIN_NUM 8
+ tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
+ if (tstamp->pps_pin_caps) {
+ if (MLX5_CAP_GEN(priv->mdev, pps))
+ mlx5e_get_pps_caps(priv, tstamp);
+ if (tstamp->ptp_info.n_pins)
+ mlx5e_init_pin_config(tstamp);
+ } else {
+ mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
+ }
+
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
if (IS_ERR(tstamp->ptp)) {
@@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
priv->tstamp.ptp = NULL;
}
+ kfree(tstamp->pps_pin_caps);
+ kfree(tstamp->ptp_info.pin_config);
+
cancel_delayed_work_sync(&tstamp->overflow_work);
}
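The en_clock.c changes above plug the device's 1PPS pins into the generic Linux PTP pin API rather than a driver-private interface. A minimal userspace sketch, assuming the clock is exposed as /dev/ptp0 (the device path, pin index and helper name are illustrative assumptions, not taken from the patch):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	/* Route pin 0 to the external-timestamp function and arm it; the kernel
	 * forwards these requests to mlx5e_ptp_verify()/mlx5e_extts_configure(). */
	int arm_extts_on_pin0(void)
	{
		struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_EXTTS, .chan = 0 };
		struct ptp_extts_request req = {
			.index = 0,
			.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
		};
		int ret, fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0)
			return -1;
		ret = ioctl(fd, PTP_PIN_SETFUNC, &pin);
		if (!ret)
			ret = ioctl(fd, PTP_EXTTS_REQUEST, &req);
		close(fd);
		return ret;
	}

External timestamps armed this way are then delivered as struct ptp_extts_event records on the same descriptor, produced by mlx5e_pps_event_handler() from the new MLX5_DEV_EVENT_PPS path in en_main.c below.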
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f175518ff07a..bd898d8deda0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
struct mlx5e_resources *res = &mdev->mlx5e_res;
int err;
- err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
- if (err) {
- mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- return err;
- }
-
err = mlx5_core_alloc_pd(mdev, &res->pdn);
if (err) {
mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
- goto err_unmap_free_uar;
+ return err;
}
err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
@@ -121,9 +115,6 @@ err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
mlx5_core_dealloc_pd(mdev, res->pdn);
-err_unmap_free_uar:
- mlx5_unmap_free_uar(mdev, &res->cq_uar);
-
return err;
}
@@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
- mlx5_unmap_free_uar(mdev, &res->cq_uar);
}
int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
int err;
if (!MLX5_CAP_GEN(priv->mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
struct ieee_pfc pfc;
- int err = -ENOTSUPP;
+ int err = -EOPNOTSUPP;
int i;
if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+ netdev_err(netdev, "%s, ets is not supported\n", __func__);
+ return;
+ }
+
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 33a399a8b5d5..cc80522b5854 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
+ NUM_PCIE_COUNTERS(priv) +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -535,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- ch->max_combined = mlx5e_get_max_num_channels(priv->mdev);
+ ch->max_combined = priv->profile->max_nch(priv->mdev);
ch->combined_count = priv->params.num_channels;
}
@@ -543,7 +560,6 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
@@ -554,16 +570,6 @@ static int mlx5e_set_channels(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (ch->rx_count || ch->tx_count) {
- netdev_info(dev, "%s: separate rx/tx count not supported\n",
- __func__);
- return -EINVAL;
- }
- if (count > ncv) {
- netdev_info(dev, "%s: count (%d) > max (%d)\n",
- __func__, count, ncv);
- return -EINVAL;
- }
if (priv->params.num_channels == count)
return 0;
@@ -606,7 +612,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -631,7 +637,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
mutex_lock(&priv->state_lock);
@@ -991,15 +997,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- int i;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ int tt;
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -1007,6 +1016,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ bool hash_changed = false;
void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1028,14 +1038,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
- if (key)
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != priv->params.rss_hfunc) {
+ priv->params.rss_hfunc = hfunc;
+ hash_changed = true;
+ }
+
+ if (key) {
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
+ hash_changed = hash_changed ||
+ priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+ }
- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
- priv->params.rss_hfunc = hfunc;
-
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ if (hash_changed)
+ mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
@@ -1307,7 +1324,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
u32 mlx5_wol_mode;
if (!wol_supported)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (wol->wolopts & ~wol_supported)
return -EINVAL;
@@ -1437,7 +1454,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!rx_mode_changed)
return 0;
@@ -1459,28 +1476,19 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int err = 0;
- bool reset;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- if (reset)
- mlx5e_close_locked(netdev);
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
+ mlx5e_modify_rx_cqe_compression_locked(priv, enable);
priv->params.rx_cqe_compress_def = enable;
- if (reset)
- err = mlx5e_open_locked(netdev);
- return err;
+ return 0;
}
static int mlx5e_handle_pflag(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..f2762e45c8ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
- MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
@@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
dest.ft = priv->fs.l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->fs.vlan.untagged_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->fs.vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ rule_p = &priv->fs.vlan.any_cvlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ rule_p = &priv->fs.vlan.any_svlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->fs.vlan.active_vlans_rule[vid];
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
@@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.untagged_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->fs.vlan.any_vlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
- priv->fs.vlan.any_vlan_rule = NULL;
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ if (priv->fs.vlan.any_cvlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
+ priv->fs.vlan.any_cvlan_rule = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ if (priv->fs.vlan.any_svlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
+ priv->fs.vlan.any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
@@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
}
}
+static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
+static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ if (err)
+ return err;
+
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.filter_disabled)
@@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
@@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
@@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (enable_promisc) {
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
if (!priv->fs.vlan.filter_disabled)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
if (!priv->fs.vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
@@ -976,11 +1010,13 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 2
+#define MLX5E_NUM_VLAN_GROUPS 3
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
- MLX5E_VLAN_GROUP1_SIZE)
+ MLX5E_VLAN_GROUP1_SIZE +\
+ MLX5E_VLAN_GROUP2_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP0_SIZE;
@@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
@@ -1089,7 +1136,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..d55fff0ba388 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_ETHTOOL);
if (!ns)
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
@@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
first_vid, 0xfff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..3cce6281e075 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,6 +31,7 @@
*/
#include <net/tc_act/tc_gact.h>
+#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -83,16 +84,20 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.rq_wq_type = rq_type;
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
priv->params.mpwqe_log_stride_sz =
MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
- MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
+ MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
priv->params.mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
}
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
BIT(priv->params.log_rq_size));
@@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ }
+
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
out = pstats->per_prio_counters[prio];
@@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+ u32 *in;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+ kvfree(in);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_q_counter(priv);
- mlx5e_update_vport_counters(priv);
+ mlx5e_update_pcie_counters(priv);
mlx5e_update_pport_counters(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_q_counter(priv);
mlx5e_update_sw_counters(priv);
}
@@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
+ struct ptp_clock_event ptp_event;
+ struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
-
+ case MLX5_DEV_EVENT_PPS:
+ eqe = (struct mlx5_eqe *)param;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_event.index = eqe->data.pps.pin;
+ ptp_event.timestamp =
+ timecounter_cyc2time(&priv->tstamp.clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
+ mlx5e_pps_event_handler(vpriv, &ptp_event);
+ break;
default:
break;
}
@@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
static inline int mlx5e_get_wqe_mtt_sz(void)
{
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -372,7 +413,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
cseg->imm = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
- ucseg->klm_octowords =
+ ucseg->xlt_octowords =
cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
ucseg->bsf_octowords =
cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
@@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- rq->buff.map_dir = DMA_FROM_DEVICE;
- if (rq->xdp_prog)
+ if (rq->xdp_prog) {
rq->buff.map_dir = DMA_BIDIRECTIONAL;
+ rq->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ rq->rx_headroom = MLX5_RX_HEADROOM;
+ }
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- frag_sz = MLX5_RX_HEADROOM +
+ frag_sz = rq->rx_headroom +
byte_count /* packet data */ +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -967,10 +1012,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->channel = c;
sq->tc = tc;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
+ err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
if (err)
return err;
+ sq->uar_map = sq->bfreg.map;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
@@ -979,17 +1025,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
goto err_unmap_free_uar;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- if (sq->uar.bf_map) {
+ if (sq->bfreg.wc)
set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
- sq->uar_map = sq->uar.bf_map;
- } else {
- sq->uar_map = sq->uar.map;
- }
+
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
- sq->min_inline_mode =
- MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
- param->min_inline_mode : 0;
+ sq->min_inline_mode = param->min_inline_mode;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -1012,7 +1053,7 @@ err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
err_unmap_free_uar:
- mlx5_unmap_free_uar(mdev, &sq->uar);
+ mlx5_free_bfreg(mdev, &sq->bfreg);
return err;
}
@@ -1024,7 +1065,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
- mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+ mlx5_free_bfreg(priv->mdev, &sq->bfreg);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
@@ -1053,12 +1094,15 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
- MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
+ if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+ MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
@@ -1216,7 +1260,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1265,7 +1308,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
- MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -1472,6 +1515,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return is_kdump_kernel() ?
+ MLX5E_MIN_NUM_CHANNELS :
+ min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
@@ -1677,7 +1728,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
+ MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -1756,8 +1807,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
param->max_inline = priv->params.tx_max_inline;
- /* FOR XDP SQs will support only L2 inline mode */
- param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+ param->min_inline_mode = priv->params.tx_min_inline_mode;
param->type = MLX5E_SQ_XDP;
}
@@ -2022,8 +2072,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt)
{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2100,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ default:
+ WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ }
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2296,7 +2443,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@ -2404,110 +2550,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_tir_ctx_hash(tirc, priv);
-
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
-
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
- }
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -2686,7 +2735,7 @@ mqprio:
return mlx5e_setup_tc(dev, tc->tc);
}
-static struct rtnl_link_stats64 *
+static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2729,7 +2778,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast =
VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
- return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -2987,11 +3035,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- if (min_tx_rate)
- return -EOPNOTSUPP;
-
return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
- max_tx_rate);
+ max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3159,11 +3204,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bool reset, was_opened;
int i;
- if (prog && prog->xdp_adjust_head) {
- netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
-
mutex_lock(&priv->state_lock);
if ((netdev->features & NETIF_F_LRO) && prog) {
@@ -3331,7 +3371,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
!MLX5_CAP_GEN(mdev, nic_flow_table) ||
!MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3383,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
< 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
@@ -3432,22 +3472,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}
-static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
- u8 *min_inline_mode)
-{
- switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_L2:
- *min_inline_mode = MLX5_INLINE_MODE_L2;
- break;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
- break;
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- *min_inline_mode = MLX5_INLINE_MODE_NONE;
- break;
- }
-}
-
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -3481,7 +3505,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.lro_timeout =
mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
- priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_sq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_def = false;
@@ -3507,7 +3533,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+ mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+ if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 850378893b25..2c864574a9d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -374,13 +374,12 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}
-static struct rtnl_link_stats64 *
+static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
- return stats;
}
static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0e2fb3ed1790..b039b87742a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -33,6 +33,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
@@ -155,17 +156,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
{
bool was_opened;
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
return;
- mutex_lock(&priv->state_lock);
-
if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
- goto unlock;
+ return;
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
@@ -176,8 +175,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
if (was_opened)
mlx5e_open_locked(priv->netdev);
-unlock:
- mutex_unlock(&priv->state_lock);
}
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
@@ -193,6 +190,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
+ if (unlikely(page_is_pfmemalloc(dma_info->page)))
+ return false;
+
cache->page_cache[cache->tail] = *dma_info;
cache->tail = tail_next;
return true;
@@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
return -ENOMEM;
- wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
return 0;
}
@@ -644,10 +644,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5e_dma_info *di,
- unsigned int data_offset,
- int len)
+ const struct xdp_buff *xdp)
{
struct mlx5e_sq *sq = &rq->channel->xdp_sq;
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -658,10 +657,18 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
- dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
- unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
- void *data = page_address(di->page) + data_offset;
+ ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
+ dma_addr_t dma_addr = di->addr + data_offset;
+ unsigned int dma_len = xdp->data_end - xdp->data;
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
+ MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return false;
+ }
if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
if (sq->db.xdp.doorbell) {
@@ -671,7 +678,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
}
rq->stats.xdp_tx_full++;
mlx5e_page_release(rq, di, true);
- return;
+ return false;
}
dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
@@ -679,11 +686,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
memset(wqe, 0, sizeof(*wqe));
- /* copy the inline part */
- memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+ /* copy the inline part if required */
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dma_len -= MLX5E_XDP_MIN_INLINE;
+ dma_addr += MLX5E_XDP_MIN_INLINE;
- dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+ ds_cnt += MLX5E_XDP_IHS_DS_COUNT;
+ dseg++;
+ }
/* write the dma part */
dseg->addr = cpu_to_be64(dma_addr);
@@ -691,7 +704,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
dseg->lkey = sq->mkey_be;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq->db.xdp.di[pi] = *di;
wi->opcode = MLX5_OPCODE_SEND;
@@ -700,32 +713,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
sq->db.xdp.doorbell = true;
rq->stats.xdp_tx++;
+ return true;
}
/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- const struct bpf_prog *prog,
- struct mlx5e_dma_info *di,
- void *data, u16 len)
+static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
{
+ const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
u32 act;
if (!prog)
return false;
- xdp.data = data;
- xdp.data_end = xdp.data + len;
+ xdp.data = va + *rx_headroom;
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
return false;
case XDP_TX:
- mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
+ trace_xdp_exception(rq->netdev, prog, act);
return true;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
rq->stats.xdp_drop++;
mlx5e_page_release(rq, di, true);
@@ -740,15 +760,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_dma_info *di;
struct sk_buff *skb;
void *va, *data;
+ u16 rx_headroom = rq->rx_headroom;
bool consumed;
di = &rq->dma_info[wqe_counter];
va = page_address(di->page);
- data = va + MLX5_RX_HEADROOM;
+ data = va + rx_headroom;
dma_sync_single_range_for_cpu(rq->pdev,
di->addr,
- MLX5_RX_HEADROOM,
+ rx_headroom,
rq->buff.wqe_sz,
DMA_FROM_DEVICE);
prefetch(data);
@@ -760,8 +781,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
- cqe_bcnt);
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
@@ -777,7 +797,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
page_ref_inc(di->page);
mlx5e_page_release(rq, di, true);
- skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_reserve(skb, rx_headroom);
skb_put(skb, cqe_bcnt);
return skb;
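With xdp.data_hard_start now set and the receive headroom taken from rq->rx_headroom, the driver can honour bpf_xdp_adjust_head(), and the explicit rejection of such programs is dropped from mlx5e_xdp_set() above. A minimal sketch of a program that relies on this, assuming a libbpf-style build (header paths and section naming are assumptions):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_grow_head(struct xdp_md *ctx)
	{
		void *data, *data_end;

		/* Grow the packet at the front by 16 bytes, consuming RX headroom;
		 * this is the operation the driver change above starts to honour. */
		if (bpf_xdp_adjust_head(ctx, -16))
			return XDP_ABORTED;

		data = (void *)(long)ctx->data;
		data_end = (void *)(long)ctx->data_end;
		if (data + 16 > data_end)	/* bounds check for the verifier */
			return XDP_DROP;

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";

XDP_TX of the adjusted frame is handled by the reworked mlx5e_xmit_xdp_frame(), which derives the DMA offset and length from the xdp_buff instead of fixed constants.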
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index ba5db1dd23a9..53e4992d6511 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
- be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -215,6 +221,7 @@ struct mlx5e_pport_stats {
__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
static const struct counter_desc pport_802_3_stats_desc[] = {
@@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = {
{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
@@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_GET(pcie_stats, c) \
+ MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
+ counter_set.pcie_perf_cntrs_grp_data_layout.c)
+
+struct mlx5e_pcie_stats {
+ __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+};
+
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
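/*
 * Editor's note, illustrative only (not part of this patch): the accessor
 * macros above resolve a named field inside the raw PPCNT/MPCNT register
 * dumps, so a hypothetical consumer of the new counters would read them as:
 */
static u64 mlx5e_read_phy_corrected_bits(struct mlx5e_pport_stats *pstats)
{
	/* 64-bit physical-layer statistical counter from the PPCNT dump. */
	return PPORT_PHY_STATISTICAL_GET(pstats, phy_corrected_bits);
}

static u32 mlx5e_read_pcie_rx_errors(struct mlx5e_pcie_stats *pcie_stats)
{
	/* 32-bit PCIe performance counter from the MPCNT dump. */
	return PCIE_PERF_GET(pcie_stats, rx_errors);
}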
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
+ (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
+ MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+#define NUM_PCIE_PERF_COUNTERS(priv) \
+ (ARRAY_SIZE(pcie_perf_stats_desc) * \
+ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
NUM_PPORT_2863_COUNTERS + \
NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
+#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -378,6 +413,7 @@ struct mlx5e_stats {
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
+ struct mlx5e_pcie_stats pcie;
};
static const struct counter_desc mlx5e_pme_status_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 118cea5e5489..44406a5ec15d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -298,6 +298,32 @@ vxlan_match_offload_err:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ f->mask);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
/* Enforce DMAC when offloading incoming tunneled flows.
@@ -358,12 +384,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
f->key);
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
if (parse_tunnel_attr(priv, spec, f))
return -EOPNOTSUPP;
break;
- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- netdev_warn(priv->netdev,
- "IPv6 tunnel decap offload isn't supported\n");
default:
return -EOPNOTSUPP;
}
@@ -460,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -644,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
-static inline int cmp_encap_info(struct mlx5_encap_info *a,
- struct mlx5_encap_info *b)
+static inline int cmp_encap_info(struct ip_tunnel_key *a,
+ struct ip_tunnel_key *b)
{
return memcmp(a, b, sizeof(*a));
}
-static inline int hash_encap_info(struct mlx5_encap_info *info)
+static inline int hash_encap_info(struct ip_tunnel_key *key)
{
- return jhash(info, sizeof(*info), 0);
+ return jhash(key, sizeof(*key), 0);
}
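
The encap cache is now keyed on the whole struct ip_tunnel_key, compared with memcmp() and hashed with jhash(), which only works reliably when every byte of the key, padding included, is initialized the same way. A minimal user-space sketch of that keying pattern; the struct and the FNV-style hash below are illustrative stand-ins, not the driver's types:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct demo_key {			/* stand-in for ip_tunnel_key */
	uint32_t dst;
	uint16_t dport;
	uint64_t tun_id;		/* padding sits before this member */
};

static uint32_t hash_bytes(const void *p, size_t len)
{
	const uint8_t *b = p;
	uint32_t h = 2166136261u;	/* FNV-1a, stand-in for jhash() */

	while (len--)
		h = (h ^ *b++) * 16777619u;
	return h;
}

int main(void)
{
	struct demo_key a, b;

	/* Zero first: padding bytes take part in memcmp() and hashing. */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.dst = b.dst = 0x0a000001;
	a.dport = b.dport = 4789;
	a.tun_id = b.tun_id = 42;

	printf("equal: %d, hashes: %08x %08x\n",
	       !memcmp(&a, &b, sizeof(a)),
	       hash_bytes(&a, sizeof(a)), hash_bytes(&b, sizeof(b)));
	return 0;
}
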
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
@@ -660,38 +684,76 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- __be32 *saddr,
int *out_ttl)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rtable *rt;
struct neighbour *n = NULL;
- int ttl;
#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
#else
return -EOPNOTSUPP;
#endif
+ /* if the egress device isn't on the same HW e-switch, we use the uplink */
+ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+ *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ else
+ *out_dev = rt->dst.dev;
- if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
- ip_rt_put(rt);
- return -EOPNOTSUPP;
- }
-
- ttl = ip4_dst_hoplimit(&rt->dst);
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
return -ENOMEM;
*out_n = n;
- *saddr = fl4->saddr;
- *out_ttl = ttl;
- *out_dev = rt->dst.dev;
+ return 0;
+}
+
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ int *out_ttl)
+{
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int ret;
+
+ dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
+ ret = dst->error;
+ if (ret) {
+ dst_release(dst);
+ return ret;
+ }
+
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ /* if the egress device isn't on the same HW e-switch, we use the uplink */
+ if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
+ *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ else
+ *out_dev = dst->dev;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+ *out_n = n;
return 0;
}
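
The reworked IPv4 lookup folds the IS_ERR() check into PTR_ERR_OR_ZERO() on the returned route. A small user-space re-implementation of that error-pointer idiom, assuming the usual kernel encoding of errno values at the top of the address space:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline int IS_ERR(const void *ptr)   { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

int main(void)
{
	int dummy;
	void *route_ok  = &dummy;		/* stand-in for a valid rtable */
	void *route_err = ERR_PTR(-ENETUNREACH);

	printf("ok:  %ld\n", PTR_ERR_OR_ZERO(route_ok));   /* 0 */
	printf("err: %ld\n", PTR_ERR_OR_ZERO(route_err));  /* -ENETUNREACH */
	return 0;
}
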
@@ -731,19 +793,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
return encap_size;
}
+static int gen_vxlan_header_ipv6(struct net_device *out_dev,
+ char buf[],
+ unsigned char h_dest[ETH_ALEN],
+ int ttl,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ __be16 udp_dst_port,
+ __be32 vx_vni)
+{
+ int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
+ struct ethhdr *eth = (struct ethhdr *)buf;
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
+ struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
+ struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+ memset(buf, 0, encap_size);
+
+ ether_addr_copy(eth->h_dest, h_dest);
+ ether_addr_copy(eth->h_source, out_dev->dev_addr);
+ eth->h_proto = htons(ETH_P_IPV6);
+
+ ip6_flow_hdr(ip6h, 0, 0);
+ /* the HW fills in the IPv6 payload length */

+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+
+ udp->dest = udp_dst_port;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(vx_vni);
+
+ return encap_size;
+}
+
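
gen_vxlan_header_ipv6() lays the Ethernet, IPv6, UDP and VXLAN headers back to back in one flat buffer, so the resulting encap size is just the sum of the fixed header lengths, which must still fit within max_encap_header_size. A user-space sketch of that arithmetic; the sizes assume standard, optionless headers and an 8-byte VXLAN header:

#include <stdio.h>
#include <netinet/ip6.h>	/* struct ip6_hdr, 40 bytes */
#include <netinet/udp.h>	/* struct udphdr, 8 bytes */

#define ETH_HDR_LEN  14
#define IPV4_HDR_LEN 20		/* assumed: no IP options */
#define VXLAN_HLEN   (sizeof(struct udphdr) + 8)

int main(void)
{
	printf("ipv4 encap: %zu bytes\n",
	       ETH_HDR_LEN + IPV4_HDR_LEN + VXLAN_HLEN);          /* 50 */
	printf("ipv6 encap: %zu bytes\n",
	       ETH_HDR_LEN + sizeof(struct ip6_hdr) + VXLAN_HLEN); /* 70 */
	return 0;
}
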
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5_encap_entry *e,
struct net_device **out_dev)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ int encap_size, ttl, err;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
char *encap_header;
- int encap_size;
- __be32 saddr = 0;
- int ttl = 0;
- int err;
encap_header = kzalloc(max_encap_size, GFP_KERNEL);
if (!encap_header)
@@ -752,37 +847,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
fl4.flowi4_proto = IPPROTO_UDP;
- fl4.fl4_dport = e->tun_info.tp_dst;
+ fl4.fl4_dport = tun_key->tp_dst;
break;
default:
err = -EOPNOTSUPP;
goto out;
}
- fl4.daddr = e->tun_info.daddr;
+ fl4.flowi4_tos = tun_key->tos;
+ fl4.daddr = tun_key->u.ipv4.dst;
+ fl4.saddr = tun_key->u.ipv4.src;
err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
- &fl4, &n, &saddr, &ttl);
+ &fl4, &n, &ttl);
if (err)
goto out;
+ if (!(n->nud_state & NUD_VALID)) {
+ pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
e->n = n;
e->out_dev = *out_dev;
+ neigh_ha_snapshot(e->h_dest, n, *out_dev);
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+ e->h_dest, ttl,
+ fl4.daddr,
+ fl4.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ encap_size, encap_header, &e->encap_id);
+out:
+ if (err && n)
+ neigh_release(n);
+ kfree(encap_header);
+ return err;
+}
+
+static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5_encap_entry *e,
+ struct net_device **out_dev)
+
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ int encap_size, err, ttl = 0;
+ struct neighbour *n = NULL;
+ struct flowi6 fl6 = {};
+ char *encap_header;
+
+ encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = tun_key->tp_dst;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ fl6.daddr = tun_key->u.ipv6.dst;
+ fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+ &fl6, &n, &ttl);
+ if (err)
+ goto out;
+
if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+ pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
err = -EOPNOTSUPP;
goto out;
}
+ e->n = n;
+ e->out_dev = *out_dev;
+
neigh_ha_snapshot(e->h_dest, n, *out_dev);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+ encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
e->h_dest, ttl,
- e->tun_info.daddr,
- saddr, e->tun_info.tp_dst,
- e->tun_info.tun_id);
+ &fl6.daddr,
+ &fl6.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
break;
default:
err = -EOPNOTSUPP;
@@ -806,13 +972,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
struct ip_tunnel_key *key = &tun_info->key;
- struct mlx5_encap_info info;
struct mlx5_encap_entry *e;
struct net_device *out_dev;
+ int tunnel_type, err = -EOPNOTSUPP;
uintptr_t hash_key;
bool found = false;
- int tunnel_type;
- int err;
/* udp dst port must be set */
if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
@@ -828,8 +992,6 @@ vxlan_encap_offload_err:
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- info.tp_dst = key->tp_dst;
- info.tun_id = tunnel_id_to_key32(key->tun_id);
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
netdev_warn(priv->netdev,
@@ -837,22 +999,11 @@ vxlan_encap_offload_err:
return -EOPNOTSUPP;
}
- switch (family) {
- case AF_INET:
- info.daddr = key->u.ipv4.dst;
- break;
- case AF_INET6:
- netdev_warn(priv->netdev,
- "IPv6 tunnel encap offload isn't supported\n");
- default:
- return -EOPNOTSUPP;
- }
-
- hash_key = hash_encap_info(&info);
+ hash_key = hash_encap_info(key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info, &info)) {
+ if (!cmp_encap_info(&e->tun_info.key, key)) {
found = true;
break;
}
@@ -867,11 +1018,15 @@ vxlan_encap_offload_err:
if (!e)
return -ENOMEM;
- e->tun_info = info;
+ e->tun_info = *tun_info;
e->tunnel_type = tunnel_type;
INIT_LIST_HEAD(&e->flows);
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ if (family == AF_INET)
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ else if (family == AF_INET6)
+ err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+
if (err)
goto out_err;
@@ -1085,10 +1240,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ preempt_disable();
+
tcf_exts_to_list(f->exts, &actions);
list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
+ preempt_enable();
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cfb68371c397..f193128bac4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
int hlen;
switch (mode) {
+ case MLX5_INLINE_MODE_NONE:
+ return 0;
case MLX5_INLINE_MODE_TCP_UDP:
hlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
@@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
wi->num_bytes = num_bytes;
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
- &skb_len);
- ihs += VLAN_HLEN;
- } else {
- memcpy(eseg->inline_hdr_start, skb_data, ihs);
- mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (ihs) {
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+ ihs += VLAN_HLEN;
+ } else {
+ memcpy(eseg->inline_hdr.start, skb_data, ihs);
+ mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ }
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+ } else if (skb_vlan_tag_present(skb)) {
+ eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
}
- eseg->inline_hdr_sz = cpu_to_be16(ihs);
-
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
- MLX5_SEND_WQE_DS);
- dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
wi->num_dma = 0;
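
The rewritten xmit path sizes the WQE by counting data segments: the base control plus Ethernet segments, then however many extra segments the inlined headers spill into. A standalone sketch of that DIV_ROUND_UP() arithmetic; the 16-byte segment size, 2-byte inline_hdr.start and 48-byte base WQE are assumptions for illustration, not values restated from the patch:

#include <stdio.h>

#define SEND_WQE_DS        16	/* assumed size of one WQE data segment */
#define INLINE_HDR_START   2	/* assumed bytes held inside the eth segment */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int base_wqe = 48;		/* assumed ctrl + eth segment bytes */
	unsigned int ihs = 14 + 4;		/* Ethernet header + VLAN tag */
	unsigned int ds_cnt = base_wqe / SEND_WQE_DS;

	ds_cnt += DIV_ROUND_UP(ihs - INLINE_HDR_START, SEND_WQE_DS);
	printf("ds_cnt with %u inlined bytes: %u\n", ihs, ds_cnt);	/* 3 + 1 = 4 */
	return 0;
}
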
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 8ffcc8808e50..ea5d8d37a75c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -54,6 +54,7 @@ enum {
MLX5_NUM_SPARE_EQE = 0x80,
MLX5_NUM_ASYNC_EQE = 0x100,
MLX5_NUM_CMD_EQE = 32,
+ MLX5_NUM_PF_DRAIN = 64,
};
enum {
@@ -153,6 +154,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
return "MLX5_EVENT_TYPE_PAGE_FAULT";
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ return "MLX5_EVENT_TYPE_PPS_EVENT";
default:
return "Unrecognized event";
}
@@ -188,10 +191,193 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm)
mb();
}
-static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void eqe_pf_action(struct work_struct *work)
+{
+ struct mlx5_pagefault *pfault = container_of(work,
+ struct mlx5_pagefault,
+ work);
+ struct mlx5_eq *eq = pfault->eq;
+
+ mlx5_core_page_fault(eq->dev, pfault);
+ mempool_free(pfault, eq->pf_ctx.pool);
+}
+
+static void eq_pf_process(struct mlx5_eq *eq)
+{
+ struct mlx5_core_dev *dev = eq->dev;
+ struct mlx5_eqe_page_fault *pf_eqe;
+ struct mlx5_pagefault *pfault;
+ struct mlx5_eqe *eqe;
+ int set_ci = 0;
+
+ while ((eqe = next_eqe_sw(eq))) {
+ pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
+ if (!pfault) {
+ schedule_work(&eq->pf_ctx.work);
+ break;
+ }
+
+ dma_rmb();
+ pf_eqe = &eqe->data.page_fault;
+ pfault->event_subtype = eqe->sub_type;
+ pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
+
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
+ eqe->sub_type, pfault->bytes_committed);
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault->type =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+ pfault->token =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) &
+ MLX5_24BIT_MASK;
+ pfault->rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault->rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault->rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault->rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+ pfault->rdma.rdma_op_len,
+ pfault->rdma.rdma_va);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault->type =
+ be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+ pfault->token =
+ be32_to_cpu(pf_eqe->wqe.token);
+ pfault->wqe.wq_num =
+ be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+ MLX5_24BIT_MASK;
+ pfault->wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault->wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ pfault->type, pfault->token,
+ pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ default:
+ mlx5_core_warn(dev,
+ "Unsupported page fault event sub-type: 0x%02hhx\n",
+ eqe->sub_type);
+ /* Unsupported page faults should still be
+ * resolved by the page fault handler
+ */
+ }
+
+ pfault->eq = eq;
+ INIT_WORK(&pfault->work, eqe_pf_action);
+ queue_work(eq->pf_ctx.wq, &pfault->work);
+
+ ++eq->cons_index;
+ ++set_ci;
+
+ if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
+ eq_update_ci(eq, 0);
+ set_ci = 0;
+ }
+ }
+
+ eq_update_ci(eq, 1);
+}
+
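
eq_pf_process() consumes page-fault EQEs in a loop and pushes the consumer index to hardware every MLX5_NUM_SPARE_EQE entries, so the HCA never mistakes a busy queue for an overflow; only the final update re-arms the EQ. A user-space sketch of that acknowledge-every-N consumer pattern, with an illustrative queue length and batch size:

#include <stdio.h>

#define SPARE_EQE 0x80

int main(void)
{
	unsigned int cons_index = 0, set_ci = 0, pending = 300;

	while (pending--) {
		/* ...handle one event queue entry here... */
		++cons_index;
		if (++set_ci >= SPARE_EQE) {	/* tell HW how far we got */
			printf("update ci=%u (no re-arm)\n", cons_index);
			set_ci = 0;
		}
	}
	printf("final ci=%u (re-arm)\n", cons_index);	/* eq_update_ci(eq, 1) */
	return 0;
}
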
+static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
+{
+ struct mlx5_eq *eq = eq_ptr;
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
+ eq_pf_process(eq);
+ spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
+ } else {
+ schedule_work(&eq->pf_ctx.work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
+{
+ while (pool->curr_nr < pool->min_nr)
+ mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void eq_pf_action(struct work_struct *work)
+{
+ struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
+
+ mempool_refill(eq->pf_ctx.pool);
+
+ spin_lock_irq(&eq->pf_ctx.lock);
+ eq_pf_process(eq);
+ spin_unlock_irq(&eq->pf_ctx.lock);
+}
+
+static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
+{
+ spin_lock_init(&pf_ctx->lock);
+ INIT_WORK(&pf_ctx->work, eq_pf_action);
+
+ pf_ctx->wq = alloc_ordered_workqueue(name,
+ WQ_MEM_RECLAIM);
+ if (!pf_ctx->wq)
+ return -ENOMEM;
+
+ pf_ctx->pool = mempool_create_kmalloc_pool
+ (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
+ if (!pf_ctx->pool)
+ goto err_wq;
+
+ return 0;
+err_wq:
+ destroy_workqueue(pf_ctx->wq);
+ return -ENOMEM;
+}
+
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+ u32 wq_num, u8 type, int error)
+{
+ u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
+
+ MLX5_SET(page_fault_resume_in, in, opcode,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, error, !!error);
+ MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
+ MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
+ MLX5_SET(page_fault_resume_in, in, token, token);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
+
+static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
+ struct mlx5_eq *eq = eq_ptr;
+ struct mlx5_core_dev *dev = eq->dev;
struct mlx5_eqe *eqe;
- int eqes_found = 0;
int set_ci = 0;
u32 cqn = -1;
u32 rsn;
@@ -276,12 +462,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
break;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- case MLX5_EVENT_TYPE_PAGE_FAULT:
- mlx5_eq_pagefault(dev, eqe);
- break;
-#endif
-
#ifdef CONFIG_MLX5_CORE_EN
case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
@@ -292,6 +472,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_port_module_event(dev, eqe);
break;
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ if (dev->event)
+ dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+ break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -299,7 +483,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
++eq->cons_index;
- eqes_found = 1;
++set_ci;
/* The HCA will think the queue has overflowed if we
@@ -319,17 +502,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (cqn != -1)
tasklet_schedule(&eq->tasklet_ctx.task);
- return eqes_found;
-}
-
-static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
-{
- struct mlx5_eq *eq = eq_ptr;
- struct mlx5_core_dev *dev = eq->dev;
-
- mlx5_eq_int(dev, eq);
-
- /* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
@@ -345,22 +517,32 @@ static void init_eq_buf(struct mlx5_eq *eq)
}
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name, struct mlx5_uar *uar)
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type)
{
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
+ irq_handler_t handler;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
+ eq->type = type;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (type == MLX5_EQ_TYPE_PF)
+ handler = mlx5_eq_pf_int;
+ else
+#endif
+ handler = mlx5_eq_int;
+
init_eq_buf(eq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
@@ -380,7 +562,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
- MLX5_SET(eqc, eqc, uar_page, uar->index);
+ MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -395,8 +577,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
- eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(eq->irqn, mlx5_msix_handler, 0,
+ eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+ err = request_irq(eq->irqn, handler, 0,
priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -405,11 +587,20 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (type == MLX5_EQ_TYPE_PF) {
+ err = init_pf_ctx(&eq->pf_ctx, name);
+ if (err)
+ goto err_irq;
+ } else
+#endif
+ {
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+ }
/* EQs are created in ARMED state
*/
@@ -444,7 +635,16 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
- tasklet_disable(&eq->tasklet_ctx.task);
+
+ if (eq->type == MLX5_EQ_TYPE_COMP) {
+ tasklet_disable(&eq->tasklet_ctx.task);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ } else if (eq->type == MLX5_EQ_TYPE_PF) {
+ cancel_work_sync(&eq->pf_ctx.work);
+ destroy_workqueue(eq->pf_ctx.wq);
+ mempool_destroy(eq->pf_ctx.pool);
+#endif
+ }
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -479,8 +679,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (MLX5_CAP_GEN(dev, pg))
- async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
MLX5_CAP_GEN(dev, vport_group_manager) &&
@@ -492,9 +690,12 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
+ if (MLX5_CAP_GEN(dev, pps))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
- "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
+ "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
return err;
@@ -504,7 +705,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
MLX5_NUM_ASYNC_EQE, async_event_mask,
- "mlx5_async_eq", &dev->priv.uuari.uars[0]);
+ "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
goto err1;
@@ -514,13 +715,33 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
MLX5_EQ_VEC_PAGES,
/* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
- &dev->priv.uuari.uars[0]);
+ MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
goto err2;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_create_map_eq(dev, &table->pfault_eq,
+ MLX5_EQ_VEC_PFAULT,
+ MLX5_NUM_ASYNC_EQE,
+ 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+ "mlx5_page_fault_eq",
+ MLX5_EQ_TYPE_PF);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
+ err);
+ goto err3;
+ }
+ }
+
return err;
+err3:
+ mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+#else
+ return err;
+#endif
err2:
mlx5_destroy_unmap_eq(dev, &table->async_eq);
@@ -536,6 +757,14 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
+ if (err)
+ return err;
+ }
+#endif
+
err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..fcd5bc7e31db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
@@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
if (vport->info.vlan || vport->info.qos)
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
@@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
@@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
}
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
- u32 initial_max_rate)
+ u32 initial_max_rate, u32 initial_bw_share)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
initial_max_rate);
+ MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
}
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
- u32 max_rate)
+ u32 max_rate, u32 bw_share)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
max_rate);
+ MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+ if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
+ vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
@@ -1630,7 +1634,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
@@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
+ ivi->min_tx_rate = evport->info.min_rate;
ivi->max_tx_rate = evport->info.max_rate;
mutex_unlock(&esw->state_lock);
@@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
return 0;
}
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
- int vport, u32 max_rate)
+static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
+{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ struct mlx5_vport *evport;
+ u32 max_guarantee = 0;
+ int i;
+
+ for (i = 0; i <= esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled || evport->info.min_rate < max_guarantee)
+ continue;
+ max_guarantee = evport->info.min_rate;
+ }
+
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+}
+
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ struct mlx5_vport *evport;
+ u32 vport_max_rate;
+ u32 vport_min_rate;
+ u32 bw_share;
+ int err;
+ int i;
+
+ for (i = 0; i <= esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled)
+ continue;
+ vport_min_rate = evport->info.min_rate;
+ vport_max_rate = evport->info.max_rate;
+ bw_share = MLX5_MIN_BW_SHARE;
+
+ if (vport_min_rate)
+ bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
+ divider,
+ fw_max_bw_share);
+
+ if (bw_share == evport->qos.bw_share)
+ continue;
+
+ err = esw_vport_qos_config(esw, i, vport_max_rate,
+ bw_share);
+ if (!err)
+ evport->qos.bw_share = bw_share;
+ else
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+ u32 max_rate, u32 min_rate)
{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+ fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+ bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
struct mlx5_vport *evport;
+ u32 previous_min_rate;
+ u32 divider;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
+ return -EOPNOTSUPP;
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = esw_vport_qos_config(esw, vport, max_rate);
+
+ if (min_rate == evport->info.min_rate)
+ goto set_max_rate;
+
+ previous_min_rate = evport->info.min_rate;
+ evport->info.min_rate = min_rate;
+ divider = calculate_vports_min_rate_divider(esw);
+ err = normalize_vports_min_rate(esw, divider);
+ if (err) {
+ evport->info.min_rate = previous_min_rate;
+ goto unlock;
+ }
+
+set_max_rate:
+ if (max_rate == evport->info.max_rate)
+ goto unlock;
+
+ err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
if (!err)
evport->info.max_rate = max_rate;
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
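
Min-rate enforcement converts each vport's guaranteed rate into a TSAR bw_share relative to the largest guarantee: the divider is the maximum guarantee scaled down to the firmware's max_tsar_bw_share, and each rate is then clamped between MLX5_MIN_BW_SHARE and that limit. A worked arithmetic sketch of the mapping; the firmware limit of 100 and the example rates are assumptions for illustration:

#include <stdio.h>

#define MIN_BW_SHARE 1
#define MAX_T(a, b) ((a) > (b) ? (a) : (b))
#define MIN_T(a, b) ((a) < (b) ? (a) : (b))
#define RATE_TO_BW_SHARE(rate, divider, limit) \
	MIN_T(MAX_T((rate) / (divider), MIN_BW_SHARE), (limit))

int main(void)
{
	unsigned int fw_max_bw_share = 100;		/* assumed device limit */
	unsigned int min_rate[] = { 10000, 2500, 0 };	/* per-vport guarantees */
	unsigned int max_guarantee = 10000;
	unsigned int divider = MAX_T(max_guarantee / fw_max_bw_share, 1); /* 100 */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int bw = min_rate[i] ?
			RATE_TO_BW_SHARE(min_rate[i], divider, fw_max_bw_share) :
			MIN_BW_SHARE;
		printf("vport %u: min_rate %u -> bw_share %u\n",
		       i, min_rate[i], bw);	/* 100, 25, 1 */
	}
	return 0;
}
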
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 8661dd3f542c..5b78883d5654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,6 +36,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <net/devlink.h>
+#include <net/ip_tunnels.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -49,6 +50,11 @@
#define FDB_UPLINK_VPORT 0xffff
+#define MLX5_MIN_BW_SHARE 1
+
+#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
+ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
+
/* L2 -mac address based- hash helpers */
struct l2addr_node {
struct hlist_node hlist;
@@ -115,6 +121,7 @@ struct mlx5_vport_info {
u8 qos;
u64 node_guid;
int link_state;
+ u32 min_rate;
u32 max_rate;
bool spoofchk;
bool trusted;
@@ -137,6 +144,7 @@ struct mlx5_vport {
struct {
bool enabled;
u32 esw_tsar_ix;
+ u32 bw_share;
} qos;
bool enabled;
@@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport_num, bool setting);
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
- int vport, u32 max_rate);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+ u32 max_rate, u32 min_rate);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -274,18 +282,12 @@ enum {
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
-struct mlx5_encap_info {
- __be32 daddr;
- __be32 tun_id;
- __be16 tp_dst;
-};
-
struct mlx5_encap_entry {
struct hlist_node encap_hlist;
struct list_head flows;
u32 encap_id;
struct neighbour *n;
- struct mlx5_encap_info tun_info;
+ struct ip_tunnel_info tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..4f5b0d47d5f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
return 0;
out_notsupp:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -402,19 +402,18 @@ out:
}
#define MAX_PF_SQ 256
-#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int table_size, ix, esw_size, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
struct mlx5_flow_group *g;
u32 *flow_group_in;
void *match_criteria;
- int table_size, ix, err = 0;
u32 flags = 0;
flow_group_in = mlx5_vzalloc(inlen);
@@ -424,18 +423,23 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
goto ns_err;
}
- esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+ MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
+
+ esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
+ 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- ESW_OFFLOADS_NUM_ENTRIES,
+ esw_size,
ESW_OFFLOADS_NUM_GROUPS, 0,
flags);
if (IS_ERR(fdb)) {
@@ -535,7 +539,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +659,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +678,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
int vport;
int err;
+ /* disable PF RoCE so missed packets don't go through RoCE steering */
+ mlx5_dev_list_lock();
+ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
err = esw_create_offloads_fdb_table(esw, nvports);
if (err)
- return err;
+ goto create_fdb_err;
err = esw_create_offloads_table(esw);
if (err)
@@ -696,11 +705,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
goto err_reps;
}
- /* disable PF RoCE so missed packets don't go through RoCE steering */
- mlx5_dev_list_lock();
- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
return 0;
err_reps:
@@ -717,6 +721,13 @@ create_fg_err:
create_ft_err:
esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return err;
}
@@ -724,11 +735,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
- /* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
@@ -738,6 +744,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
}
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b64a781c7e85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
opmod = 1;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
@@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int err;
u32 *in;
- if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
+ if (size > max_encap_size) {
+ mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
+ size, max_encap_size);
return -EINVAL;
+ }
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..ce3d92106386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
- ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
+ ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
offset / 32)) >> \
(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
- if (!ns)
+ if (WARN_ON(!ns))
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5718aada6605..d0bbefa08af7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -91,6 +91,20 @@ out:
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);
+static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_pcam_reg(dev, dev->caps.pcam,
+ MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_PCAM_REGS_5000_TO_507F);
+}
+
+static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_mcam_reg(dev, dev->caps.mcam,
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_MCAM_REGS_FIRST_128);
+}
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, pcam_reg))
+ mlx5_get_pcam_reg(dev);
+
+ if (MLX5_CAP_GEN(dev, mcam_reg))
+ mlx5_get_mcam_reg(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 5bcf93422ee0..d0515391d33b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd)
}
}
-static u16 get_maj(u32 fw)
-{
- return fw >> 28;
-}
-
-static u16 get_min(u32 fw)
-{
- return fw >> 16 & 0xfff;
-}
-
-static u16 get_sub(u32 fw)
-{
- return fw & 0xffff;
-}
-
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev)
dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
- fw = ioread32be(&h->fw_ver);
- sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ fw = ioread32be(&h->fw_ver);
+ dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
}
static unsigned long get_next_poll_jiffies(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..c4242a4e8130 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
+ .mr_cache[16] = {
+ .size = 8,
+ .limit = 4
+ },
+ .mr_cache[17] = {
+ .size = 8,
+ .limit = 4
+ },
+ .mr_cache[18] = {
+ .size = 8,
+ .limit = 4
+ },
+ .mr_cache[19] = {
+ .size = 4,
+ .limit = 2
+ },
+ .mr_cache[20] = {
+ .size = 4,
+ .limit = 2
+ },
},
};
@@ -398,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
switch (cap_mode) {
case HCA_CAP_OPMOD_GET_MAX:
- memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ memcpy(dev->caps.hca_max[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
case HCA_CAP_OPMOD_GET_CUR:
- memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ memcpy(dev->caps.hca_cur[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
default:
@@ -493,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
- memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -517,8 +537,18 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+ /* If the HCA supports 4K UARs use it */
+ if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
+
MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+ if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
+ MLX5_SET(cmd_hca_cap,
+ set_hca_cap,
+ cache_line_128byte,
+ cache_line_size() == 128 ? 1 : 0);
+
err = set_caps(dev, set_ctx, set_sz,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
@@ -739,7 +769,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- name, &dev->priv.uuari.uars[0]);
+ name, MLX5_EQ_TYPE_COMP);
if (err) {
kfree(eq);
goto clean;
@@ -807,7 +837,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
@@ -899,8 +929,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out;
}
- MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
err = mlx5_init_cq_table(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize cq table\n");
@@ -1079,8 +1107,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_cleanup_once;
}
- err = mlx5_alloc_uuars(dev, &priv->uuari);
- if (err) {
+ dev->priv.uar = mlx5_get_uars_page(dev);
+ if (!dev->priv.uar) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
goto err_disable_msix;
}
@@ -1088,7 +1116,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_start_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_free_uar;
+ goto err_put_uars;
}
err = alloc_comp_eqs(dev);
@@ -1154,8 +1182,8 @@ err_affinity_hints:
err_stop_eqs:
mlx5_stop_eqs(dev);
-err_free_uar:
- mlx5_free_uuars(dev, &priv->uuari);
+err_put_uars:
+ mlx5_put_uars_page(dev, priv->uar);
err_disable_msix:
mlx5_disable_msix(dev);
@@ -1218,7 +1246,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
- mlx5_free_uuars(dev, &priv->uuari);
+ mlx5_put_uars_page(dev, priv->uar);
mlx5_disable_msix(dev);
if (cleanup)
mlx5_cleanup_once(dev);
@@ -1284,10 +1312,24 @@ static int init_one(struct pci_dev *pdev,
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&priv->pfault_srcu);
+ if (err) {
+ dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
+ err);
+ goto clean_dev;
+ }
+#endif
+ mutex_init(&priv->bfregs.reg_head.lock);
+ mutex_init(&priv->bfregs.wc_head.lock);
+ INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
+ INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
+
err = mlx5_pci_init(dev, priv);
if (err) {
dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
- goto clean_dev;
+ goto clean_srcu;
}
err = mlx5_health_init(dev);
@@ -1304,9 +1346,7 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ request_module_nowait(MLX5_IB_MOD);
err = devlink_register(devlink, &pdev->dev);
if (err)
@@ -1321,7 +1361,11 @@ clean_health:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
+clean_srcu:
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&priv->pfault_srcu);
clean_dev:
+#endif
pci_set_drvdata(pdev, NULL);
devlink_free(devlink);
@@ -1346,6 +1390,9 @@ static void remove_one(struct pci_dev *pdev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&priv->pfault_srcu);
+#endif
pci_set_drvdata(pdev, NULL);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index d4a99c9757cb..b3dabe6e8836 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,6 +86,8 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+ struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
@@ -111,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+ u8 access_reg_group);
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
+ u8 access_reg_group);
+
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
@@ -136,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..141583daf5a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -74,6 +74,30 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+ u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pcam_reg);
+
+ MLX5_SET(pcam_reg, in, feature_group, feature_group);
+ MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0);
+}
+
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
+ u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mcam_reg);
+
+ MLX5_SET(mcam_reg, in, feature_group, feature_group);
+ MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
+}
+
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -620,7 +644,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
MLX5_REG_QETCR, 0, 1);
@@ -632,7 +656,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
u32 in[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
memset(in, 0, sizeof(in));
return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
@@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
module_num, mlx5_pme_status[module_status - 1],
mlx5_pme_error[error_type]);
}
+
+int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out,
+ sizeof(out), MLX5_REG_MTPPS, 0, 1);
+}
+
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode)
+{
+ u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ int err = 0;
+
+ MLX5_SET(mtppse_reg, in, pin, pin);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MTPPSE, 0, 0);
+ if (err)
+ return err;
+
+ *arm = MLX5_GET(mtppse_reg, in, event_arm);
+ *mode = MLX5_GET(mtppse_reg, in, event_generation_mode);
+
+ return err;
+}
+
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
+{
+ u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+
+ MLX5_SET(mtppse_reg, in, pin, pin);
+ MLX5_SET(mtppse_reg, in, event_arm, arm);
+ MLX5_SET(mtppse_reg, in, event_generation_mode, mode);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MTPPSE, 0, 1);
+}
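
The new MTPPSE helpers give callers a pin-granular way to arm PPS events and read the configuration back. A hypothetical caller sketch, compilable only inside the mlx5 core driver; the pin number and mode value are assumptions of this example, not part of the patch:

static int example_arm_pps_pin(struct mlx5_core_dev *mdev)
{
	u8 arm = 0, mode = 0;
	int err;

	/* Arm pin 0; a mode of 0 is assumed here for illustration. */
	err = mlx5_set_mtppse(mdev, 0, 1, 0);
	if (err)
		return err;

	/* Read the pin state back to confirm the event is armed. */
	err = mlx5_query_mtppse(mdev, 0, &arm, &mode);
	if (err)
		return err;

	return arm ? 0 : -EIO;
}
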
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index d0a4005fe63a..cbbcef2884be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -143,95 +143,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
-{
- struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
- int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
- struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
- struct mlx5_core_qp *qp =
- container_of(common, struct mlx5_core_qp, common);
- struct mlx5_pagefault pfault;
-
- if (!qp) {
- mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
- qpn);
- return;
- }
-
- pfault.event_subtype = eqe->sub_type;
- pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
- (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
- pfault.bytes_committed = be32_to_cpu(
- pf_eqe->bytes_committed);
-
- mlx5_core_dbg(dev,
- "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
- eqe->sub_type, pfault.flags);
-
- switch (eqe->sub_type) {
- case MLX5_PFAULT_SUBTYPE_RDMA:
- /* RDMA based event */
- pfault.rdma.r_key =
- be32_to_cpu(pf_eqe->rdma.r_key);
- pfault.rdma.packet_size =
- be16_to_cpu(pf_eqe->rdma.packet_length);
- pfault.rdma.rdma_op_len =
- be32_to_cpu(pf_eqe->rdma.rdma_op_len);
- pfault.rdma.rdma_va =
- be64_to_cpu(pf_eqe->rdma.rdma_va);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
- qpn, pfault.rdma.r_key);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
- pfault.rdma.rdma_op_len);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: rdma_va: 0x%016llx,\n",
- pfault.rdma.rdma_va);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: bytes_committed: 0x%06x\n",
- pfault.bytes_committed);
- break;
-
- case MLX5_PFAULT_SUBTYPE_WQE:
- /* WQE based event */
- pfault.wqe.wqe_index =
- be16_to_cpu(pf_eqe->wqe.wqe_index);
- pfault.wqe.packet_size =
- be16_to_cpu(pf_eqe->wqe.packet_length);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
- qpn, pfault.wqe.wqe_index);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: bytes_committed: 0x%06x\n",
- pfault.bytes_committed);
- break;
-
- default:
- mlx5_core_warn(dev,
- "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
- eqe->sub_type, qpn);
- /* Unsupported page faults should still be resolved by the
- * page fault handler
- */
- }
-
- if (qp->pfault_handler) {
- qp->pfault_handler(qp, &pfault);
- } else {
- mlx5_core_err(dev,
- "ODP event for QP %08x, without a fault handler in QP\n",
- qpn);
- /* Page fault will remain unresolved. QP will hang until it is
- * destroyed
- */
- }
-
- mlx5_core_put_rsc(common);
-}
-#endif
-
static int create_qprqsq_common(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
int rsc_type)
@@ -506,31 +417,6 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
- u8 flags, int error)
-{
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
-
- MLX5_SET(page_fault_resume_in, in, opcode,
- MLX5_CMD_OP_PAGE_FAULT_RESUME);
- MLX5_SET(page_fault_resume_in, in, qpn, qpn);
-
- if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
- MLX5_SET(page_fault_resume_in, in, req_res, 1);
- if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
- MLX5_SET(page_fault_resume_in, in, read_write, 1);
- if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
- MLX5_SET(page_fault_resume_in, in, rdma, 1);
- if (error)
- MLX5_SET(page_fault_resume_in, in, error, 1);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
-#endif
-
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index ab0b896621a0..2e6b0f290ddc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -37,11 +37,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-enum {
- NUM_DRIVER_UARS = 4,
- NUM_LOW_LAT_UUARS = 4,
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
@@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
-static int need_uuar_lock(int uuarn)
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
- int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
-
- if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
- return 0;
+ if (MLX5_CAP_GEN(mdev, uar_4k))
+ return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
return 1;
}
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
- int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
- struct mlx5_bf *bf;
- phys_addr_t addr;
- int err;
+ u32 system_page_index;
+
+ if (MLX5_CAP_GEN(mdev, uar_4k))
+ system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
+ else
+ system_page_index = index;
+
+ return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+}
+
+static void up_rel_func(struct kref *kref)
+{
+ struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
+
+ list_del(&up->list);
+ if (mlx5_cmd_free_uar(up->mdev, up->index))
+ mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
+ kfree(up->reg_bitmap);
+ kfree(up->fp_bitmap);
+ kfree(up);
+}
+
+static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
+ bool map_wc)
+{
+ struct mlx5_uars_page *up;
+ int err = -ENOMEM;
+ phys_addr_t pfn;
+ int bfregs;
int i;
- uuari->num_uars = NUM_DRIVER_UARS;
- uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
+ bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return ERR_PTR(err);
- mutex_init(&uuari->lock);
- uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
- if (!uuari->uars)
- return -ENOMEM;
+ up->mdev = mdev;
+ up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ if (!up->reg_bitmap)
+ goto error1;
- uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
- if (!uuari->bfs) {
- err = -ENOMEM;
- goto out_uars;
- }
+ up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ if (!up->fp_bitmap)
+ goto error1;
- uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
- GFP_KERNEL);
- if (!uuari->bitmap) {
- err = -ENOMEM;
- goto out_bfs;
- }
+ for (i = 0; i < bfregs; i++)
+ if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
+ set_bit(i, up->reg_bitmap);
+ else
+ set_bit(i, up->fp_bitmap);
- uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
- if (!uuari->count) {
- err = -ENOMEM;
- goto out_bitmap;
- }
+ up->bfregs = bfregs;
+ up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+ up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
- for (i = 0; i < uuari->num_uars; i++) {
- err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
- if (err)
- goto out_count;
+ err = mlx5_cmd_alloc_uar(mdev, &up->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ goto error1;
+ }
- addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
- uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
- if (!uuari->uars[i].map) {
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ pfn = uar2pfn(mdev, up->index);
+ if (map_wc) {
+ up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!up->map) {
+ err = -EAGAIN;
+ goto error2;
+ }
+ } else {
+ up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!up->map) {
err = -ENOMEM;
- goto out_count;
+ goto error2;
}
- mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
- uuari->uars[i].index, uuari->uars[i].map);
- }
-
- for (i = 0; i < tot_uuars; i++) {
- bf = &uuari->bfs[i];
-
- bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
- bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
- bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
- bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
- (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
- MLX5_BF_OFFSET;
- bf->need_lock = need_uuar_lock(i);
- spin_lock_init(&bf->lock);
- spin_lock_init(&bf->lock32);
- bf->uuarn = i;
}
+ kref_init(&up->ref_count);
+ mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
+ up->index, up->bfregs);
+ return up;
+
+error2:
+ if (mlx5_cmd_free_uar(mdev, up->index))
+ mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
+error1:
+ kfree(up->fp_bitmap);
+ kfree(up->reg_bitmap);
+ kfree(up);
+ return ERR_PTR(err);
+}
- return 0;
-
-out_count:
- for (i--; i >= 0; i--) {
- iounmap(uuari->uars[i].map);
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_uars_page *ret;
+
+ mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+ if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret)) {
+ ret = NULL;
+ goto out;
+ }
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
+ } else {
+ ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
+ struct mlx5_uars_page, list);
+ kref_get(&ret->ref_count);
}
- kfree(uuari->count);
+out:
+ mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
-out_bitmap:
- kfree(uuari->bitmap);
-
-out_bfs:
- kfree(uuari->bfs);
+ return ret;
+}
+EXPORT_SYMBOL(mlx5_get_uars_page);
-out_uars:
- kfree(uuari->uars);
- return err;
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
+{
+ mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+ kref_put(&up->ref_count, up_rel_func);
+ mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
+EXPORT_SYMBOL(mlx5_put_uars_page);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
- int i = uuari->num_uars;
+ /* return the offset in bytes from the start of the page to the
+ * blue flame area of the UAR
+ */
+ return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
+ (dbi % MLX5_BFREGS_PER_UAR) *
+ (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
+}
- for (i--; i >= 0; i--) {
- iounmap(uuari->uars[i].map);
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path)
+{
+ struct mlx5_bfreg_data *bfregs;
+ struct mlx5_uars_page *up;
+ struct list_head *head;
+ unsigned long *bitmap;
+ unsigned int *avail;
+ struct mutex *lock; /* pointer to the right mutex */
+ int dbi;
+
+ bfregs = &mdev->priv.bfregs;
+ if (map_wc) {
+ head = &bfregs->wc_head.list;
+ lock = &bfregs->wc_head.lock;
+ } else {
+ head = &bfregs->reg_head.list;
+ lock = &bfregs->reg_head.lock;
}
-
- kfree(uuari->count);
- kfree(uuari->bitmap);
- kfree(uuari->bfs);
- kfree(uuari->uars);
+ mutex_lock(lock);
+ if (list_empty(head)) {
+ up = alloc_uars_page(mdev, map_wc);
+ if (IS_ERR(up)) {
+ mutex_unlock(lock);
+ return PTR_ERR(up);
+ }
+ list_add(&up->list, head);
+ } else {
+ up = list_entry(head->next, struct mlx5_uars_page, list);
+ kref_get(&up->ref_count);
+ }
+ if (fast_path) {
+ bitmap = up->fp_bitmap;
+ avail = &up->fp_avail;
+ } else {
+ bitmap = up->reg_bitmap;
+ avail = &up->reg_avail;
+ }
+ dbi = find_first_bit(bitmap, up->bfregs);
+ clear_bit(dbi, bitmap);
+ (*avail)--;
+ if (!(*avail))
+ list_del(&up->list);
+
+ bfreg->map = up->map + map_offset(mdev, dbi);
+ bfreg->up = up;
+ bfreg->wc = map_wc;
+ bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
+ mutex_unlock(lock);
return 0;
}
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
- bool map_wc)
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path)
{
- phys_addr_t pfn;
- phys_addr_t uar_bar_start;
int err;
- err = mlx5_cmd_alloc_uar(mdev, &uar->index);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
- return err;
- }
+ err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
+ if (!err)
+ return 0;
- uar_bar_start = pci_resource_start(mdev->pdev, 0);
- pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ if (err == -EAGAIN && map_wc)
+ return alloc_bfreg(mdev, bfreg, false, fast_path);
- if (map_wc) {
- uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->bf_map) {
- mlx5_core_warn(mdev, "ioremap_wc() failed\n");
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map)
- goto err_free_uar;
- }
- } else {
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map)
- goto err_free_uar;
- }
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_bfreg);
- return 0;
+static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
+ struct mlx5_uars_page *up,
+ struct mlx5_sq_bfreg *bfreg)
+{
+ unsigned int uar_idx;
+ unsigned int bfreg_idx;
+ unsigned int bf_reg_size;
-err_free_uar:
- mlx5_core_warn(mdev, "ioremap() failed\n");
- err = -ENOMEM;
- mlx5_cmd_free_uar(mdev, uar->index);
+ bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
- return err;
+ uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
+ bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
+
+ return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}
-EXPORT_SYMBOL(mlx5_alloc_map_uar);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
- if (uar->map)
- iounmap(uar->map);
- else
- iounmap(uar->bf_map);
- mlx5_cmd_free_uar(mdev, uar->index);
+ struct mlx5_bfreg_data *bfregs;
+ struct mlx5_uars_page *up;
+ struct mutex *lock; /* pointer to the right mutex */
+ unsigned int dbi;
+ bool fp;
+ unsigned int *avail;
+ unsigned long *bitmap;
+ struct list_head *head;
+
+ bfregs = &mdev->priv.bfregs;
+ if (bfreg->wc) {
+ head = &bfregs->wc_head.list;
+ lock = &bfregs->wc_head.lock;
+ } else {
+ head = &bfregs->reg_head.list;
+ lock = &bfregs->reg_head.lock;
+ }
+ up = bfreg->up;
+ dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
+ fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
+ if (fp) {
+ avail = &up->fp_avail;
+ bitmap = up->fp_bitmap;
+ } else {
+ avail = &up->reg_avail;
+ bitmap = up->reg_bitmap;
+ }
+ mutex_lock(lock);
+ (*avail)++;
+ set_bit(dbi, bitmap);
+ if (*avail == 1)
+ list_add_tail(&up->list, head);
+
+ kref_put(&up->ref_count, up_rel_func);
+ mutex_unlock(lock);
}
-EXPORT_SYMBOL(mlx5_unmap_free_uar);
+EXPORT_SYMBOL(mlx5_free_bfreg);
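+
+/* Usage sketch (illustrative only): a minimal alloc/use/free cycle for a
+ * send-queue blue flame register. The caller and the doorbell write are
+ * assumptions; only mlx5_alloc_bfreg()/mlx5_free_bfreg() above are real.
+ */
+static int mlx5_example_bfreg_cycle(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_sq_bfreg bfreg;
+ int err;
+
+ err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
+ if (err)
+ return err;
+
+ /* ... ring doorbells through bfreg.map ... */
+
+ mlx5_free_bfreg(mdev, &bfreg);
+ return 0;
+}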
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..15c2294dd2b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_L2:
+ *min_inline_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
+ break;
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ *min_inline_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
+
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline)
{
@@ -532,7 +549,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
in = mlx5_vzalloc(inlen);
if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 16f44b9aa076..ef23eaedc2ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -73,6 +73,8 @@ config MLXSW_SWITCHX2
config MLXSW_SPECTRUM
tristate "Mellanox Technologies Spectrum support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
+ depends on PSAMPLE || PSAMPLE=n
+ select PARMAN
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index fe8dadba15ab..6b6c30deee83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
-mlxsw_core-objs := core.o
+mlxsw_core-objs := core.o core_acl_flex_keys.o \
+ core_acl_flex_actions.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
@@ -13,7 +14,8 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_kvdl.o
+ spectrum_kvdl.o spectrum_acl_tcam.o \
+ spectrum_acl.o spectrum_flower.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 56e19b0d2f8f..a1b48421648a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1132,12 +1132,12 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_eq_int_oi
+/* cmd_mbox_sw2hw_eq_oi
* When set, overrun ignore is enabled.
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
-/* cmd_mbox_sw2hw_eq_int_st
+/* cmd_mbox_sw2hw_eq_st
* Event delivery state machine
* 0x0 - FIRED
* 0x1 - ARMED (Request for Notification)
@@ -1146,19 +1146,19 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
-/* cmd_mbox_sw2hw_eq_int_log_eq_size
+/* cmd_mbox_sw2hw_eq_log_eq_size
* Log (base 2) of the EQ size (in entries).
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
-/* cmd_mbox_sw2hw_eq_int_producer_counter
+/* cmd_mbox_sw2hw_eq_producer_counter
* Producer Counter. The counter is incremented for each EQE that is written
* by the HW to the EQ.
* Maintained by HW (valid for the QUERY_EQ command only)
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
-/* cmd_mbox_sw2hw_eq_int_pa
+/* cmd_mbox_sw2hw_eq_pa
* Physical Address.
*/
MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 57a98849551b..a4c07841aaf6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1901,11 +1901,11 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
-int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay)
+bool mlxsw_core_schedule_work(struct work_struct *work)
{
- return queue_delayed_work(mlxsw_owq, dwork, delay);
+ return queue_work(mlxsw_owq, work);
}
-EXPORT_SYMBOL(mlxsw_core_schedule_odw);
+EXPORT_SYMBOL(mlxsw_core_schedule_work);
void mlxsw_core_flush_owq(void)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index a7f94fbc898b..cf38cf9027f8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -207,7 +207,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
-int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay);
+bool mlxsw_core_schedule_work(struct work_struct *work);
void mlxsw_core_flush_owq(void);
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
new file mode 100644
index 000000000000..5f337715a4da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -0,0 +1,679 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+
+#include "item.h"
+#include "core_acl_flex_actions.h"
+
+enum mlxsw_afa_set_type {
+ MLXSW_AFA_SET_TYPE_NEXT,
+ MLXSW_AFA_SET_TYPE_GOTO,
+};
+
+/* afa_set_type
+ * Type of the record at the end of the action set.
+ */
+MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);
+
+/* afa_set_next_action_set_ptr
+ * A pointer to the next action set in the KVD Centralized database.
+ */
+MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);
+
+/* afa_set_goto_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ */
+MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);
+
+enum mlxsw_afa_set_goto_binding_cmd {
+ /* continue to the next binding point */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
+ /* jump to the next binding point, no return */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
+ /* terminate the ACL binding */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
+};
+
+/* afa_set_goto_binding_cmd */
+MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);
+
+/* afa_set_goto_next_binding
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ */
+MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);
+
+/* afa_all_action_type
+ * Action Type.
+ */
+MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);
+
+struct mlxsw_afa {
+ unsigned int max_acts_per_set;
+ const struct mlxsw_afa_ops *ops;
+ void *ops_priv;
+ struct rhashtable set_ht;
+ struct rhashtable fwd_entry_ht;
+};
+
+#define MLXSW_AFA_SET_LEN 0xA8
+
+struct mlxsw_afa_set_ht_key {
+ char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
+ bool is_first;
+};
+
+/* A set structure holds one action set record. It contains up to three
+ * actions (depending on the size of the particular actions). The set is
+ * either put directly into a rule, or stored in the KVD linear area.
+ * To prevent duplicate entries in the KVD linear area, a hashtable is
+ * used to track sets that were previously inserted and may be shared.
+ */
+
+struct mlxsw_afa_set {
+ struct rhash_head ht_node;
+ struct mlxsw_afa_set_ht_key ht_key;
+ u32 kvdl_index;
+ bool shared; /* Inserted in hashtable (doesn't mean that
+ * kvdl_index is valid).
+ */
+ unsigned int ref_count;
+ struct mlxsw_afa_set *next; /* Pointer to the next set. */
+ struct mlxsw_afa_set *prev; /* Pointer to the previous set;
+ * note that a set may have multiple
+ * sets from multiple blocks
+ * pointing at it. This is only
+ * usable until commit.
+ */
+};
+
+static const struct rhashtable_params mlxsw_afa_set_ht_params = {
+ .key_len = sizeof(struct mlxsw_afa_set_ht_key),
+ .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
+ .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa_fwd_entry_ht_key {
+ u8 local_port;
+};
+
+struct mlxsw_afa_fwd_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_afa_fwd_entry_ht_key ht_key;
+ u32 kvdl_index;
+ unsigned int ref_count;
+};
+
+static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
+ .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+ const struct mlxsw_afa_ops *ops,
+ void *ops_priv)
+{
+ struct mlxsw_afa *mlxsw_afa;
+ int err;
+
+ mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
+ if (!mlxsw_afa)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
+ if (err)
+ goto err_set_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
+ &mlxsw_afa_fwd_entry_ht_params);
+ if (err)
+ goto err_fwd_entry_rhashtable_init;
+ mlxsw_afa->max_acts_per_set = max_acts_per_set;
+ mlxsw_afa->ops = ops;
+ mlxsw_afa->ops_priv = ops_priv;
+ return mlxsw_afa;
+
+err_fwd_entry_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->set_ht);
+err_set_rhashtable_init:
+ kfree(mlxsw_afa);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(mlxsw_afa_create);
+
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
+{
+ rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
+ rhashtable_destroy(&mlxsw_afa->set_ht);
+ kfree(mlxsw_afa);
+}
+EXPORT_SYMBOL(mlxsw_afa_destroy);
+
+static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
+ enum mlxsw_afa_set_goto_binding_cmd cmd,
+ u16 group_id)
+{
+ char *actions = set->ht_key.enc_actions;
+
+ mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
+ mlxsw_afa_set_goto_g_set(actions, true);
+ mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
+ mlxsw_afa_set_goto_next_binding_set(actions, group_id);
+}
+
+static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
+ u32 next_set_kvdl_index)
+{
+ char *actions = set->ht_key.enc_actions;
+
+ mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
+ mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
+{
+ struct mlxsw_afa_set *set;
+
+ set = kzalloc(sizeof(*set), GFP_KERNEL);
+ if (!set)
+ return NULL;
+ /* Need to initialize the set to pass by default */
+ mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ set->ht_key.is_first = is_first;
+ set->ref_count = 1;
+ return set;
+}
+
+static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
+{
+ kfree(set);
+}
+
+static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ int err;
+
+ err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ if (err)
+ return err;
+ err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
+ &set->kvdl_index,
+ set->ht_key.enc_actions,
+ set->ht_key.is_first);
+ if (err)
+ goto err_kvdl_set_add;
+ set->shared = true;
+ set->prev = NULL;
+ return 0;
+
+err_kvdl_set_add:
+ rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ return err;
+}
+
+static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
+ set->kvdl_index,
+ set->ht_key.is_first);
+ rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ set->shared = false;
+}
+
+static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ if (--set->ref_count)
+ return;
+ if (set->shared)
+ mlxsw_afa_set_unshare(mlxsw_afa, set);
+ mlxsw_afa_set_destroy(set);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *orig_set)
+{
+ struct mlxsw_afa_set *set;
+ int err;
+
+ /* There is a hashtable of sets maintained. If a set with the exact
+ * same encoding exists, we reuse it. Otherwise, the current set
+ * is shared by making it available to others using the hash table.
+ */
+ set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
+ mlxsw_afa_set_ht_params);
+ if (set) {
+ set->ref_count++;
+ mlxsw_afa_set_put(mlxsw_afa, orig_set);
+ } else {
+ set = orig_set;
+ err = mlxsw_afa_set_share(mlxsw_afa, set);
+ if (err)
+ return ERR_PTR(err);
+ }
+ return set;
+}
+
+/* Block structure holds a list of action sets. One action block
+ * represents one chain of actions executed upon match of a rule.
+ */
+
+struct mlxsw_afa_block {
+ struct mlxsw_afa *afa;
+ bool finished;
+ struct mlxsw_afa_set *first_set;
+ struct mlxsw_afa_set *cur_set;
+ unsigned int cur_act_index; /* In current set. */
+ struct list_head fwd_entry_ref_list;
+};
+
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
+{
+ struct mlxsw_afa_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+ block->afa = mlxsw_afa;
+
+ /* At least one action set is always present, so just create it here */
+ block->first_set = mlxsw_afa_set_create(true);
+ if (!block->first_set)
+ goto err_first_set_create;
+ block->cur_set = block->first_set;
+ return block;
+
+err_first_set_create:
+ kfree(block);
+ return NULL;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_create);
+
+static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
+
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_set *set = block->first_set;
+ struct mlxsw_afa_set *next_set;
+
+ do {
+ next_set = set->next;
+ mlxsw_afa_set_put(block->afa, set);
+ set = next_set;
+ } while (set);
+ mlxsw_afa_fwd_entry_refs_destroy(block);
+ kfree(block);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_destroy);
+
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_set *set = block->cur_set;
+ struct mlxsw_afa_set *prev_set;
+
+ block->cur_set = NULL;
+ block->finished = true;
+
+ /* Go over all linked sets, starting from the last one,
+ * and try to find an existing set in the hash table.
+ * If a set is not there yet, assign a KVD linear index
+ * and insert it.
+ */
+ do {
+ prev_set = set->prev;
+ set = mlxsw_afa_set_get(block->afa, set);
+ if (IS_ERR(set))
+ /* No rollback is needed since the chain is
+ * in a consistent state and mlxsw_afa_block_destroy
+ * will take care of putting it away.
+ */
+ return PTR_ERR(set);
+ if (prev_set) {
+ prev_set->next = set;
+ mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
+ set = prev_set;
+ }
+ } while (prev_set);
+
+ block->first_set = set;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_commit);
+
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
+{
+ return block->first_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_set);
+
+u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+{
+ return block->first_set->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+
+void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
+{
+ if (WARN_ON(block->finished))
+ return;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
+ block->finished = true;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_continue);
+
+void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
+{
+ if (WARN_ON(block->finished))
+ return;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
+ block->finished = true;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_jump);
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+{
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+ int err;
+
+ fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
+ if (!fwd_entry)
+ return ERR_PTR(-ENOMEM);
+ fwd_entry->ht_key.local_port = local_port;
+ fwd_entry->ref_count = 1;
+
+ err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
+ &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
+ &fwd_entry->kvdl_index,
+ local_port);
+ if (err)
+ goto err_kvdl_fwd_entry_add;
+ return fwd_entry;
+
+err_kvdl_fwd_entry_add:
+ rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+err_rhashtable_insert:
+ kfree(fwd_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+ mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
+ fwd_entry->kvdl_index);
+ rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+ kfree(fwd_entry);
+}
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+{
+ struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+
+ ht_key.local_port = local_port;
+ fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
+ mlxsw_afa_fwd_entry_ht_params);
+ if (fwd_entry) {
+ fwd_entry->ref_count++;
+ return fwd_entry;
+ }
+ return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
+}
+
+static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+ if (--fwd_entry->ref_count)
+ return;
+ mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
+}
+
+struct mlxsw_afa_fwd_entry_ref {
+ struct list_head list;
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+};
+
+static struct mlxsw_afa_fwd_entry_ref *
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+ int err;
+
+ fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
+ if (!fwd_entry_ref)
+ return ERR_PTR(-ENOMEM);
+ fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
+ if (IS_ERR(fwd_entry)) {
+ err = PTR_ERR(fwd_entry);
+ goto err_fwd_entry_get;
+ }
+ fwd_entry_ref->fwd_entry = fwd_entry;
+ list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+ return fwd_entry_ref;
+
+err_fwd_entry_get:
+ kfree(fwd_entry_ref);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ list_del(&fwd_entry_ref->list);
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+ struct mlxsw_afa_fwd_entry_ref *tmp;
+
+ list_for_each_entry_safe(fwd_entry_ref, tmp,
+ &block->fwd_entry_ref_list, list)
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
+#define MLXSW_AFA_ONE_ACTION_LEN 32
+#define MLXSW_AFA_PAYLOAD_OFFSET 4
+
+static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
+ u8 action_code, u8 action_size)
+{
+ char *oneact;
+ char *actions;
+
+ if (WARN_ON(block->finished))
+ return NULL;
+ if (block->cur_act_index + action_size >
+ block->afa->max_acts_per_set) {
+ struct mlxsw_afa_set *set;
+
+ /* The appended action won't fit into the current action set,
+ * so create a new set.
+ */
+ set = mlxsw_afa_set_create(false);
+ if (!set)
+ return NULL;
+ set->prev = block->cur_set;
+ block->cur_act_index = 0;
+ block->cur_set->next = set;
+ block->cur_set = set;
+ }
+
+ actions = block->cur_set->ht_key.enc_actions;
+ oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
+ block->cur_act_index += action_size;
+ mlxsw_afa_all_action_type_set(oneact, action_code);
+ return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
+}
+
+/* Trap / Discard Action
+ * ---------------------
+ * The Trap / Discard action enables trapping / mirroring packets to the CPU
+ * as well as discarding packets.
+ * The ACL Trap / Discard separates the forward/discard control from CPU
+ * trap control. In addition, the Trap / Discard action enables activating
+ * SPAN (port mirroring).
+ */
+
+#define MLXSW_AFA_TRAPDISC_CODE 0x03
+#define MLXSW_AFA_TRAPDISC_SIZE 1
+
+enum mlxsw_afa_trapdisc_forward_action {
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
+};
+
+/* afa_trapdisc_forward_action
+ * Forward Action.
+ */
+MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+
+static inline void
+mlxsw_afa_trapdisc_pack(char *payload,
+ enum mlxsw_afa_trapdisc_forward_action forward_action)
+{
+ mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
+}
+
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
+
+/* Forwarding Action
+ * -----------------
+ * Forwarding Action can be used to implement Policy Based Switching (PBS)
+ * as well as the OpenFlow-related "Output" action.
+ */
+
+#define MLXSW_AFA_FORWARD_CODE 0x07
+#define MLXSW_AFA_FORWARD_SIZE 1
+
+enum mlxsw_afa_forward_type {
+ /* PBS, Policy Based Switching */
+ MLXSW_AFA_FORWARD_TYPE_PBS,
+ /* Output, OpenFlow output type */
+ MLXSW_AFA_FORWARD_TYPE_OUTPUT,
+};
+
+/* afa_forward_type */
+MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);
+
+/* afa_forward_pbs_ptr
+ * A pointer to the PBS entry configured by PPBS register.
+ * Reserved when in_port is set.
+ */
+MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);
+
+/* afa_forward_in_port
+ * Packet is forwarded back to the ingress port.
+ */
+MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);
+
+static inline void
+mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
+ u32 pbs_ptr, bool in_port)
+{
+ mlxsw_afa_forward_type_set(payload, type);
+ mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
+ mlxsw_afa_forward_in_port_set(payload, in_port);
+}
+
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+ u8 local_port, bool in_port)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+ u32 kvdl_index;
+ char *act;
+ int err;
+
+ if (in_port)
+ return -EOPNOTSUPP;
+ fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
+ if (IS_ERR(fwd_entry_ref))
+ return PTR_ERR(fwd_entry_ref);
+ kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
+
+ act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
+ MLXSW_AFA_FORWARD_SIZE);
+ if (!act) {
+ err = -ENOBUFS;
+ goto err_append_action;
+ }
+ mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
+ kvdl_index, in_port);
+ return 0;
+
+err_append_action:
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
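+
+/* Usage sketch (illustrative only): the typical lifecycle of an action
+ * block built from the helpers above. The caller is hypothetical; in a
+ * real driver the block must stay allocated for as long as the rule that
+ * uses its first set is installed.
+ */
+static int mlxsw_afa_example_drop_block(struct mlxsw_afa *mlxsw_afa)
+{
+ struct mlxsw_afa_block *block;
+ int err;
+
+ block = mlxsw_afa_block_create(mlxsw_afa);
+ if (!block)
+ return -ENOMEM;
+ err = mlxsw_afa_block_append_drop(block);
+ if (err)
+ goto out;
+ err = mlxsw_afa_block_commit(block);
+ if (err)
+ goto out;
+ /* mlxsw_afa_block_first_set(block) now returns the encoded actions
+ * to be written into the rule entry.
+ */
+out:
+ mlxsw_afa_block_destroy(block);
+ return err;
+}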
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
new file mode 100644
index 000000000000..43f78dcfe394
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+
+#include <linux/types.h>
+
+struct mlxsw_afa;
+struct mlxsw_afa_block;
+
+struct mlxsw_afa_ops {
+ int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first);
+ void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
+ void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+};
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+ const struct mlxsw_afa_ops *ops,
+ void *ops_priv);
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa);
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
+void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+ u8 local_port, bool in_port);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
new file mode 100644
index 000000000000..b32a00972e83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -0,0 +1,475 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_afk {
+ struct list_head key_info_list;
+ unsigned int max_blocks;
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+};
+
+static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+ for (j = 0; j < block->instances_count; j++) {
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[j];
+ if (elinst->type != elinst->info->type ||
+ elinst->item.size.bits !=
+ elinst->info->item.size.bits)
+ return false;
+ }
+ }
+ return true;
+}
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+ const struct mlxsw_afk_block *blocks,
+ unsigned int blocks_count)
+{
+ struct mlxsw_afk *mlxsw_afk;
+
+ mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL);
+ if (!mlxsw_afk)
+ return NULL;
+ INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
+ mlxsw_afk->max_blocks = max_blocks;
+ mlxsw_afk->blocks = blocks;
+ mlxsw_afk->blocks_count = blocks_count;
+ WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
+ return mlxsw_afk;
+}
+EXPORT_SYMBOL(mlxsw_afk_create);
+
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk)
+{
+ WARN_ON(!list_empty(&mlxsw_afk->key_info_list));
+ kfree(mlxsw_afk);
+}
+EXPORT_SYMBOL(mlxsw_afk_destroy);
+
+struct mlxsw_afk_key_info {
+ struct list_head list;
+ unsigned int ref_count;
+ unsigned int blocks_count;
+ int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
+ * is index inside "blocks"
+ */
+ struct mlxsw_afk_element_usage elusage;
+ const struct mlxsw_afk_block *blocks[0];
+};
+
+static bool
+mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0;
+}
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+
+ list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) {
+ if (mlxsw_afk_key_info_elements_eq(key_info, elusage))
+ return key_info;
+ }
+ return NULL;
+}
+
+struct mlxsw_afk_picker {
+ struct {
+ DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+ unsigned int total;
+ } hits[0];
+};
+
+static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ enum mlxsw_afk_element element)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+ for (j = 0; j < block->instances_count; j++) {
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[j];
+ if (elinst->info->element == element) {
+ __set_bit(element, picker->hits[i].element);
+ picker->hits[i].total++;
+ }
+ }
+ }
+}
+
+static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ int block_index)
+{
+ DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX);
+ int i;
+ int j;
+
+ memcpy(&hits_element, &picker->hits[block_index].element,
+ sizeof(hits_element));
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) {
+ if (__test_and_clear_bit(j, picker->hits[i].element))
+ picker->hits[i].total--;
+ }
+ }
+}
+
+static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker)
+{
+ int most_index = -EINVAL; /* Should never be returned */
+ int most_hits = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ if (picker->hits[i].total > most_hits) {
+ most_hits = picker->hits[i].total;
+ most_index = i;
+ }
+ }
+ return most_index;
+}
+
+static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ int block_index,
+ struct mlxsw_afk_key_info *key_info)
+{
+ enum mlxsw_afk_element element;
+
+ if (key_info->blocks_count == mlxsw_afk->max_blocks)
+ return -EINVAL;
+
+ for_each_set_bit(element, picker->hits[block_index].element,
+ MLXSW_AFK_ELEMENT_MAX) {
+ key_info->element_to_block[element] = key_info->blocks_count;
+ mlxsw_afk_element_usage_add(&key_info->elusage, element);
+ }
+
+ key_info->blocks[key_info->blocks_count] =
+ &mlxsw_afk->blocks[block_index];
+ key_info->blocks_count++;
+ return 0;
+}
+
+static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_picker *picker;
+ enum mlxsw_afk_element element;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count;
+ picker = kzalloc(alloc_size, GFP_KERNEL);
+ if (!picker)
+ return -ENOMEM;
+
+ /* Since the same elements could be present in multiple blocks,
+ * we must find the optimal block list in order to keep the
+ * block count as low as possible.
+ *
+ * First, we count hits. We go over all available blocks and count
+ * how many of the requested elements are covered by each.
+ *
+ * Then, in a loop, we find the block with the most hits and add it
+ * to the output key_info. Then we subtract this block's hits so
+ * the next iteration finds the most suitable block for
+ * the rest of the requested elements.
+ */
+
+ mlxsw_afk_element_usage_for_each(element, elusage)
+ mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element);
+
+ do {
+ int block_index;
+
+ block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker);
+ if (block_index < 0) {
+ err = block_index;
+ goto out;
+ }
+ err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker,
+ block_index, key_info);
+ if (err)
+ goto out;
+ mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index);
+ } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage));
+
+ err = 0;
+out:
+ kfree(picker);
+ return err;
+}
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(*key_info) +
+ sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks;
+ key_info = kzalloc(alloc_size, GFP_KERNEL);
+ if (!key_info)
+ return ERR_PTR(-ENOMEM);
+ err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
+ if (err)
+ goto err_picker;
+ list_add(&key_info->list, &mlxsw_afk->key_info_list);
+ key_info->ref_count = 1;
+ return key_info;
+
+err_picker:
+ kfree(key_info);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info)
+{
+ list_del(&key_info->list);
+ kfree(key_info);
+}
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+
+ key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
+ if (key_info) {
+ key_info->ref_count++;
+ return key_info;
+ }
+ return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_get);
+
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
+{
+ if (--key_info->ref_count)
+ return;
+ mlxsw_afk_key_info_destroy(key_info);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_put);
+
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_subset);
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
+ enum mlxsw_afk_element element)
+{
+ int i;
+
+ for (i = 0; i < block->instances_count; i++) {
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[i];
+ if (elinst->info->element == element)
+ return elinst;
+ }
+ return NULL;
+}
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info,
+ enum mlxsw_afk_element element,
+ int *p_block_index)
+{
+ const struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_block *block;
+ int block_index;
+
+ if (WARN_ON(!test_bit(element, key_info->elusage.usage)))
+ return NULL;
+ block_index = key_info->element_to_block[element];
+ block = key_info->blocks[block_index];
+
+ elinst = mlxsw_afk_block_elinst_get(block, element);
+ if (WARN_ON(!elinst))
+ return NULL;
+
+ *p_block_index = block_index;
+ return elinst;
+}
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+ int block_index)
+{
+ return key_info->blocks[block_index]->encoding;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get);
+
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info)
+{
+ return key_info->blocks_count;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get);
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value)
+{
+ const struct mlxsw_afk_element_info *elinfo =
+ &mlxsw_afk_element_infos[element];
+ const struct mlxsw_item *storage_item = &elinfo->item;
+
+ if (!mask_value)
+ return;
+ if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32))
+ return;
+ __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value);
+ __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value);
+ mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_u32);
+
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ const char *key_value, const char *mask_value,
+ unsigned int len)
+{
+ const struct mlxsw_afk_element_info *elinfo =
+ &mlxsw_afk_element_infos[element];
+ const struct mlxsw_item *storage_item = &elinfo->item;
+
+ if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */
+ return;
+ if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) ||
+ WARN_ON(elinfo->item.size.bytes != len))
+ return;
+ __mlxsw_item_memcpy_to(values->storage.key, key_value,
+ storage_item, 0);
+ __mlxsw_item_memcpy_to(values->storage.mask, mask_value,
+ storage_item, 0);
+ mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
+
+static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output_indexed)
+{
+ u32 value;
+
+ value = __mlxsw_item_get32(storage, storage_item, 0);
+ __mlxsw_item_set32(output_indexed, output_item, 0, value);
+}
+
+static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output_indexed)
+{
+ char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
+ char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+ size_t len = output_item->size.bytes;
+
+ memcpy(output_data, storage_data, len);
+}
+
+#define MLXSW_AFK_KEY_BLOCK_SIZE 16
+
+static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ int block_index, char *storage, char *output)
+{
+ char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
+ const struct mlxsw_item *storage_item = &elinst->info->item;
+ const struct mlxsw_item *output_item = &elinst->item;
+
+ if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
+ mlxsw_afk_encode_u32(storage_item, output_item,
+ storage, output_indexed);
+ else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
+ mlxsw_afk_encode_buf(storage_item, output_item,
+ storage, output_indexed);
+}
+
+void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_values *values,
+ char *key, char *mask)
+{
+ const struct mlxsw_afk_element_inst *elinst;
+ enum mlxsw_afk_element element;
+ int block_index;
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
+ &block_index);
+ if (!elinst)
+ continue;
+ mlxsw_afk_encode_one(elinst, block_index,
+ values->storage.key, key);
+ mlxsw_afk_encode_one(elinst, block_index,
+ values->storage.mask, mask);
+ }
+}
+EXPORT_SYMBOL(mlxsw_afk_encode);
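+
+/* Usage sketch (illustrative only): building a key that matches the IPv4
+ * EtherType exactly and encoding it into caller-provided key/mask buffers.
+ * The caller and the buffer sizing are assumptions; the functions used are
+ * the ones defined above.
+ */
+static int mlxsw_afk_example_encode_ethertype(struct mlxsw_afk *mlxsw_afk,
+ char *key, char *mask)
+{
+ struct mlxsw_afk_element_values values = {};
+ struct mlxsw_afk_key_info *key_info;
+
+ mlxsw_afk_values_add_u32(&values, MLXSW_AFK_ELEMENT_ETHERTYPE,
+ 0x0800, 0xffff);
+ key_info = mlxsw_afk_key_info_get(mlxsw_afk, &values.elusage);
+ if (IS_ERR(key_info))
+ return PTR_ERR(key_info);
+ mlxsw_afk_encode(key_info, &values, key, mask);
+ mlxsw_afk_key_info_put(key_info);
+ return 0;
+}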
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
new file mode 100644
index 000000000000..e4fcba7c2af2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -0,0 +1,238 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
+#define _MLXSW_CORE_ACL_FLEX_KEYS_H
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "item.h"
+
+enum mlxsw_afk_element {
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ MLXSW_AFK_ELEMENT_DMAC,
+ MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP4,
+ MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+ MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+ MLXSW_AFK_ELEMENT_DST_IP6_HI,
+ MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_MAX,
+};
+
+enum mlxsw_afk_element_type {
+ MLXSW_AFK_ELEMENT_TYPE_U32,
+ MLXSW_AFK_ELEMENT_TYPE_BUF,
+};
+
+struct mlxsw_afk_element_info {
+ enum mlxsw_afk_element element; /* element ID */
+ enum mlxsw_afk_element_type type;
+ struct mlxsw_item item; /* element geometry in internal storage */
+};
+
+#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size) \
+ [MLXSW_AFK_ELEMENT_##_element] = { \
+ .element = MLXSW_AFK_ELEMENT_##_element, \
+ .type = _type, \
+ .item = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _size}, \
+ .name = #_element, \
+ }, \
+ }
+
+#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size) \
+ MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size) \
+ MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
+ _element, _offset, 0, _size)
+
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values. For each defined element type
+ * define an internal storage geometry.
+ */
+static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+};
+
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+
+struct mlxsw_afk_element_inst { /* element instance in actual block */
+ const struct mlxsw_afk_element_info *info;
+ enum mlxsw_afk_element_type type;
+ struct mlxsw_item item; /* element geometry in block */
+};
+
+#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \
+ { \
+ .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \
+ .type = _type, \
+ .item = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _size}, \
+ .name = #_element, \
+ }, \
+ }
+
+#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \
+ _element, _offset, 0, _size)
+
+struct mlxsw_afk_block {
+ u16 encoding; /* block ID */
+ struct mlxsw_afk_element_inst *instances;
+ unsigned int instances_count;
+};
+
+#define MLXSW_AFK_BLOCK(_encoding, _instances) \
+ { \
+ .encoding = _encoding, \
+ .instances = _instances, \
+ .instances_count = ARRAY_SIZE(_instances), \
+ }
+
+struct mlxsw_afk_element_usage {
+ DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX);
+};
+
+#define mlxsw_afk_element_usage_for_each(element, elusage) \
+ for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX)
+
+static inline void
+mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage,
+ enum mlxsw_afk_element element)
+{
+ __set_bit(element, elusage->usage);
+}
+
+static inline void
+mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage)
+{
+ bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX);
+}
+
+static inline void
+mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage,
+ const enum mlxsw_afk_element *elements,
+ unsigned int elements_count)
+{
+ int i;
+
+ mlxsw_afk_element_usage_zero(elusage);
+ for (i = 0; i < elements_count; i++)
+ mlxsw_afk_element_usage_add(elusage, elements[i]);
+}
+
+static inline bool
+mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
+ struct mlxsw_afk_element_usage *elusage_big)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++)
+ if (test_bit(i, elusage_small->usage) &&
+ !test_bit(i, elusage_big->usage))
+ return false;
+ return true;
+}
+
+struct mlxsw_afk;
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+ const struct mlxsw_afk_block *blocks,
+ unsigned int blocks_count);
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
+
+struct mlxsw_afk_key_info;
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage);
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info);
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage);
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+ int block_index);
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info);
+
+struct mlxsw_afk_element_values {
+ struct mlxsw_afk_element_usage elusage;
+ struct {
+ char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+ char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+ } storage;
+};
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value);
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ const char *key_value, const char *mask_value,
+ unsigned int len);
+void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_values *values,
+ char *key, char *mask);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index e50c8db2602a..12c3a4449120 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -338,7 +338,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
return -EIO;
}
- return err > 0 ? 0 : err;
+ return 0;
}
/* Routine executes I2C command. */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 3c95e3ddd9c2..28427f0758c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/item.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -72,6 +72,40 @@ __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
typesize);
}
+static inline u8 __mlxsw_item_get8(const char *buf,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
+ u8 *b = (u8 *) buf;
+ u8 tmp;
+
+ tmp = b[offset];
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
+ unsigned short index, u8 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u8));
+ u8 *b = (u8 *) buf;
+ u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u8 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = b[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = tmp;
+}
+
static inline u16 __mlxsw_item_get16(const char *buf,
const struct mlxsw_item *item,
unsigned short index)
@@ -191,6 +225,14 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
memcpy(&buf[offset], src, item->size.bytes);
}
+static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ return &buf[offset];
+}
+
static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
u16 index, u8 *shift)
@@ -253,6 +295,47 @@ static inline void __mlxsw_item_bit_array_set(char *buf,
* _iname: item name within the container
*/
+#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+{ \
+ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+{ \
+ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{ \
+ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u8 val) \
+{ \
+ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.offset = _offset, \
@@ -393,6 +476,11 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline char * \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
+{ \
+ return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}
#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
@@ -419,6 +507,12 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), index); \
+} \
+static inline char * \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_data(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
}
#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
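For illustration only (not part of this patch): the new MLXSW_ITEM8() macro above generates typed accessors in the same fashion as the existing 16/32-bit variants. The field below is made up purely to show the generated helpers.

MLXSW_ITEM8(reg, example, nibble, 0x00, 4, 4);

static void example_nibble_use(char *payload)
{
	/* Writes the value into bits 7:4 of byte 0x00 of the payload. */
	mlxsw_reg_example_nibble_set(payload, 0xA);
	/* Reads bits 7:4 back, shifted down, yielding 0xA again. */
	WARN_ON(mlxsw_reg_example_nibble_get(payload) != 0xA);
}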
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd97997..0af3338bfcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
/* pci_eqe_cmd_status
* Command completion event - status
*/
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1357fe04391b..0899e2d310e2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,9 +1,9 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -1757,6 +1757,505 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* PPBT - Policy-Engine Port Binding Table
+ * ---------------------------------------
+ * This register is used for configuration of the Port Binding Table.
+ */
+#define MLXSW_REG_PPBT_ID 0x3002
+#define MLXSW_REG_PPBT_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN);
+
+enum mlxsw_reg_pxbt_e {
+ MLXSW_REG_PXBT_E_IACL,
+ MLXSW_REG_PXBT_E_EACL,
+};
+
+/* reg_ppbt_e
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1);
+
+enum mlxsw_reg_pxbt_op {
+ MLXSW_REG_PXBT_OP_BIND,
+ MLXSW_REG_PXBT_OP_UNBIND,
+};
+
+/* reg_ppbt_op
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
+
+/* reg_ppbt_local_port
+ * Local port. Not including CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+
+/* reg_ppbt_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1);
+
+/* reg_ppbt_acl_info
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
+ enum mlxsw_reg_pxbt_op op,
+ u8 local_port, u16 acl_info)
+{
+ MLXSW_REG_ZERO(ppbt, payload);
+ mlxsw_reg_ppbt_e_set(payload, e);
+ mlxsw_reg_ppbt_op_set(payload, op);
+ mlxsw_reg_ppbt_local_port_set(payload, local_port);
+ mlxsw_reg_ppbt_g_set(payload, true);
+ mlxsw_reg_ppbt_acl_info_set(payload, acl_info);
+}
+
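For illustration only (not part of this patch): a minimal sketch of binding an ACL group to the ingress of a local port through PPBT, assuming the group was already created; the function name is hypothetical.

static int example_ppbt_ingress_bind(struct mlxsw_core *mlxsw_core,
				     u8 local_port, u16 acl_group_id)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, MLXSW_REG_PXBT_E_IACL,
			    MLXSW_REG_PXBT_OP_BIND, local_port, acl_group_id);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ppbt), ppbt_pl);
}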
+/* PACL - Policy-Engine ACL Register
+ * ---------------------------------
+ * This register is used for configuration of the ACL.
+ */
+#define MLXSW_REG_PACL_ID 0x3004
+#define MLXSW_REG_PACL_LEN 0x70
+
+MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN);
+
+/* reg_pacl_v
+ * Valid. Setting the v bit makes the ACL valid. It should not be cleared
+ * while the ACL is bound to a port, VLAN or ACL rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1);
+
+/* reg_pacl_acl_id
+ * An identifier representing the ACL (managed by software)
+ * Range 0 .. cap_max_acl_regions - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16);
+
+#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16
+
+/* reg_pacl_tcam_region_info
+ * Opaque object that represents a TCAM region.
+ * Obtained through PTAR register.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id,
+ bool valid, const char *tcam_region_info)
+{
+ MLXSW_REG_ZERO(pacl, payload);
+ mlxsw_reg_pacl_acl_id_set(payload, acl_id);
+ mlxsw_reg_pacl_v_set(payload, valid);
+ mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+/* PAGT - Policy-Engine ACL Group Table
+ * ------------------------------------
+ * This register is used for configuration of the ACL Group Table.
+ */
+#define MLXSW_REG_PAGT_ID 0x3005
+#define MLXSW_REG_PAGT_BASE_LEN 0x30
+#define MLXSW_REG_PAGT_ACL_LEN 4
+#define MLXSW_REG_PAGT_ACL_MAX_NUM 16
+#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \
+ MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN)
+
+MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN);
+
+/* reg_pagt_size
+ * Number of ACLs in the group.
+ * Size 0 invalidates a group.
+ * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now)
+ * The total number of ACLs in all groups must be lower than or equal
+ * to cap_max_acl_tot_groups.
+ * Note: a group which is bound must not be invalidated.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8);
+
+/* reg_pagt_acl_group_id
+ * An identifier (numbered from 0..cap_max_acl_groups-1) representing
+ * the ACL Group identifier (managed by software).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16);
+
+/* reg_pagt_acl_id
+ * ACL identifier
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id)
+{
+ MLXSW_REG_ZERO(pagt, payload);
+ mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id);
+}
+
+static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index,
+ u16 acl_id)
+{
+ u8 size = mlxsw_reg_pagt_size_get(payload);
+
+ if (index >= size)
+ mlxsw_reg_pagt_size_set(payload, index + 1);
+ mlxsw_reg_pagt_acl_id_set(payload, index, acl_id);
+}
+
+/* PTAR - Policy-Engine TCAM Allocation Register
+ * ---------------------------------------------
+ * This register is used for allocation of regions in the TCAM.
+ * Note: Query method is not supported on this register.
+ */
+#define MLXSW_REG_PTAR_ID 0x3006
+#define MLXSW_REG_PTAR_BASE_LEN 0x20
+#define MLXSW_REG_PTAR_KEY_ID_LEN 1
+#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16
+#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \
+ MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN)
+
+MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN);
+
+enum mlxsw_reg_ptar_op {
+ /* allocate a TCAM region */
+ MLXSW_REG_PTAR_OP_ALLOC,
+ /* resize a TCAM region */
+ MLXSW_REG_PTAR_OP_RESIZE,
+ /* deallocate TCAM region */
+ MLXSW_REG_PTAR_OP_FREE,
+ /* test allocation */
+ MLXSW_REG_PTAR_OP_TEST,
+};
+
+/* reg_ptar_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
+
+/* reg_ptar_action_set_type
+ * Type of action set to be used on this region.
+ * For Spectrum, this is always type 2 - "flexible"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+
+/* reg_ptar_key_type
+ * TCAM key type for the region.
+ * For Spectrum, this is always type 0x50 - "FLEX_KEY"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
+
+/* reg_ptar_region_size
+ * TCAM region size. When allocating/resizing, this is the requested size;
+ * the response is the actual size. Note that the actual size may be
+ * larger than requested.
+ * Allowed range 1 .. cap_max_rules-1
+ * Reserved during op deallocate.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16);
+
+/* reg_ptar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16);
+
+/* reg_ptar_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Returned when allocating a region.
+ * Provided by software for ACL generation, region deallocation and resize.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptar_flexible_key_id
+ * Identifier of the Flexible Key.
+ * Only valid if key_type == "FLEX_KEY"
+ * The key size will be rounded up to one of the following values:
+ * 9B, 18B, 36B, 54B.
+ * This field is reserved during the resize operation.
+ * Access: WO
+ */
+MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
+ MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
+
+static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ u16 region_size, u16 region_id,
+ const char *tcam_region_info)
+{
+ MLXSW_REG_ZERO(ptar, payload);
+ mlxsw_reg_ptar_op_set(payload, op);
+ mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
+ mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+ mlxsw_reg_ptar_region_size_set(payload, region_size);
+ mlxsw_reg_ptar_region_id_set(payload, region_id);
+ mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index,
+ u16 key_id)
+{
+ mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id);
+}
+
+static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
+{
+ mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
+}
+
+/* PPBS - Policy-Engine Policy Based Switching Register
+ * ----------------------------------------------------
+ * This register retrieves and sets Policy Based Switching Table entries.
+ */
+#define MLXSW_REG_PPBS_ID 0x300C
+#define MLXSW_REG_PPBS_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN);
+
+/* reg_ppbs_pbs_ptr
+ * Index into the PBS table.
+ * For Spectrum, the index points to the KVD Linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24);
+
+/* reg_ppbs_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr,
+ u16 system_port)
+{
+ MLXSW_REG_ZERO(ppbs, payload);
+ mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr);
+ mlxsw_reg_ppbs_system_port_set(payload, system_port);
+}
+
+/* PRCR - Policy-Engine Rules Copy Register
+ * ----------------------------------------
+ * This register is used for copying and moving rules within and between
+ * TCAM regions.
+ */
+#define MLXSW_REG_PRCR_ID 0x300D
+#define MLXSW_REG_PRCR_LEN 0x40
+
+MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN);
+
+enum mlxsw_reg_prcr_op {
+ /* Move rules. Moves the rules from "tcam_region_info" starting
+ * at offset "offset" to "dest_tcam_region_info"
+ * at offset "dest_offset."
+ */
+ MLXSW_REG_PRCR_OP_MOVE,
+ /* Copy rules. Copies the rules from "tcam_region_info" starting
+ * at offset "offset" to "dest_tcam_region_info"
+ * at offset "dest_offset."
+ */
+ MLXSW_REG_PRCR_OP_COPY,
+};
+
+/* reg_prcr_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4);
+
+/* reg_prcr_offset
+ * Offset within the source region to copy/move from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16);
+
+/* reg_prcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16);
+
+/* reg_prcr_tcam_region_info
+ * Opaque object that represents the source TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_prcr_dest_offset
+ * Offset within the destination region to copy/move to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16);
+
+/* reg_prcr_dest_tcam_region_info
+ * Opaque object that represents the destination TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op,
+ const char *src_tcam_region_info,
+ u16 src_offset,
+ const char *dest_tcam_region_info,
+ u16 dest_offset, u16 size)
+{
+ MLXSW_REG_ZERO(prcr, payload);
+ mlxsw_reg_prcr_op_set(payload, op);
+ mlxsw_reg_prcr_offset_set(payload, src_offset);
+ mlxsw_reg_prcr_size_set(payload, size);
+ mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload,
+ src_tcam_region_info);
+ mlxsw_reg_prcr_dest_offset_set(payload, dest_offset);
+ mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload,
+ dest_tcam_region_info);
+}
+
+/* PEFA - Policy-Engine Extended Flexible Action Register
+ * ------------------------------------------------------
+ * This register is used for accessing an extended flexible action entry
+ * in the central KVD Linear Database.
+ */
+#define MLXSW_REG_PEFA_ID 0x300F
+#define MLXSW_REG_PEFA_LEN 0xB0
+
+MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
+
+/* reg_pefa_index
+ * Index in the KVD Linear Centralized Database.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+
+#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8
+
+/* reg_pefa_flex_action_set
+ * Action-set to perform when rule is matched.
+ * Must be zero padded if action set is shorter.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08,
+ MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+ const char *flex_action_set)
+{
+ MLXSW_REG_ZERO(pefa, payload);
+ mlxsw_reg_pefa_index_set(payload, index);
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+}
+
+/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
+ * -----------------------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ * It is a new version of PTCE in order to support wider key,
+ * mask and action within a TCAM region. This register is not supported
+ * by SwitchX and SwitchX-2.
+ */
+#define MLXSW_REG_PTCE2_ID 0x3017
+#define MLXSW_REG_PTCE2_LEN 0x1D8
+
+MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN);
+
+/* reg_ptce2_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1);
+
+/* reg_ptce2_a
+ * Activity. Set if a packet lookup has hit on the specific entry.
+ * To clear the "a" bit, use "clear activity" op or "clear on read" op.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1);
+
+enum mlxsw_reg_ptce2_op {
+ /* Read operation. */
+ MLXSW_REG_PTCE2_OP_QUERY_READ = 0,
+ /* Clear on read operation. Used to read the entry
+ * and clear the Activity bit.
+ */
+ MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Write operation. Used to write a new entry to the table.
+ * All RW fields are relevant for the new entry. The Activity bit is
+ * set for new entries. Note: a write with v = 0 will delete the entry.
+ */
+ MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0,
+ /* Update action. Only action set will be updated. */
+ MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1,
+ /* Clear activity. The Activity ("a") bit is cleared for the entry. */
+ MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2,
+};
+
+/* reg_ptce2_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
+
+/* reg_ptce2_offset
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+
+/* reg_ptce2_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+
+/* reg_ptce2_flex_key_blocks
+ * ACL Key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
+ MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_mask
+ * mask - of the same size as the key. A bit that is set directs the TCAM
+ * to compare the corresponding bit in key. A bit that is clear directs
+ * the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
+ MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_flex_action_set
+ * ACL action set.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
+ MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce2_op op,
+ const char *tcam_region_info,
+ u16 offset)
+{
+ MLXSW_REG_ZERO(ptce2, payload);
+ mlxsw_reg_ptce2_v_set(payload, valid);
+ mlxsw_reg_ptce2_op_set(payload, op);
+ mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
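For illustration only (not part of this patch): a minimal sketch of writing a valid rule entry with PTCE-V2 using the accessors above, assuming the caller supplies the opaque region info plus already-encoded key, mask and action set; the function name is hypothetical.

static int example_ptce2_entry_write(struct mlxsw_core *mlxsw_core,
				     const char *tcam_region_info, u16 offset,
				     const char *key, const char *mask,
				     const char *act_set)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	/* Valid entry, plain write op, addressed by region info + offset. */
	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     tcam_region_info, offset);
	mlxsw_reg_ptce2_flex_key_blocks_memcpy_to(ptce2_pl, key);
	mlxsw_reg_ptce2_mask_memcpy_to(ptce2_pl, mask);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptce2), ptce2_pl);
}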
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -3154,7 +3653,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
* Configures the properties for forwarding to CPU.
*/
#define MLXSW_REG_HTGT_ID 0x7002
-#define MLXSW_REG_HTGT_LEN 0x100
+#define MLXSW_REG_HTGT_LEN 0x20
MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN);
@@ -4965,6 +5464,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MPSC - Monitoring Packet Sampling Configuration Register
+ * --------------------------------------------------------
+ * MPSC Register is used to configure the Packet Sampling mechanism.
+ */
+#define MLXSW_REG_MPSC_ID 0x9080
+#define MLXSW_REG_MPSC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
+
+/* reg_mpsc_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+
+/* reg_mpsc_e
+ * Enable sampling on port local_port
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
+
+#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL
+
+/* reg_mpsc_rate
+ * Sampling rate = 1 out of rate packets (with randomization around
+ * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+ u32 rate)
+{
+ MLXSW_REG_ZERO(mpsc, payload);
+ mlxsw_reg_mpsc_local_port_set(payload, local_port);
+ mlxsw_reg_mpsc_e_set(payload, e);
+ mlxsw_reg_mpsc_rate_set(payload, rate);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5394,6 +5933,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(ppbt),
+ MLXSW_REG(pacl),
+ MLXSW_REG(pagt),
+ MLXSW_REG(ptar),
+ MLXSW_REG(ppbs),
+ MLXSW_REG(prcr),
+ MLXSW_REG(pefa),
+ MLXSW_REG(ptce2),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
@@ -5429,6 +5976,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpat),
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
+ MLXSW_REG(mpsc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 3c2171dbdba4..bce8c2e00630 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/resources.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,14 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
+ MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
+ MLXSW_RES_ID_ACL_MAX_REGIONS,
+ MLXSW_RES_ID_ACL_MAX_GROUPS,
+ MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+ MLXSW_RES_ID_ACL_FLEX_KEYS,
+ MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
+ MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -72,6 +80,14 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
+ [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
+ [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
+ [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
+ [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+ [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
+ [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
+ [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d768c7b6c6d6..16484f24b7db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
*
@@ -57,6 +57,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
+#include <net/tc_act/tc_sample.h>
#include "spectrum.h"
#include "pci.h"
@@ -137,8 +138,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
-
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -469,6 +468,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
+static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -684,6 +693,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
if (eth_skb_pad(skb)) {
@@ -947,15 +957,13 @@ out:
/* Return the stats from a cache that is updated periodically,
* as this function might get called in an atomic context.
*/
-static struct rtnl_link_stats64 *
+static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
-
- return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
@@ -1163,8 +1171,8 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
}
static struct mlxsw_sp_port_mall_tc_entry *
-mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
- unsigned long cookie) {
+mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
+ unsigned long cookie) {
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
@@ -1176,17 +1184,15 @@ mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *cls,
+ struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
const struct tc_action *a,
bool ingress)
{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
struct net *net = dev_net(mlxsw_sp_port->dev);
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
int ifindex;
- int err;
ifindex = tcf_mirred_ifindex(a);
to_dev = __dev_get_by_index(net, ifindex);
@@ -1197,90 +1203,149 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
if (!mlxsw_sp_port_dev_check(to_dev)) {
netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
to_port = netdev_priv(to_dev);
- mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
- if (!mall_tc_entry)
- return -ENOMEM;
+ mirror->to_local_port = to_port->local_port;
+ mirror->ingress = ingress;
+ span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+}
- mall_tc_entry->cookie = cls->cookie;
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
- mall_tc_entry->mirror.to_local_port = to_port->local_port;
- mall_tc_entry->mirror.ingress = ingress;
- list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
+static void
+mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_sp_span_type span_type;
+ struct mlxsw_sp_port *to_port;
- span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ to_port = mlxsw_sp->ports[mirror->to_local_port];
+ span_type = mirror->ingress ?
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+}
+
+static int
+mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls,
+ const struct tc_action *a,
+ bool ingress)
+{
+ int err;
+
+ if (!mlxsw_sp_port->sample)
+ return -EOPNOTSUPP;
+ if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
+ netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+ return -EEXIST;
+ }
+ if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+ netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
+ tcf_sample_psample_group(a));
+ mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
+ mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
+ mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+
+ err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
if (err)
- goto err_mirror_add;
+ goto err_port_sample_set;
return 0;
-err_mirror_add:
- list_del(&mall_tc_entry->list);
- kfree(mall_tc_entry);
+err_port_sample_set:
+ RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
return err;
}
+static void
+mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!mlxsw_sp_port->sample)
+ return;
+
+ mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
+ RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
+}
+
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
__be16 protocol,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
const struct tc_action *a;
LIST_HEAD(actions);
int err;
if (!tc_single_action(cls->exts)) {
netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- tcf_exts_to_list(cls->exts, &actions);
- list_for_each_entry(a, &actions, list) {
- if (!is_tcf_mirred_egress_mirror(a) ||
- protocol != htons(ETH_P_ALL)) {
- return -ENOTSUPP;
- }
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+ mall_tc_entry->cookie = cls->cookie;
- err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
+ tcf_exts_to_list(cls->exts, &actions);
+ a = list_first_entry(&actions, struct tc_action, list);
+
+ if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+ struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
+
+ mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
+ mirror = &mall_tc_entry->mirror;
+ err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
+ mirror, a, ingress);
+ } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+ mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
+ err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
a, ingress);
- if (err)
- return err;
+ } else {
+ err = -EOPNOTSUPP;
}
+ if (err)
+ goto err_add_action;
+
+ list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
return 0;
+
+err_add_action:
+ kfree(mall_tc_entry);
+ return err;
}
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *cls)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
- enum mlxsw_sp_span_type span_type;
- struct mlxsw_sp_port *to_port;
- mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
- cls->cookie);
+ mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
+ cls->cookie);
if (!mall_tc_entry) {
netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
return;
}
+ list_del(&mall_tc_entry->list);
switch (mall_tc_entry->type) {
case MLXSW_SP_PORT_MALL_MIRROR:
- to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
- span_type = mall_tc_entry->mirror.ingress ?
- MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+ mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
+ &mall_tc_entry->mirror);
+ break;
+ case MLXSW_SP_PORT_MALL_SAMPLE:
+ mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
break;
default:
WARN_ON(1);
}
- list_del(&mall_tc_entry->list);
kfree(mall_tc_entry);
}
@@ -1290,7 +1355,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
- if (tc->type == TC_SETUP_MATCHALL) {
+ switch (tc->type) {
+ case TC_SETUP_MATCHALL:
switch (tc->cls_mall->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
@@ -1304,9 +1370,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
default:
return -EINVAL;
}
+ case TC_SETUP_CLSFLOWER:
+ switch (tc->cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
+ proto, tc->cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
+ tc->cls_flower);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1322,8 +1400,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
- .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
.ndo_fdb_add = switchdev_port_fdb_add,
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
@@ -1649,7 +1725,7 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
break;
default:
WARN_ON(1);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -2255,6 +2331,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->sample) {
+ err = -ENOMEM;
+ goto err_alloc_sample;
+ }
+
mlxsw_sp_port->hw_stats.cache =
kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
@@ -2383,6 +2466,8 @@ err_dev_addr_init:
err_port_swid_set:
kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
+ kfree(mlxsw_sp_port->sample);
+err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2428,8 +2513,9 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
- free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->hw_stats.cache);
+ kfree(mlxsw_sp_port->sample);
+ free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2730,6 +2816,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
+static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct psample_group *psample_group;
+ u32 size;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
+ local_port);
+ goto out;
+ }
+ if (unlikely(!mlxsw_sp_port->sample)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
+ local_port);
+ goto out;
+ }
+
+ size = mlxsw_sp_port->sample->truncate ?
+ mlxsw_sp_port->sample->trunc_size : skb->len;
+
+ rcu_read_lock();
+ psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
+ if (!psample_group)
+ goto out_unlock;
+ psample_sample_packet(psample_group, skb, size,
+ mlxsw_sp_port->dev->ifindex, 0,
+ mlxsw_sp_port->sample->rate);
+out_unlock:
+ rcu_read_unlock();
+out:
+ consume_skb(skb);
+}
+
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -2765,6 +2886,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
+ /* PKT Sample trap */
+ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
+ false, SP_IP2ME, DISCARD)
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -2949,10 +3073,16 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
- if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+ switch (type) {
+ case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
flood_table = MLXSW_SP_FLOOD_TABLE_UC;
- else
- flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+ break;
+ case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
+ flood_table = MLXSW_SP_FLOOD_TABLE_MC;
+ break;
+ default:
+ flood_table = MLXSW_SP_FLOOD_TABLE_BC;
+ }
mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
flood_table);
@@ -3088,6 +3218,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_span_init;
}
+ err = mlxsw_sp_acl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
+ goto err_acl_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3097,6 +3233,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_acl_fini(mlxsw_sp);
+err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
mlxsw_sp_router_fini(mlxsw_sp);
@@ -3117,6 +3255,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -3137,9 +3276,9 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 2,
+ .max_fid_offset_flood_tables = 3,
.fid_offset_flood_table_size = VLAN_N_VID - 1,
- .max_fid_flood_tables = 2,
+ .max_fid_flood_tables = 3,
.fid_flood_table_size = MLXSW_SP_VFID_MAX,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
@@ -3182,7 +3321,7 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
@@ -3340,6 +3479,8 @@ mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
if (!r)
return NULL;
+ INIT_LIST_HEAD(&r->nexthop_list);
+ INIT_LIST_HEAD(&r->neigh_list);
ether_addr_copy(r->addr, l3_dev->dev_addr);
r->mtu = l3_dev->mtu;
r->ref_count = 1;
@@ -3408,6 +3549,8 @@ static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
u16 fid = f->fid;
u16 rif = r->rif;
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
@@ -3552,7 +3695,7 @@ static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
table_type = mlxsw_sp_flood_table_type_get(fid);
index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
1, MLXSW_PORT_ROUTER_PORT, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
@@ -3637,6 +3780,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f = r->f;
u16 rif = r->rif;
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
mlxsw_sp->rifs[rif] = NULL;
f->r = NULL;
@@ -3926,6 +4071,9 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
+ mlxsw_sp_port->mc_flood = 1;
+ mlxsw_sp_port->mc_router = 0;
+ mlxsw_sp_port->mc_disabled = 1;
mlxsw_sp_port->bridged = 1;
return 0;
@@ -3942,6 +4090,8 @@ static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
+ mlxsw_sp_port->mc_flood = 0;
+ mlxsw_sp_port->mc_router = 0;
mlxsw_sp_port->bridged = 0;
/* Add implicit VLAN interface in the device, so that untagged
@@ -4604,6 +4754,9 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
mlxsw_sp_vport->learning = 1;
mlxsw_sp_vport->learning_sync = 1;
mlxsw_sp_vport->uc_flood = 1;
+ mlxsw_sp_vport->mc_flood = 1;
+ mlxsw_sp_vport->mc_router = 0;
+ mlxsw_sp_vport->mc_disabled = 1;
mlxsw_sp_vport->bridged = 1;
return 0;
@@ -4624,6 +4777,8 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
mlxsw_sp_vport->learning = 0;
mlxsw_sp_vport->learning_sync = 0;
mlxsw_sp_vport->uc_flood = 0;
+ mlxsw_sp_vport->mc_flood = 0;
+ mlxsw_sp_vport->mc_router = 0;
mlxsw_sp_vport->bridged = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index cc1af19d699a..13ec85e7c392 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
*
@@ -46,12 +46,16 @@
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <net/psample.h>
+#include <net/pkt_cls.h>
#include "port.h"
#include "core.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_INVALID_RIF 0xffff
@@ -104,6 +108,8 @@ struct mlxsw_sp_fid {
};
struct mlxsw_sp_rif {
+ struct list_head nexthop_list;
+ struct list_head neigh_list;
struct net_device *dev;
unsigned int ref_count;
struct mlxsw_sp_fid *f;
@@ -229,6 +235,7 @@ struct mlxsw_sp_span_entry {
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
+ MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
@@ -249,17 +256,20 @@ struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
} neighs_update;
struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_group_list;
struct list_head nexthop_neighs_list;
bool aborted;
};
+struct mlxsw_sp_acl;
+
struct mlxsw_sp {
struct {
struct list_head list;
@@ -289,6 +299,7 @@ struct mlxsw_sp {
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
+ struct mlxsw_sp_acl *acl;
struct {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
@@ -315,15 +326,25 @@ struct mlxsw_sp_port_pcpu_stats {
u32 tx_dropped;
};
+struct mlxsw_sp_port_sample {
+ struct psample_group __rcu *psample_group;
+ u32 trunc_size;
+ u32 rate;
+ bool truncate;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
u8 stp_state;
- u8 learning:1,
+ u16 learning:1,
learning_sync:1,
uc_flood:1,
+ mc_flood:1,
+ mc_router:1,
+ mc_disabled:1,
bridged:1,
lagged:1,
split:1;
@@ -361,8 +382,10 @@ struct mlxsw_sp_port {
struct rtnl_link_stats64 *cache;
struct delayed_work update_dw;
} hw_stats;
+ struct mlxsw_sp_port_sample *sample;
};
+bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -489,7 +512,8 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
- MLXSW_SP_FLOOD_TABLE_BM,
+ MLXSW_SP_FLOOD_TABLE_BC,
+ MLXSW_SP_FLOOD_TABLE_MC,
};
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
@@ -582,14 +606,107 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_neigh_construct(struct net_device *dev,
- struct neighbour *n);
-void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
- struct neighbour *n);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
+void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+
+struct mlxsw_sp_acl_rule_info {
+ unsigned int priority;
+ struct mlxsw_afk_element_values values;
+ struct mlxsw_afa_block *act_block;
+};
+
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ void *priv, void *ruleset_priv);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct net_device *dev, bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ size_t rule_priv_size;
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+};
+
+struct mlxsw_sp_acl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ const struct mlxsw_sp_acl_profile_ops *
+ (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+};
+
+struct mlxsw_sp_acl_ruleset;
+
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev, bool ingress,
+ enum mlxsw_sp_acl_profile profile);
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset);
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+ unsigned int priority);
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value);
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ const char *key_value,
+ const char *mask_value, unsigned int len);
+void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id);
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct net_device *out_dev);
+
+struct mlxsw_sp_acl_rule;
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie);
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie);
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+
+extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+
+int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ __be16 protocol, struct tc_cls_flower_offload *f);
+void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
new file mode 100644
index 000000000000..8a18b3aa70dc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -0,0 +1,572 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_acl_flex_keys.h"
+
+struct mlxsw_sp_acl {
+ struct mlxsw_afk *afk;
+ struct mlxsw_afa *afa;
+ const struct mlxsw_sp_acl_ops *ops;
+ struct rhashtable ruleset_ht;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
+{
+ return acl->afk;
+}
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct net_device *dev; /* dev this ruleset is bound to */
+ bool ingress;
+ const struct mlxsw_sp_acl_profile_ops *ops;
+};
+
+struct mlxsw_sp_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+ struct rhashtable rule_ht;
+ unsigned int ref_count;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_rule {
+ struct rhash_head ht_node; /* Member of rule HT */
+ unsigned long cookie; /* HT key */
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
+ .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
+ .automatic_shrinking = true,
+};
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_profile_ops *ops)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
+ ruleset = kzalloc(alloc_size, GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+ ruleset->ref_count = 1;
+ ruleset->ht_key.ops = ops;
+
+ err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+ if (err)
+ goto err_ops_ruleset_add;
+
+ return ruleset;
+
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
+}
+
+static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct net_device *dev, bool ingress)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ int err;
+
+ ruleset->ht_key.dev = dev;
+ ruleset->ht_key.ingress = ingress;
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ mlxsw_sp_acl_ruleset_ht_params);
+ if (err)
+ return err;
+ err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ if (err)
+ goto err_ops_ruleset_bind;
+ return 0;
+
+err_ops_ruleset_bind:
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ mlxsw_sp_acl_ruleset_ht_params);
+ return err;
+}
+
+static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ mlxsw_sp_acl_ruleset_ht_params);
+}
+
+static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ ruleset->ref_count++;
+}
+
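+/* Drop a reference. Once the last reference is gone, the ruleset is
+ * unbound from its device and destroyed.
+ */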
+static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ if (--ruleset->ref_count)
+ return;
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
+ mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+}
+
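+/* Look up a ruleset already bound to the given device and direction for
+ * this profile and take a reference to it, or create and bind a new one.
+ */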
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev, bool ingress,
+ enum mlxsw_sp_acl_profile profile)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ int err;
+
+ ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.dev = dev;
+ ht_key.ingress = ingress;
+ ht_key.ops = ops;
+ ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ mlxsw_sp_acl_ruleset_ht_params);
+ if (ruleset) {
+ mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+ return ruleset;
+ }
+ ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
+ if (IS_ERR(ruleset))
+ return ruleset;
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
+ if (err)
+ goto err_ruleset_bind;
+ return ruleset;
+
+err_ruleset_bind:
+ mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
+ if (!rulei)
+ return NULL;
+ rulei->act_block = mlxsw_afa_block_create(acl->afa);
+ if (IS_ERR(rulei->act_block)) {
+ err = PTR_ERR(rulei->act_block);
+ goto err_afa_block_create;
+ }
+ return rulei;
+
+err_afa_block_create:
+ kfree(rulei);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ mlxsw_afa_block_destroy(rulei->act_block);
+ kfree(rulei);
+}
+
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_commit(rulei->act_block);
+}
+
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+ unsigned int priority)
+{
+ rulei->priority = priority;
+}
+
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value)
+{
+ mlxsw_afk_values_add_u32(&rulei->values, element,
+ key_value, mask_value);
+}
+
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ const char *key_value,
+ const char *mask_value, unsigned int len)
+{
+ mlxsw_afk_values_add_buf(&rulei->values, element,
+ key_value, mask_value, len);
+}
+
+void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ mlxsw_afa_block_continue(rulei->act_block);
+}
+
+void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id)
+{
+ mlxsw_afa_block_jump(rulei->act_block, group_id);
+}
+
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_drop(rulei->act_block);
+}
+
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct net_device *out_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u8 local_port;
+ bool in_port;
+
+ if (out_dev) {
+ if (!mlxsw_sp_port_dev_check(out_dev))
+ return -EINVAL;
+ mlxsw_sp_port = netdev_priv(out_dev);
+ if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
+ return -EINVAL;
+ local_port = mlxsw_sp_port->local_port;
+ in_port = false;
+ } else {
+ /* If out_dev is NULL, the caller wants to
+ * forward to the ingress port.
+ */
+ local_port = 0;
+ in_port = true;
+ }
+ return mlxsw_afa_block_append_fwd(rulei->act_block,
+ local_port, in_port);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ rule->cookie = cookie;
+ rule->ruleset = ruleset;
+
+ rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rule->rulei)) {
+ err = PTR_ERR(rule->rulei);
+ goto err_rulei_create;
+ }
+ return rule;
+
+err_rulei_create:
+ kfree(rule);
+err_alloc:
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+
+ mlxsw_sp_acl_rulei_destroy(rule->rulei);
+ kfree(rule);
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ int err;
+
+ err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ ops->rule_del(mlxsw_sp, rule->priv);
+ return err;
+}
+
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
+ ops->rule_del(mlxsw_sp, rule->priv);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ mlxsw_sp_acl_rule_ht_params);
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
+{
+ return rule->rulei;
+}
+
+#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
+
+static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ u32 kvdl_index;
+ int ret;
+ int err;
+
+ /* The first action set of a TCAM entry is stored directly in TCAM,
+ * not in the KVD linear area.
+ */
+ if (is_first)
+ return 0;
+
+ ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE);
+ if (ret < 0)
+ return ret;
+ kvdl_index = ret;
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_pefa_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
+ bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ if (is_first)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
+ u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char ppbs_pl[MLXSW_REG_PPBS_LEN];
+ u32 kvdl_index;
+ int ret;
+ int err;
+
+ ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
+ if (ret < 0)
+ return ret;
+ kvdl_index = ret;
+ mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
+ if (err)
+ goto err_ppbs_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_ppbs_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+};
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+ struct mlxsw_sp_acl *acl;
+ int err;
+
+ acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+ mlxsw_sp->acl = acl;
+
+ acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_FLEX_KEYS),
+ mlxsw_sp_afk_blocks,
+ MLXSW_SP_AFK_BLOCKS_COUNT);
+ if (!acl->afk) {
+ err = -ENOMEM;
+ goto err_afk_create;
+ }
+
+ acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_ACTIONS_PER_SET),
+ &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ if (IS_ERR(acl->afa)) {
+ err = PTR_ERR(acl->afa);
+ goto err_afa_create;
+ }
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &mlxsw_sp_acl_ruleset_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = acl_ops->init(mlxsw_sp, acl->priv);
+ if (err)
+ goto err_acl_ops_init;
+
+ acl->ops = acl_ops;
+ return 0;
+
+err_acl_ops_init:
+ rhashtable_destroy(&acl->ruleset_ht);
+err_rhashtable_init:
+ mlxsw_afa_destroy(acl->afa);
+err_afa_create:
+ mlxsw_afk_destroy(acl->afk);
+err_afk_create:
+ kfree(acl);
+ return err;
+}
+
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
+
+ acl_ops->fini(mlxsw_sp, acl->priv);
+ rhashtable_destroy(&acl->ruleset_ht);
+ mlxsw_afa_destroy(acl->afa);
+ mlxsw_afk_destroy(acl->afk);
+ kfree(acl);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
new file mode 100644
index 000000000000..82b81cf7f4a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -0,0 +1,109 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
+
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
new file mode 100644
index 000000000000..7382832215fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -0,0 +1,1084 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+};
+
+static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp_acl_tcam *tcam = priv;
+ u64 max_tcam_regions;
+ u64 max_regions;
+ u64 max_groups;
+ size_t alloc_size;
+ int err;
+
+ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_TCAM_REGIONS);
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+ /* Use 1:1 mapping between ACL region and TCAM region */
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+ alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
+ tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
+ if (!tcam->used_regions)
+ return -ENOMEM;
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+ alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
+ tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
+ if (!tcam->used_groups) {
+ err = -ENOMEM;
+ goto err_alloc_used_groups;
+ }
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
+ return 0;
+
+err_alloc_used_groups:
+ kfree(tcam->used_regions);
+ return err;
+}
+
+static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp_acl_tcam *tcam = priv;
+
+ kfree(tcam->used_groups);
+ kfree(tcam->used_regions);
+}
+
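+/* Allocate the lowest free region ID from the used_regions bitmap. */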
+static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+{
+ u16 id;
+
+ id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
+ if (id < tcam->max_regions) {
+ __set_bit(id, tcam->used_regions);
+ *p_id = id;
+ return 0;
+ }
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+{
+ __clear_bit(id, tcam->used_regions);
+}
+
+static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+{
+ u16 id;
+
+ id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
+ if (id < tcam->max_groups) {
+ __set_bit(id, tcam->used_groups);
+ *p_id = id;
+ return 0;
+ }
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+{
+ __clear_bit(id, tcam->used_groups);
+}
+
+struct mlxsw_sp_acl_tcam_pattern {
+ const enum mlxsw_afk_element *elements;
+ unsigned int elements_count;
+};
+
+struct mlxsw_sp_acl_tcam_group {
+ struct mlxsw_sp_acl_tcam *tcam;
+ u16 id;
+ struct list_head region_list;
+ unsigned int region_count;
+ struct rhashtable chunk_ht;
+ struct {
+ u16 local_port;
+ bool ingress;
+ } bound;
+ struct mlxsw_sp_acl_tcam_group_ops *ops;
+ const struct mlxsw_sp_acl_tcam_pattern *patterns;
+ unsigned int patterns_count;
+};
+
+struct mlxsw_sp_acl_tcam_region {
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head chunk_list; /* List of chunks under this region */
+ struct parman *parman;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_tcam_group *group;
+ u16 id; /* ACL ID and region ID - they are the same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct {
+ struct parman_prio parman_prio;
+ struct parman_item parman_item;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp_acl_tcam_chunk {
+ struct list_head list; /* Member of a TCAM region */
+ struct rhash_head ht_node; /* Member of a chunk HT */
+ unsigned int priority; /* Priority within the region and group */
+ struct parman_prio parman_prio;
+ struct mlxsw_sp_acl_tcam_group *group;
+ struct mlxsw_sp_acl_tcam_region *region;
+ unsigned int ref_count;
+};
+
+struct mlxsw_sp_acl_tcam_entry {
+ struct parman_item parman_item;
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
+ .key_len = sizeof(unsigned int),
+ .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
+ .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
+ .automatic_shrinking = true,
+};
+
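+/* Rewrite the group's ordered list of region (ACL) IDs in hardware
+ * using the PAGT register.
+ */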
+static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group)
+{
+ struct mlxsw_sp_acl_tcam_region *region;
+ char pagt_pl[MLXSW_REG_PAGT_LEN];
+ int acl_index = 0;
+
+ mlxsw_reg_pagt_pack(pagt_pl, group->id);
+ list_for_each_entry(region, &group->region_list, list)
+ mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
+ mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_group *group,
+ const struct mlxsw_sp_acl_tcam_pattern *patterns,
+ unsigned int patterns_count)
+{
+ int err;
+
+ group->tcam = tcam;
+ group->patterns = patterns;
+ group->patterns_count = patterns_count;
+ INIT_LIST_HEAD(&group->region_list);
+ err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ if (err)
+ goto err_group_update;
+
+ err = rhashtable_init(&group->chunk_ht,
+ &mlxsw_sp_acl_tcam_chunk_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ return 0;
+
+err_rhashtable_init:
+err_group_update:
+ mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+ return err;
+}
+
+static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group)
+{
+ struct mlxsw_sp_acl_tcam *tcam = group->tcam;
+
+ rhashtable_destroy(&group->chunk_ht);
+ mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+ WARN_ON(!list_empty(&group->region_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct net_device *dev, bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+ if (!mlxsw_sp_port_dev_check(dev))
+ return -EINVAL;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ group->bound.local_port = mlxsw_sp_port->local_port;
+ group->bound.ingress = ingress;
+ mlxsw_reg_ppbt_pack(ppbt_pl,
+ group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ group->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group)
+{
+ char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+ mlxsw_reg_ppbt_pack(ppbt_pl,
+ group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ group->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+ if (list_empty(&region->chunk_list))
+ return 0;
+ /* As the region's priority, return the priority of its first chunk */
+ chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
+ return chunk->priority;
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+ if (list_empty(&region->chunk_list))
+ return 0;
+ chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
+ return chunk->priority;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_tcam_region *region2;
+ struct list_head *pos;
+
+ /* Position the region inside the list according to priority */
+ list_for_each(pos, &group->region_list) {
+ region2 = list_entry(pos, typeof(*region2), list);
+ if (mlxsw_sp_acl_tcam_region_prio(region2) >
+ mlxsw_sp_acl_tcam_region_prio(region))
+ break;
+ }
+ list_add_tail(&region->list, pos);
+ group->region_count++;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ group->region_count--;
+ list_del(&region->list);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ int err;
+
+ if (group->region_count == group->tcam->max_group_size)
+ return -ENOBUFS;
+
+ mlxsw_sp_acl_tcam_group_list_add(group, region);
+
+ err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ if (err)
+ goto err_group_update;
+ region->group = group;
+
+ return 0;
+
+err_group_update:
+ mlxsw_sp_acl_tcam_group_list_del(group, region);
+ mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_tcam_group *group = region->group;
+
+ mlxsw_sp_acl_tcam_group_list_del(group, region);
+ mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+}
+
+static struct mlxsw_sp_acl_tcam_region *
+mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage,
+ bool *p_need_split)
+{
+ struct mlxsw_sp_acl_tcam_region *region, *region2;
+ struct list_head *pos;
+ bool issubset;
+
+ list_for_each(pos, &group->region_list) {
+ region = list_entry(pos, typeof(*region), list);
+
+ /* First, check whether the requested priority belongs under one
+ * of the following regions; if so, skip this one.
+ */
+ if (pos->next != &group->region_list) { /* not last */
+ region2 = list_entry(pos->next, typeof(*region2), list);
+ if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
+ continue;
+ }
+
+ issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
+
+ /* If the requested element usage would not fit and the priority
+ * is lower than that of the currently inspected region, we cannot
+ * use this region; return NULL to indicate that a new region has
+ * to be created.
+ */
+ if (!issubset &&
+ priority < mlxsw_sp_acl_tcam_region_prio(region))
+ return NULL;
+
+ /* If the requested element usage would not fit and the priority
+ * is higher than that of the currently inspected region, we cannot
+ * use this region either. The next region might still be a fit, so
+ * let it be processed; the check right above this one will end the
+ * search if it is not.
+ */
+ if (!issubset &&
+ priority > mlxsw_sp_acl_tcam_region_max_prio(region))
+ continue;
+
+ /* Indicate if the region needs to be split in order to add
+ * the requested priority. Split is needed when requested
+ * element usage won't fit into the found region.
+ */
+ *p_need_split = !issubset;
+ return region;
+ }
+ return NULL; /* New region has to be created. */
+}
+
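+/* Pick the first predefined key pattern that covers the requested element
+ * usage. If no pattern fits, fall back to the requested usage itself.
+ */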
+static void
+mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_afk_element_usage *elusage,
+ struct mlxsw_afk_element_usage *out)
+{
+ const struct mlxsw_sp_acl_tcam_pattern *pattern;
+ int i;
+
+ for (i = 0; i < group->patterns_count; i++) {
+ pattern = &group->patterns[i];
+ mlxsw_afk_element_usage_fill(out, pattern->elements,
+ pattern->elements_count);
+ if (mlxsw_afk_element_usage_subset(elusage, out))
+ return;
+ }
+ memcpy(out, elusage, sizeof(*out));
+}
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_afk_key_info *key_info = region->key_info;
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+ unsigned int encodings_count;
+ int i;
+ int err;
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ region->id, region->tcam_region_info);
+ encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ for (i = 0; i < encodings_count; i++) {
+ u16 encoding;
+
+ encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
+ mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
+ }
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+ region->tcam_region_info);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ new_size, region->id, region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char pacl_pl[MLXSW_REG_PACL_LEN];
+
+ mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char pacl_pl[MLXSW_REG_PACL_LEN];
+
+ mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
+ region->tcam_region_info);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+ char *mask;
+ char *key;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info, offset);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info, offset);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
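+/* Each region carries a default catch-all entry at the lowest possible
+ * priority. Its only action is "continue", so packets matching none of
+ * the region's rules are not affected by it.
+ */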
+static int
+mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct parman_prio *parman_prio = &region->catchall.parman_prio;
+ struct parman_item *parman_item = &region->catchall.parman_item;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ parman_prio_init(region->parman, parman_prio,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ err = parman_item_add(region->parman, parman_prio, parman_item);
+ if (err)
+ goto err_parman_item_add;
+
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+
+ mlxsw_sp_acl_rulei_act_continue(rulei);
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+
+ err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
+ parman_item->index, rulei);
+ region->catchall.rulei = rulei;
+ if (err)
+ goto err_rule_insert;
+
+ return 0;
+
+err_rule_insert:
+err_rulei_commit:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ parman_item_remove(region->parman, parman_prio, parman_item);
+err_parman_item_add:
+ parman_prio_fini(parman_prio);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct parman_prio *parman_prio = &region->catchall.parman_prio;
+ struct parman_item *parman_item = &region->catchall.parman_item;
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
+ parman_item->index);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ parman_item_remove(region->parman, parman_prio, parman_item);
+ parman_prio_fini(parman_prio);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_tcam_region *region = priv;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_tcam_region *region = priv;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
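+/* Region entries are managed by a priority-ordered linear array manager
+ * (parman) using the LSORT algorithm; it resizes the TCAM region and
+ * moves entries around through the callbacks above.
+ */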
+static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_tcam_region_parman_resize,
+ .move = mlxsw_sp_acl_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static struct mlxsw_sp_acl_tcam_region *
+mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_tcam_region *region;
+ int err;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&region->chunk_list);
+ region->mlxsw_sp = mlxsw_sp;
+
+ region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
+ region);
+ if (!region->parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+
+ region->key_info = mlxsw_afk_key_info_get(afk, elusage);
+ if (IS_ERR(region->key_info)) {
+ err = PTR_ERR(region->key_info);
+ goto err_key_info_get;
+ }
+
+ err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
+ if (err)
+ goto err_region_id_get;
+
+ err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_alloc;
+
+ err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_enable;
+
+ err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_catchall_add;
+
+ return region;
+
+err_tcam_region_catchall_add:
+ mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+err_tcam_region_enable:
+ mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+err_tcam_region_alloc:
+ mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
+err_region_id_get:
+ mlxsw_afk_key_info_put(region->key_info);
+err_key_info_get:
+ parman_destroy(region->parman);
+err_parman_create:
+ kfree(region);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
+ mlxsw_afk_key_info_put(region->key_info);
+ parman_destroy(region->parman);
+ kfree(region);
+}
+
+static int
+mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ struct mlxsw_sp_acl_tcam_region *region;
+ bool region_created = false;
+ bool need_split;
+ int err;
+
+ region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
+ &need_split);
+ if (region && need_split) {
+ /* According to priority, the chunk should belong to an
+ * existing region. However, this chunk needs elements
+ * that the region does not contain. We would need to split
+ * the existing region into two and create a new region for
+ * this chunk in between. This is not currently supported.
+ */
+ return -EOPNOTSUPP;
+ }
+ if (!region) {
+ struct mlxsw_afk_element_usage region_elusage;
+
+ mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
+ &region_elusage);
+ region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
+ &region_elusage);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+ region_created = true;
+ }
+
+ chunk->region = region;
+ list_add_tail(&chunk->list, &region->chunk_list);
+
+ if (!region_created)
+ return 0;
+
+ err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
+ if (err)
+ goto err_group_region_attach;
+
+ return 0;
+
+err_group_region_attach:
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ list_del(&chunk->list);
+ if (list_empty(&region->chunk_list)) {
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+ }
+}
+
+static struct mlxsw_sp_acl_tcam_chunk *
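+/* Callbacks used by the flexible actions (afa) core to place extended
+ * action sets and forwarding entries in the KVD linear area.
+ */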
+mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ int err;
+
+ if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
+ return ERR_PTR(-EINVAL);
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return ERR_PTR(-ENOMEM);
+ chunk->priority = priority;
+ chunk->group = group;
+ chunk->ref_count = 1;
+
+ err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
+ elusage, chunk);
+ if (err)
+ goto err_chunk_assoc;
+
+ parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+
+ err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
+ mlxsw_sp_acl_tcam_chunk_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return chunk;
+
+err_rhashtable_insert:
+ parman_prio_fini(&chunk->parman_prio);
+ mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
+err_chunk_assoc:
+ kfree(chunk);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ struct mlxsw_sp_acl_tcam_group *group = chunk->group;
+
+ rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
+ mlxsw_sp_acl_tcam_chunk_ht_params);
+ parman_prio_fini(&chunk->parman_prio);
+ mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
+ kfree(chunk);
+}
+
+static struct mlxsw_sp_acl_tcam_chunk *
+mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+ chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
+ mlxsw_sp_acl_tcam_chunk_ht_params);
+ if (chunk) {
+ if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
+ elusage)))
+ return ERR_PTR(-EINVAL);
+ chunk->ref_count++;
+ return chunk;
+ }
+ return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
+ priority, elusage);
+}
+
+static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ if (--chunk->ref_count)
+ return;
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
+}
+
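+/* Install a rule into the TCAM: get (or create) the chunk for the rule's
+ * priority, allocate a slot from the region's parman and write the key,
+ * mask and first action set via the PTCE-2 register.
+ */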
+static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_region *region;
+ int err;
+
+ chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
+ &rulei->values.elusage);
+ if (IS_ERR(chunk))
+ return PTR_ERR(chunk);
+
+ region = chunk->region;
+ err = parman_item_add(region->parman, &chunk->parman_prio,
+ &entry->parman_item);
+ if (err)
+ goto err_parman_item_add;
+
+ err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
+ entry->parman_item.index,
+ rulei);
+ if (err)
+ goto err_rule_insert;
+ entry->chunk = chunk;
+
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(region->parman, &chunk->parman_prio,
+ &entry->parman_item);
+err_parman_item_add:
+ mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+ return err;
+}
+
+static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
+ entry->parman_item.index);
+ parman_item_remove(region->parman, &chunk->parman_prio,
+ &entry->parman_item);
+ mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ MLXSW_AFK_ELEMENT_DMAC,
+ MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP4,
+ MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+};
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+ MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+ MLXSW_AFK_ELEMENT_DST_IP6_HI,
+ MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+};
+
+static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
+ {
+ .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
+ .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
+ },
+ {
+ .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
+ .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
+ },
+};
+
+#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
+
+struct mlxsw_sp_acl_tcam_flower_ruleset {
+ struct mlxsw_sp_acl_tcam_group group;
+};
+
+struct mlxsw_sp_acl_tcam_flower_rule {
+ struct mlxsw_sp_acl_tcam_entry entry;
+};
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+ void *priv, void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam *tcam = priv;
+
+ return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct net_device *dev, bool ingress)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
+ dev, ingress);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
+ &rule->entry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
+ .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
+ .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
+ .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
+ .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
+ .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
+ .rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+};
+
+static const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops_arr[] = {
+ [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+};
+
+static const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops;
+
+ if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
+ return NULL;
+ ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
+ if (WARN_ON(!ops))
+ return NULL;
+ return ops;
+}
+
+const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp_acl_tcam),
+ .init = mlxsw_sp_acl_tcam_init,
+ .fini = mlxsw_sp_acl_tcam_fini,
+ .profile_ops = mlxsw_sp_acl_tcam_profile_ops,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
new file mode 100644
index 000000000000..22ab42925377
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -0,0 +1,316 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tcf_exts *exts)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+ int err;
+
+ if (tc_no_actions(exts))
+ return 0;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a)) {
+ err = mlxsw_sp_acl_rulei_act_drop(rulei);
+ if (err)
+ return err;
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *out_dev;
+
+ out_dev = __dev_get_by_index(dev_net(dev), ifindex);
+ if (out_dev == dev)
+ out_dev = NULL;
+
+ err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
+ out_dev);
+ if (err)
+ return err;
+ } else {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f)
+{
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
+ ntohl(key->src), ntohl(mask->src));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
+ ntohl(key->dst), ntohl(mask->dst));
+}
+
+static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f)
+{
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+ size_t addr_half_size = sizeof(key->src) / 2;
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+ &key->src.s6_addr[0],
+ &mask->src.s6_addr[0],
+ addr_half_size);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+ &key->src.s6_addr[addr_half_size],
+ &mask->src.s6_addr[addr_half_size],
+ addr_half_size);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
+ &key->dst.s6_addr[0],
+ &mask->dst.s6_addr[0],
+ addr_half_size);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ &key->dst.s6_addr[addr_half_size],
+ &mask->dst.s6_addr[addr_half_size],
+ addr_half_size);
+}
+
+static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f,
+ u8 ip_proto)
+{
+ struct flow_dissector_key_ports *key, *mask;
+
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
+ return -EINVAL;
+ }
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ ntohs(key->dst), ntohs(mask->dst));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ ntohs(key->src), ntohs(mask->src));
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f)
+{
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_sp_acl_rulei_priority(rulei, f->prio);
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ u16 n_proto_key = ntohs(key->n_proto);
+ u16 n_proto_mask = ntohs(mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ n_proto_key, n_proto_mask);
+
+ ip_proto = key->ip_proto;
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ key->ip_proto, mask->ip_proto);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_DMAC,
+ key->dst, mask->dst,
+ sizeof(key->dst));
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC,
+ key->src, mask->src,
+ sizeof(key->src));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ mlxsw_sp_flower_parse_ipv4(rulei, f);
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
+ mlxsw_sp_flower_parse_ipv6(rulei, f);
+
+ err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
+
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
+}
+
+int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ __be16 protocol, struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
+ if (err)
+ goto err_flower_parse;
+
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+
+ err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+ if (err)
+ goto err_rule_add;
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_add:
+err_rulei_commit:
+err_flower_parse:
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+err_rule_create:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
+
+void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+ ingress,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (WARN_ON(IS_ERR(ruleset)))
+ return;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (!WARN_ON(!rule)) {
+ mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+ }
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
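mlxsw_sp_flower_parse_ipv6() above feeds each 128-bit address to the key builder as two 8-byte halves, the *_IP6_HI and *_IP6_LO elements, by indexing s6_addr at sizeof(key->src) / 2. A standalone sketch of that split, with an illustrative stand-in for the keymask helper:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the driver's keymask helper; it only prints. */
static void sample_keymask_buf(const char *element, const uint8_t *key,
			       const uint8_t *mask, size_t len)
{
	size_t i;

	printf("%s: key=", element);
	for (i = 0; i < len; i++)
		printf("%02x", key[i]);
	printf(" mask=");
	for (i = 0; i < len; i++)
		printf("%02x", mask[i]);
	printf("\n");
}

int main(void)
{
	uint8_t addr[16];	/* stands in for struct in6_addr.s6_addr */
	uint8_t mask[16];
	size_t half = sizeof(addr) / 2;

	memset(mask, 0xff, sizeof(mask));	/* exact-match mask */
	memset(addr, 0, sizeof(addr));
	addr[0] = 0x20;
	addr[1] = 0x01;
	addr[15] = 0x01;			/* 2001::1 */

	/* Upper and lower 64 bits go into separate key elements. */
	sample_keymask_buf("SRC_IP6_HI", &addr[0], &mask[0], half);
	sample_keymask_buf("SRC_IP6_LO", &addr[half], &mask[half], half);
	return 0;
}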
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa9c5c7..d7ac22d7f940 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -40,6 +40,7 @@
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -108,7 +109,6 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
}
struct mlxsw_sp_fib_key {
- struct net_device *dev;
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
};
@@ -121,95 +121,39 @@ enum mlxsw_sp_fib_entry_type {
struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib_entry {
- struct rhash_head ht_node;
+struct mlxsw_sp_fib_node {
+ struct list_head entry_list;
struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_vr *vr;
struct mlxsw_sp_fib_key key;
+};
+
+struct mlxsw_sp_fib_entry_params {
+ u32 tb_id;
+ u32 prio;
+ u8 tos;
+ u8 type;
+};
+
+struct mlxsw_sp_fib_entry {
+ struct list_head list;
+ struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
- unsigned int ref_count;
- u16 rif; /* used for action local */
- struct mlxsw_sp_vr *vr;
- struct fib_info *fi;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
+ struct mlxsw_sp_fib_entry_params params;
+ bool offloaded;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
- struct list_head entry_list;
+ struct list_head node_list;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
-static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
- .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
- .key_len = sizeof(struct mlxsw_sp_fib_key),
- .automatic_shrinking = true,
-};
-
-static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- unsigned char prefix_len = fib_entry->key.prefix_len;
- int err;
-
- err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
- mlxsw_sp_fib_ht_params);
- if (err)
- return err;
- list_add_tail(&fib_entry->list, &fib->entry_list);
- if (fib->prefix_ref_count[prefix_len]++ == 0)
- mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
- return 0;
-}
-
-static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- unsigned char prefix_len = fib_entry->key.prefix_len;
-
- if (--fib->prefix_ref_count[prefix_len] == 0)
- mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
- list_del(&fib_entry->list);
- rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
- mlxsw_sp_fib_ht_params);
-}
-
-static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len,
- struct net_device *dev)
-{
- struct mlxsw_sp_fib_entry *fib_entry;
-
- fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
- if (!fib_entry)
- return NULL;
- fib_entry->key.dev = dev;
- memcpy(fib_entry->key.addr, addr, addr_len);
- fib_entry->key.prefix_len = prefix_len;
- return fib_entry;
-}
-
-static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
-{
- kfree(fib_entry);
-}
-
-static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len,
- struct net_device *dev)
-{
- struct mlxsw_sp_fib_key key;
-
- memset(&key, 0, sizeof(key));
- key.dev = dev;
- memcpy(key.addr, addr, addr_len);
- key.prefix_len = prefix_len;
- return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
-}
+static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
@@ -222,7 +166,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
- INIT_LIST_HEAD(&fib->entry_list);
+ INIT_LIST_HEAD(&fib->node_list);
return fib;
err_rhashtable_init:
@@ -232,6 +176,7 @@ err_rhashtable_init:
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
+ WARN_ON(!list_empty(&fib->node_list));
rhashtable_destroy(&fib->ht);
kfree(fib);
}
@@ -610,12 +555,11 @@ struct mlxsw_sp_neigh_key {
};
struct mlxsw_sp_neigh_entry {
+ struct list_head rif_list_node;
struct rhash_head ht_node;
struct mlxsw_sp_neigh_key key;
u16 rif;
- bool offloaded;
- struct delayed_work dw;
- struct mlxsw_sp_port *mlxsw_sp_port;
+ bool connected;
unsigned char ha[ETH_ALEN];
struct list_head nexthop_list; /* list of nexthops using
* this neigh entry
@@ -629,105 +573,91 @@ static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
-static int
-mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_neigh_entry *neigh_entry)
-{
- return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
- &neigh_entry->ht_node,
- mlxsw_sp_neigh_ht_params);
-}
-
-static void
-mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_neigh_entry *neigh_entry)
-{
- rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
- &neigh_entry->ht_node,
- mlxsw_sp_neigh_ht_params);
-}
-
-static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
-
static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
+mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
+ u16 rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
+ neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
if (!neigh_entry)
return NULL;
+
neigh_entry->key.n = n;
neigh_entry->rif = rif;
- INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+
return neigh_entry;
}
-static void
-mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
+static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
kfree(neigh_entry);
}
-static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
{
- struct mlxsw_sp_neigh_key key;
+ return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
- key.n = n;
- return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
- &key, mlxsw_sp_neigh_ht_params);
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
}
-int mlxsw_sp_router_neigh_construct(struct net_device *dev,
- struct neighbour *n)
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_rif *r;
int err;
- if (n->tbl != &arp_tbl)
- return 0;
-
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
- if (neigh_entry)
- return 0;
-
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
- if (WARN_ON(!r))
- return -EINVAL;
+ if (!r)
+ return ERR_PTR(-EINVAL);
- neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
+ neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
if (!neigh_entry)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
if (err)
goto err_neigh_entry_insert;
- return 0;
+
+ list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+
+ return neigh_entry;
err_neigh_entry_insert:
- mlxsw_sp_neigh_entry_destroy(neigh_entry);
- return err;
+ mlxsw_sp_neigh_entry_free(neigh_entry);
+ return ERR_PTR(err);
}
-void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
- struct neighbour *n)
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_neigh_entry *neigh_entry;
+ list_del(&neigh_entry->rif_list_node);
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_free(neigh_entry);
+}
- if (n->tbl != &arp_tbl)
- return;
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_key key;
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
- if (!neigh_entry)
- return;
- mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
- mlxsw_sp_neigh_entry_destroy(neigh_entry);
+ key.n = n;
+ return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ &key, mlxsw_sp_neigh_ht_params);
}
static void
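The reworked create path above returns ERR_PTR(-EINVAL) when no router interface backs the neighbour and ERR_PTR(-ENOMEM) on allocation failure, and callers test the result with IS_ERR(). A userspace approximation of that kernel idiom; the ERR_PTR/IS_ERR/PTR_ERR definitions below are simplified stand-ins, not the kernel's:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
	return (void *)(intptr_t) error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)(intptr_t) ptr;
}

static bool IS_ERR(const void *ptr)
{
	return (uintptr_t) ptr >= (uintptr_t) -MAX_ERRNO;
}

struct sample_entry {
	int rif;
};

static struct sample_entry *sample_entry_create(bool have_rif)
{
	struct sample_entry *entry;

	if (!have_rif)
		return ERR_PTR(-EINVAL);	/* no backing router interface */
	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return ERR_PTR(-ENOMEM);
	return entry;
}

int main(void)
{
	struct sample_entry *entry = sample_entry_create(false);

	if (IS_ERR(entry)) {
		printf("create failed: %ld\n", PTR_ERR(entry));
		return 1;
	}
	free(entry);
	return 0;
}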
@@ -866,13 +796,11 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
	/* Take RTNL mutex here to prevent the lists from changing */
rtnl_lock();
list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
- nexthop_neighs_list_node) {
+ nexthop_neighs_list_node)
	/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
*/
- if (!list_empty(&neigh_entry->nexthop_list))
- neigh_event_send(neigh_entry->key.n, NULL);
- }
+ neigh_event_send(neigh_entry->key.n, NULL);
rtnl_unlock();
}
@@ -916,11 +844,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
*/
rtnl_lock();
list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
- nexthop_neighs_list_node) {
- if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
- !list_empty(&neigh_entry->nexthop_list))
+ nexthop_neighs_list_node)
+ if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
- }
rtnl_unlock();
mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
@@ -932,79 +858,101 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
bool removing);
-static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
+{
+ return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE;
+}
+
+static void
+mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ enum mlxsw_reg_rauht_op op)
{
- struct mlxsw_sp_neigh_entry *neigh_entry =
- container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
struct neighbour *n = neigh_entry->key.n;
- struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 dip = ntohl(*((__be32 *) n->primary_key));
char rauht_pl[MLXSW_REG_RAUHT_LEN];
- struct net_device *dev;
+
+ mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
+ dip);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+static void
+mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool adding)
+{
+ if (!adding && !neigh_entry->connected)
+ return;
+ neigh_entry->connected = adding;
+ if (neigh_entry->key.n->tbl == &arp_tbl)
+ mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+ mlxsw_sp_rauht_op(adding));
+ else
+ WARN_ON_ONCE(1);
+}
+
+struct mlxsw_sp_neigh_event_work {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ struct neighbour *n;
+};
+
+static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_event_work *neigh_work =
+ container_of(work, struct mlxsw_sp_neigh_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct neighbour *n = neigh_work->n;
+ unsigned char ha[ETH_ALEN];
bool entry_connected;
u8 nud_state, dead;
- bool updating;
- bool removing;
- bool adding;
- u32 dip;
- int err;
+ /* If these parameters are changed after we release the lock,
+ * then we are guaranteed to receive another event letting us
+ * know about it.
+ */
read_lock_bh(&n->lock);
- dip = ntohl(*((__be32 *) n->primary_key));
- memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+ memcpy(ha, n->ha, ETH_ALEN);
nud_state = n->nud_state;
dead = n->dead;
- dev = n->dev;
read_unlock_bh(&n->lock);
+ rtnl_lock();
entry_connected = nud_state & NUD_VALID && !dead;
- adding = (!neigh_entry->offloaded) && entry_connected;
- updating = neigh_entry->offloaded && entry_connected;
- removing = neigh_entry->offloaded && !entry_connected;
-
- if (adding || updating) {
- mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
- neigh_entry->rif,
- neigh_entry->ha, dip);
- err = mlxsw_reg_write(mlxsw_sp->core,
- MLXSW_REG(rauht), rauht_pl);
- if (err) {
- netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
- neigh_entry->offloaded = false;
- } else {
- neigh_entry->offloaded = true;
- }
- mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
- } else if (removing) {
- mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
- neigh_entry->rif,
- neigh_entry->ha, dip);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
- rauht_pl);
- if (err) {
- netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
- neigh_entry->offloaded = true;
- } else {
- neigh_entry->offloaded = false;
- }
- mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (!entry_connected && !neigh_entry)
+ goto out;
+ if (!neigh_entry) {
+ neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+ if (IS_ERR(neigh_entry))
+ goto out;
}
+ memcpy(neigh_entry->ha, ha, ETH_ALEN);
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
+
+ if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+out:
+ rtnl_unlock();
neigh_release(n);
- mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ kfree(neigh_work);
}
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_neigh_event_work *neigh_work;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
- struct net_device *dev;
struct neigh_parms *p;
struct neighbour *n;
- u32 dip;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -1029,33 +977,31 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
break;
case NETEVENT_NEIGH_UPDATE:
n = ptr;
- dev = n->dev;
if (n->tbl != &arp_tbl)
return NOTIFY_DONE;
- mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
if (!mlxsw_sp_port)
return NOTIFY_DONE;
- mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
- if (WARN_ON(!neigh_entry)) {
+ neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
+ if (!neigh_work) {
mlxsw_sp_port_dev_put(mlxsw_sp_port);
- return NOTIFY_DONE;
+ return NOTIFY_BAD;
}
- neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
+
+ INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
+ neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ neigh_work->n = n;
/* Take a reference to ensure the neighbour won't be
	 * destructed until we drop the reference in the
	 * work item.
*/
neigh_clone(n);
- if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
- neigh_release(n);
- mlxsw_sp_port_dev_put(mlxsw_sp_port);
- }
+ mlxsw_core_schedule_work(&neigh_work->work);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
}
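The netevent handler above runs in atomic context, so it only allocates a small work struct, clones the neighbour and defers the rest; the work item then snapshots the neighbour state and runs a short decision flow under RTNL: create the entry if needed, program or delete the hardware host entry, and free the entry once it is neither connected nor referenced by a nexthop. A userspace sketch of just that decision flow, with all names illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sample_neigh_entry {
	bool connected;		/* what was last programmed to the device */
	int nexthop_refs;	/* stand-in for the nexthop_list */
};

static struct sample_neigh_entry *entry;	/* stand-in for the hash lookup */

static void sample_neigh_event(bool nud_valid, bool dead)
{
	bool entry_connected = nud_valid && !dead;

	if (!entry_connected && !entry)
		return;			/* nothing programmed, nothing to do */
	if (!entry) {
		entry = calloc(1, sizeof(*entry));
		if (!entry)
			return;
	}

	/* Program or delete the host entry; skip the delete when nothing
	 * was ever written (mirrors the !adding && !connected check).
	 */
	if (entry_connected || entry->connected)
		printf("host entry %s\n", entry_connected ? "written" : "deleted");
	entry->connected = entry_connected;

	/* Keep the entry only while connected or used by a nexthop. */
	if (!entry->connected && entry->nexthop_refs == 0) {
		free(entry);
		entry = NULL;
	}
}

int main(void)
{
	sample_neigh_event(true, false);	/* neighbour resolved */
	sample_neigh_event(false, false);	/* neighbour expired */
	sample_neigh_event(false, true);	/* no entry: ignored */
	return 0;
}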
@@ -1093,11 +1039,40 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
+static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif *r)
+{
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+ mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
+ r->rif, r->addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
+ list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+ rif_list_node)
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+}
+
+struct mlxsw_sp_nexthop_key {
+ struct fib_nh *fib_nh;
+};
+
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
+ struct list_head rif_list_node;
struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
* this belongs to
*/
+ struct rhash_head ht_node;
+ struct mlxsw_sp_nexthop_key key;
+ struct mlxsw_sp_rif *r;
u8 should_offload:1, /* set indicates this neigh is connected and
* should be put to KVD linear area of this group.
*/
@@ -1110,16 +1085,81 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
};
+struct mlxsw_sp_nexthop_group_key {
+ struct fib_info *fi;
+};
+
struct mlxsw_sp_nexthop_group {
- struct list_head list; /* node in mlxsw->router.nexthop_group_list */
+ struct rhash_head ht_node;
struct list_head fib_list; /* list of fib entries that use this group */
- u8 adj_index_valid:1;
+ struct mlxsw_sp_nexthop_group_key key;
+ u8 adj_index_valid:1,
+ gateway:1; /* routes using the group use a gateway */
u32 adj_index;
u16 ecmp_size;
u16 count;
struct mlxsw_sp_nexthop nexthops[0];
+#define nh_rif nexthops[0].r
};
+static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
+};
+
+static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
+ &nh_grp->ht_node,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
+ &nh_grp->ht_node,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_key key)
+{
+ return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_key),
+};
+
+static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
+ &nh->ht_node, mlxsw_sp_nexthop_ht_params);
+}
+
+static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
+ mlxsw_sp_nexthop_ht_params);
+}
+
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_key key)
+{
+ return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
+ mlxsw_sp_nexthop_ht_params);
+}
+
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
u32 adj_index, u16 ecmp_size,
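The two rhashtables introduced in this hunk key driver objects by the kernel objects they mirror, the nexthop group by its struct fib_info pointer and the nexthop by its struct fib_nh pointer, so later FIB and nexthop events can find their objects directly instead of scanning a list and comparing gateways. A toy userspace equivalent of a pointer-keyed lookup with fixed buckets; this is illustrative only, the kernel's rhashtable hashes and resizes differently:

#include <stdint.h>
#include <stdio.h>

#define SAMPLE_BUCKETS 16

struct sample_group {
	const void *key;		/* e.g. the fib_info pointer */
	struct sample_group *next;	/* chained on hash collision */
};

static struct sample_group *buckets[SAMPLE_BUCKETS];

static unsigned int sample_hash(const void *key)
{
	/* Drop the low alignment bits, then fold into the bucket range. */
	return (unsigned int) (((uintptr_t) key >> 4) % SAMPLE_BUCKETS);
}

static void sample_group_insert(struct sample_group *grp)
{
	unsigned int b = sample_hash(grp->key);

	grp->next = buckets[b];
	buckets[b] = grp;
}

static struct sample_group *sample_group_lookup(const void *key)
{
	struct sample_group *grp;

	for (grp = buckets[sample_hash(key)]; grp; grp = grp->next)
		if (grp->key == key)
			return grp;
	return NULL;
}

int main(void)
{
	int fib_info_stub;			/* stands in for struct fib_info */
	struct sample_group grp = { .key = &fib_info_stub };

	sample_group_insert(&grp);
	printf("found: %s\n", sample_group_lookup(&fib_info_stub) ? "yes" : "no");
	return 0;
}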
@@ -1144,9 +1184,9 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (vr == fib_entry->vr)
+ if (vr == fib_entry->fib_node->vr)
continue;
- vr = fib_entry->vr;
+ vr = fib_entry->fib_node->vr;
err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
old_adj_index,
old_ecmp_size,
@@ -1172,7 +1212,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp)
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ bool reallocate)
{
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1228,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
continue;
}
- if (nh->update) {
+ if (nh->update || reallocate) {
err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
adj_index, nh);
if (err)
@@ -1233,6 +1274,11 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
int i;
int err;
+ if (!nh_grp->gateway) {
+ mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ return;
+ }
+
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
@@ -1248,7 +1294,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+ false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1276,7 +1323,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1334,42 +1381,63 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop *nh;
- /* Take RTNL mutex here to prevent lists from changes */
- rtnl_lock();
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
__mlxsw_sp_nexthop_neigh_update(nh, removing);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
- rtnl_unlock();
}
-static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
- struct mlxsw_sp_nexthop *nh,
- struct fib_nh *fib_nh)
+static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_rif *r)
+{
+ if (nh->r)
+ return;
+
+ nh->r = r;
+ list_add(&nh->rif_list_node, &r->nexthop_list);
+}
+
+static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->r)
+ return;
+
+ list_del(&nh->rif_list_node);
+ nh->r = NULL;
+}
+
+static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct net_device *dev = fib_nh->nh_dev;
+ struct fib_nh *fib_nh = nh->key.fib_nh;
struct neighbour *n;
u8 nud_state, dead;
+ int err;
+
+ if (!nh->nh_grp->gateway || nh->neigh_entry)
+ return 0;
	/* Take a reference to the neigh here, ensuring that the neigh will
	 * not be destructed before the nexthop entry is finished.
* The reference is taken either in neigh_lookup() or
- * in neith_create() in case n is not found.
+ * in neigh_create() in case n is not found.
*/
- n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+ n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
if (!n) {
- n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
+ n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
}
neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!neigh_entry) {
- neigh_release(n);
- return -EINVAL;
+ neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+ if (IS_ERR(neigh_entry)) {
+ err = -EINVAL;
+ goto err_neigh_entry_create;
+ }
}
/* If that is the first nexthop connected to that neigh, add to
@@ -1379,7 +1447,6 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&neigh_entry->nexthop_neighs_list_node,
&mlxsw_sp->router.nexthop_neighs_list);
- nh->nh_grp = nh_grp;
nh->neigh_entry = neigh_entry;
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
read_lock_bh(&n->lock);
@@ -1389,23 +1456,126 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
return 0;
+
+err_neigh_entry_create:
+ neigh_release(n);
+ return err;
}
-static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ struct neighbour *n;
+
+ if (!neigh_entry)
+ return;
+ n = neigh_entry->key.n;
__mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
+ nh->neigh_entry = NULL;
/* If that is the last nexthop connected to that neigh, remove from
* nexthop_neighs_list
*/
- if (list_empty(&nh->neigh_entry->nexthop_list))
- list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_del(&neigh_entry->nexthop_neighs_list_node);
- neigh_release(neigh_entry->key.n);
+ if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+ neigh_release(n);
+}
+
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct fib_nh *fib_nh)
+{
+ struct net_device *dev = fib_nh->nh_dev;
+ struct in_device *in_dev;
+ struct mlxsw_sp_rif *r;
+ int err;
+
+ nh->nh_grp = nh_grp;
+ nh->key.fib_nh = fib_nh;
+ err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
+ if (err)
+ return err;
+
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+ fib_nh->nh_flags & RTNH_F_LINKDOWN)
+ return 0;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!r)
+ return 0;
+ mlxsw_sp_nexthop_rif_init(nh, r);
+
+ err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+ if (err)
+ goto err_nexthop_neigh_init;
+
+ return 0;
+
+err_nexthop_neigh_init:
+ mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+}
+
+static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, struct fib_nh *fib_nh)
+{
+ struct mlxsw_sp_nexthop_key key;
+ struct mlxsw_sp_nexthop *nh;
+ struct mlxsw_sp_rif *r;
+
+ if (mlxsw_sp->router.aborted)
+ return;
+
+ key.fib_nh = fib_nh;
+ nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
+ if (WARN_ON_ONCE(!nh))
+ return;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+ if (!r)
+ return;
+
+ switch (event) {
+ case FIB_EVENT_NH_ADD:
+ mlxsw_sp_nexthop_rif_init(nh, r);
+ mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+ break;
+ case FIB_EVENT_NH_DEL:
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ break;
+ }
+
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+}
+
+static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp_nexthop *nh, *tmp;
+
+ list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
}
static struct mlxsw_sp_nexthop_group *
@@ -1424,7 +1594,9 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
if (!nh_grp)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
nh_grp->count = fi->fib_nhs;
+ nh_grp->key.fi = fi;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
fib_nh = &fi->fib_nh[i];
@@ -1432,13 +1604,18 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
if (err)
goto err_nexthop_init;
}
- list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+ err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_insert;
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
return nh_grp;
+err_nexthop_group_insert:
err_nexthop_init:
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ nh = &nh_grp->nexthops[i];
mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ }
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -1450,7 +1627,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh;
int i;
- list_del(&nh_grp->list);
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
@@ -1460,59 +1637,15 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
kfree(nh_grp);
}
-static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
- struct fib_info *fi)
-{
- int i;
-
- for (i = 0; i < fi->fib_nhs; i++) {
- struct fib_nh *fib_nh = &fi->fib_nh[i];
- struct neighbour *n = nh->neigh_entry->key.n;
-
- if (memcmp(n->primary_key, &fib_nh->nh_gw,
- sizeof(fib_nh->nh_gw)) == 0 &&
- n->dev == fib_nh->nh_dev)
- return true;
- }
- return false;
-}
-
-static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
- struct fib_info *fi)
-{
- int i;
-
- if (nh_grp->count != fi->fib_nhs)
- return false;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
-
- if (!mlxsw_sp_nexthop_match(nh, fi))
- return false;
- }
- return true;
-}
-
-static struct mlxsw_sp_nexthop_group *
-mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
-{
- struct mlxsw_sp_nexthop_group *nh_grp;
-
- list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
- list) {
- if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
- return nh_grp;
- }
- return NULL;
-}
-
static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
struct fib_info *fi)
{
+ struct mlxsw_sp_nexthop_group_key key;
struct mlxsw_sp_nexthop_group *nh_grp;
- nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
+ key.fi = fi;
+ nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
if (IS_ERR(nh_grp))
@@ -1534,13 +1667,82 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
+static bool
+mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
+
+ if (fib_entry->params.tos)
+ return false;
+
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return !!nh_group->adj_index_valid;
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return !!nh_group->nh_rif;
+ default:
+ return false;
+ }
+}
+
+static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ fib_entry->offloaded = true;
+
+ switch (fib_entry->fib_node->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ fib_info_offload_inc(fib_entry->nh_group->key.fi);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void
+mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->fib_node->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ fib_info_offload_dec(fib_entry->nh_group->key.fi);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON_ONCE(1);
+ }
+
+ fib_entry->offloaded = false;
+}
+
+static void
+mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op, int err)
+{
+ switch (op) {
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ if (!fib_entry->offloaded)
+ return;
+ return mlxsw_sp_fib_entry_offload_unset(fib_entry);
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+ if (err)
+ return;
+ if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
+ !fib_entry->offloaded)
+ mlxsw_sp_fib_entry_offload_set(fib_entry);
+ else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
+ fib_entry->offloaded)
+ mlxsw_sp_fib_entry_offload_unset(fib_entry);
+ return;
+ default:
+ return;
+ }
+}
+
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
- u32 *p_dip = (u32 *) fib_entry->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
+ u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -1550,7 +1752,7 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
* with provided ECMP size. Otherwise, setup trap and pass
* traffic to kernel.
*/
- if (fib_entry->nh_group->adj_index_valid) {
+ if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = fib_entry->nh_group->adj_index;
ecmp_size = fib_entry->nh_group->ecmp_size;
@@ -1561,7 +1763,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ralue_pack4(ralue_pl,
(enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->key.prefix_len, *p_dip);
+ vr->id, fib_entry->fib_node->key.prefix_len,
+ *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1571,16 +1774,27 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+ enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
- u32 *p_dip = (u32 *) fib_entry->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
+ u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
+ u16 trap_id = 0;
+ u16 rif = 0;
+
+ if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ rif = r->rif;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
mlxsw_reg_ralue_pack4(ralue_pl,
(enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->key.prefix_len, *p_dip);
- mlxsw_reg_ralue_act_local_pack(ralue_pl,
- MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
- fib_entry->rif);
+ vr->id, fib_entry->fib_node->key.prefix_len,
+ *p_dip);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1589,12 +1803,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
- u32 *p_dip = (u32 *) fib_entry->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
+ u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
mlxsw_reg_ralue_pack4(ralue_pl,
(enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->key.prefix_len, *p_dip);
+ vr->id, fib_entry->fib_node->key.prefix_len,
+ *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1618,13 +1833,17 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
- switch (fib_entry->vr->proto) {
+ int err = -EINVAL;
+
+ switch (fib_entry->fib_node->vr->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+ err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+ break;
case MLXSW_SP_L3_PROTO_IPV6:
- return -EINVAL;
+ return err;
}
- return -EINVAL;
+ mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
+ return err;
}
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
@@ -1642,14 +1861,11 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info,
- struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct fib_info *fi = fen_info->fi;
- struct mlxsw_sp_rif *r = NULL;
- int nhsel;
- int err;
if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
@@ -1657,58 +1873,177 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
}
if (fen_info->type != RTN_UNICAST)
return -EINVAL;
+ if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return 0;
+}
- for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
- const struct fib_nh *nh = &fi->fib_nh[nhsel];
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
- if (!nh->nh_dev)
- continue;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
- if (!r) {
- /* In case router interface is not found for
- * at least one of the nexthops, that means
- * the nexthop points to some device unrelated
- * to us. Set trap and pass the packets for
- * this prefix to kernel.
- */
- break;
- }
+ fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
+ if (!fib_entry) {
+ err = -ENOMEM;
+ goto err_fib_entry_alloc;
}
- if (!r) {
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
- return 0;
- }
+ err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+ if (err)
+ goto err_fib4_entry_type_set;
- if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- fib_entry->rif = r->rif;
- } else {
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
- if (err)
- return err;
- }
- fib_info_offload_inc(fen_info->fi);
- return 0;
+ err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
+ if (err)
+ goto err_nexthop_group_get;
+
+ fib_entry->params.prio = fen_info->fi->fib_priority;
+ fib_entry->params.tb_id = fen_info->tb_id;
+ fib_entry->params.type = fen_info->type;
+ fib_entry->params.tos = fen_info->tos;
+
+ fib_entry->fib_node = fib_node;
+
+ return fib_entry;
+
+err_nexthop_group_get:
+err_fib4_entry_type_set:
+ kfree(fib_entry);
+err_fib_entry_alloc:
+ return ERR_PTR(err);
}
-static void
-mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
- fib_info_offload_dec(fib_entry->fi);
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
- mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ kfree(fib_entry);
}
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info);
+
static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info)
+mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct fib_info *fi = fen_info->fi;
+ struct mlxsw_sp_fib_node *fib_node;
+
+ fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_node))
+ return NULL;
+
+ list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
+ if (fib_entry->params.tb_id == fen_info->tb_id &&
+ fib_entry->params.tos == fen_info->tos &&
+ fib_entry->params.type == fen_info->type &&
+ fib_entry->nh_group->key.fi == fen_info->fi) {
+ return fib_entry;
+ }
+ }
+
+ return NULL;
+}
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_fib_key),
+ .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+ struct mlxsw_sp_fib_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+ struct mlxsw_sp_fib_node *fib_node;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ return NULL;
+
+ INIT_LIST_HEAD(&fib_node->entry_list);
+ list_add(&fib_node->list, &vr->fib->node_list);
+ memcpy(fib_node->key.addr, addr, addr_len);
+ fib_node->key.prefix_len = prefix_len;
+ mlxsw_sp_fib_node_insert(vr->fib, fib_node);
+ fib_node->vr = vr;
+
+ return fib_node;
+}
+
+static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
+{
+ mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
+ list_del(&fib_node->list);
+ WARN_ON(!list_empty(&fib_node->entry_list));
+ kfree(fib_node);
+}
+
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+ const struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return list_first_entry(&fib_node->entry_list,
+ struct mlxsw_sp_fib_entry, list) == fib_entry;
+}
+
+static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+{
+ unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+
+ if (fib->prefix_ref_count[prefix_len]++ == 0)
+ mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
+}
+
+static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
+{
+ unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+
+ if (--fib->prefix_ref_count[prefix_len] == 0)
+ mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib_node *fib_node;
struct mlxsw_sp_vr *vr;
int err;
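The prefix helpers above keep a per-prefix-length reference count on the FIB and flip the corresponding prefix_usage bit only on the 0 -> 1 and 1 -> 0 transitions, which is what the rest of the driver consults when picking an LPM tree. A userspace sketch of that counting; the bitmap width and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SAMPLE_PREFIX_COUNT 33		/* /0 .. /32 for IPv4 */

static unsigned long prefix_ref_count[SAMPLE_PREFIX_COUNT];
static uint64_t prefix_usage;		/* bit N set while any /N route exists */

static void sample_prefix_inc(unsigned char prefix_len)
{
	if (prefix_ref_count[prefix_len]++ == 0)
		prefix_usage |= 1ULL << prefix_len;
}

static void sample_prefix_dec(unsigned char prefix_len)
{
	if (--prefix_ref_count[prefix_len] == 0)
		prefix_usage &= ~(1ULL << prefix_len);
}

int main(void)
{
	sample_prefix_inc(24);
	sample_prefix_inc(24);		/* second /24 only bumps the refcount */
	sample_prefix_inc(32);
	sample_prefix_dec(24);		/* /24 bit stays set, one route remains */
	printf("prefix usage = %#llx\n", (unsigned long long) prefix_usage);
	return 0;
}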
@@ -1717,113 +2052,258 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(vr))
return ERR_CAST(vr);
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
- sizeof(fen_info->dst),
- fen_info->dst_len, fi->fib_dev);
- if (fib_entry) {
- /* Already exists, just take a reference */
- fib_entry->ref_count++;
- return fib_entry;
- }
- fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
- sizeof(fen_info->dst),
- fen_info->dst_len, fi->fib_dev);
- if (!fib_entry) {
+ fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len);
+ if (fib_node)
+ return fib_node;
+
+ fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len);
+ if (!fib_node) {
err = -ENOMEM;
- goto err_fib_entry_create;
+ goto err_fib_node_create;
}
- fib_entry->vr = vr;
- fib_entry->fi = fi;
- fib_entry->ref_count = 1;
- err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
- if (err)
- goto err_fib4_entry_init;
+ return fib_node;
- return fib_entry;
-
-err_fib4_entry_init:
- mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
+err_fib_node_create:
mlxsw_sp_vr_put(mlxsw_sp, vr);
-
return ERR_PTR(err);
}
+static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_vr *vr = fib_node->vr;
+
+ if (!list_empty(&fib_node->entry_list))
+ return;
+ mlxsw_sp_fib_node_destroy(fib_node);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info)
+mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
+ const struct mlxsw_sp_fib_entry_params *params)
{
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib_entry *fib_entry;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
- MLXSW_SP_L3_PROTO_IPV4);
- if (!vr)
- return NULL;
+ list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
+ if (fib_entry->params.tb_id > params->tb_id)
+ continue;
+ if (fib_entry->params.tb_id != params->tb_id)
+ break;
+ if (fib_entry->params.tos > params->tos)
+ continue;
+ if (fib_entry->params.prio >= params->prio ||
+ fib_entry->params.tos < params->tos)
+ return fib_entry;
+ }
- return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
- sizeof(fen_info->dst),
- fen_info->dst_len,
- fen_info->fi->fib_dev);
+ return NULL;
}
-static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
+ struct mlxsw_sp_fib_entry *new_entry)
{
- struct mlxsw_sp_vr *vr = fib_entry->vr;
+ struct mlxsw_sp_fib_node *fib_node;
+
+ if (WARN_ON(!fib_entry))
+ return -EINVAL;
- if (--fib_entry->ref_count == 0) {
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_destroy(fib_entry);
+ fib_node = fib_entry->fib_node;
+ list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
+ if (fib_entry->params.tb_id != new_entry->params.tb_id ||
+ fib_entry->params.tos != new_entry->params.tos ||
+ fib_entry->params.prio != new_entry->params.prio)
+ break;
}
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ list_add_tail(&new_entry->list, &fib_entry->list);
+ return 0;
}
-static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static int
+mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib_entry *new_entry,
+ bool replace, bool append)
{
- unsigned int last_ref_count;
+ struct mlxsw_sp_fib_entry *fib_entry;
- do {
- last_ref_count = fib_entry->ref_count;
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- } while (last_ref_count != 1);
+ fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
+
+ if (append)
+ return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
+ if (replace && WARN_ON(!fib_entry))
+ return -EINVAL;
+
+	/* Insert the new entry before the replaced one, so that we can
+	 * later remove the replaced one.
+ */
+ if (fib_entry) {
+ list_add_tail(&new_entry->list, &fib_entry->list);
+ } else {
+ struct mlxsw_sp_fib_entry *last;
+
+ list_for_each_entry(last, &fib_node->entry_list, list) {
+ if (new_entry->params.tb_id > last->params.tb_id)
+ break;
+ fib_entry = last;
+ }
+
+ if (fib_entry)
+ list_add(&new_entry->list, &fib_entry->list);
+ else
+ list_add(&new_entry->list, &fib_node->entry_list);
+ }
+
+ return 0;
+}
+
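
As an aside, the entry-list handling above leans entirely on the <linux/list.h> primitives: the list is kept sorted, and list_add_tail() on an existing member inserts the new element directly in front of it. A minimal, hypothetical sketch of the same ordered-insert pattern (the demo_* names and the two-field key are illustrative, not part of the driver):

#include <linux/list.h>
#include <linux/types.h>

struct demo_entry {
	struct list_head list;
	u32 tb_id;
	u8 tos;
};

/* Keep the list ordered from highest to lowest (tb_id, tos) key by
 * inserting in front of the first entry with a smaller key.
 */
static void demo_insert_sorted(struct list_head *head,
			       struct demo_entry *new_entry)
{
	struct demo_entry *e;

	list_for_each_entry(e, head, list) {
		if (e->tb_id < new_entry->tb_id ||
		    (e->tb_id == new_entry->tb_id && e->tos < new_entry->tos)) {
			/* list_add_tail() on a member == insert before it */
			list_add_tail(&new_entry->list, &e->list);
			return;
		}
	}
	list_add_tail(&new_entry->list, head);	/* smallest key so far: append */
}
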
+static void
+mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ list_del(&fib_entry->list);
+}
+
+static int
+mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
+ return 0;
+
+ /* To prevent packet loss, overwrite the previously offloaded
+ * entry.
+ */
+ if (!list_is_singular(&fib_node->entry_list)) {
+ enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+ struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
+
+ mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
+ }
+
+ return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
-static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void
+mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
+ return;
+
+ /* Promote the next entry by overwriting the deleted entry */
+ if (!list_is_singular(&fib_node->entry_list)) {
+ struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
+ enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+
+ mlxsw_sp_fib_entry_update(mlxsw_sp, n);
+ mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
+ return;
+ }
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+}
+
+static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ bool replace, bool append)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ int err;
+
+ err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
+ append);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
+ if (err)
+ goto err_fib4_node_entry_add;
+
+ mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+ return 0;
+
+err_fib4_node_entry_add:
+ mlxsw_sp_fib4_node_list_remove(fib_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+
+ mlxsw_sp_fib_node_prefix_dec(fib_node);
+ mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
+ mlxsw_sp_fib4_node_list_remove(fib_entry);
+}
+
+static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ bool replace)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ struct mlxsw_sp_fib_entry *replaced;
+
+ if (!replace)
+ return;
+
+ /* We inserted the new entry before the replaced one */
+ replaced = list_next_entry(fib_entry, list);
+
+ mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
+ mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
+}
+
+static int
+mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info,
+ bool replace, bool append)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib_node *fib_node;
int err;
if (mlxsw_sp->router.aborted)
return 0;
- fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
- if (IS_ERR(fib_entry)) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
- return PTR_ERR(fib_entry);
+ fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_node)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
+ return PTR_ERR(fib_node);
}
- if (fib_entry->ref_count != 1)
- return 0;
+ fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
+ if (IS_ERR(fib_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
+ err = PTR_ERR(fib_entry);
+ goto err_fib4_entry_create;
+ }
- vr = fib_entry->vr;
- err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
+ err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
+ append);
if (err) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
- goto err_fib_entry_insert;
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
+ goto err_fib4_node_entry_link;
}
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
- if (err)
- goto err_fib_entry_add;
+
+ mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
+
return 0;
-err_fib_entry_add:
- mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-err_fib_entry_insert:
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+err_fib4_node_entry_link:
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+err_fib4_entry_create:
+ mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
return err;
}
@@ -1831,20 +2311,19 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_node *fib_node;
if (mlxsw_sp->router.aborted)
return;
- fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry)
+ fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
+ if (WARN_ON(!fib_entry))
return;
+ fib_node = fib_entry->fib_node;
- if (fib_entry->ref_count == 1) {
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
- }
-
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1878,10 +2357,42 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
+static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_fib_entry *fib_entry, *tmp;
+
+ list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
+ bool do_break = &tmp->list == &fib_node->entry_list;
+
+ mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
+ /* Break when the entry list is empty and the node was freed.
+ * Otherwise, we'll access freed memory in the next
+ * iteration.
+ */
+ if (do_break)
+ break;
+ }
+}
+
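
The do_break test above exists because mlxsw_sp_fib4_node_put() can free the node, and with it the list head, as soon as the last entry is removed; the iterator must not be advanced afterwards. A generic, hypothetical sketch of this flush-and-maybe-free pattern:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head entry_list;	/* node is freed with its last entry */
};

struct demo_entry {
	struct list_head list;
};

static void demo_node_flush(struct demo_node *node)
{
	struct demo_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &node->entry_list, list) {
		/* Decide before freeing anything: if this was the last
		 * entry, the node (and its list head) goes away too.
		 */
		bool do_break = &tmp->list == &node->entry_list;

		list_del(&entry->list);
		kfree(entry);
		if (list_empty(&node->entry_list))
			kfree(node);
		if (do_break)
			break;
	}
}
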
+static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ switch (fib_node->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_fib_entry *tmp;
+ struct mlxsw_sp_fib_node *fib_node, *tmp;
struct mlxsw_sp_vr *vr;
int i;
@@ -1891,14 +2402,11 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!vr->used)
continue;
- list_for_each_entry_safe(fib_entry, tmp,
- &vr->fib->entry_list, list) {
- bool do_break = &tmp->list == &vr->fib->entry_list;
+ list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
+ list) {
+ bool do_break = &tmp->list == &vr->fib->node_list;
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
- fib_entry);
- mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
if (do_break)
break;
}
@@ -1919,6 +2427,28 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ mlxsw_reg_ritr_enable_set(ritr_pl, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
+ mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
+}
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -1962,8 +2492,11 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
}
struct mlxsw_sp_fib_event_work {
- struct delayed_work dw;
- struct fib_entry_notifier_info fen_info;
+ struct work_struct work;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib_nh_notifier_info fnh_info;
+ };
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
};
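
Switching from a delayed_work to a plain work_struct with a union payload lets a single allocation carry whichever notifier info arrived. A minimal, hypothetical sketch of the defer-to-process-context pattern (demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_event_work {
	struct work_struct work;
	unsigned long event;
	int payload;			/* stands in for the union of infos */
};

static void demo_event_work_fn(struct work_struct *work)
{
	struct demo_event_work *ew =
		container_of(work, struct demo_event_work, work);

	/* ... process ew->event / ew->payload, e.g. under rtnl_lock() ... */
	kfree(ew);
}

/* Called from (possibly atomic) notifier context: copy and defer. */
static int demo_queue_event(unsigned long event, int payload)
{
	struct demo_event_work *ew;

	ew = kzalloc(sizeof(*ew), GFP_ATOMIC);
	if (!ew)
		return -ENOMEM;

	INIT_WORK(&ew->work, demo_event_work_fn);
	ew->event = event;
	ew->payload = payload;
	schedule_work(&ew->work);
	return 0;
}
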
@@ -1971,15 +2504,21 @@ struct mlxsw_sp_fib_event_work {
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, dw.work);
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ bool replace, append;
int err;
/* Protect internal structures from changes */
rtnl_lock();
switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info);
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+ append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
+ replace, append);
if (err)
mlxsw_sp_router_fib4_abort(mlxsw_sp);
fib_info_put(fib_work->fen_info.fi);
@@ -1992,6 +2531,12 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
case FIB_EVENT_RULE_DEL:
mlxsw_sp_router_fib4_abort(mlxsw_sp);
break;
+ case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_DEL:
+ mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
}
rtnl_unlock();
kfree(fib_work);
@@ -2012,11 +2557,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
if (WARN_ON(!fib_work))
return NOTIFY_BAD;
- INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
fib_work->mlxsw_sp = mlxsw_sp;
fib_work->event = event;
switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
@@ -2025,9 +2572,14 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
+ case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_DEL:
+ memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
}
- mlxsw_core_schedule_odw(&fib_work->dw, 0);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
}
@@ -2049,11 +2601,20 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
int err;
INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
return err;
+ err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
+ &mlxsw_sp_nexthop_ht_params);
+ if (err)
+ goto err_nexthop_ht_init;
+
+ err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
+ &mlxsw_sp_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_group_ht_init;
+
mlxsw_sp_lpm_init(mlxsw_sp);
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
@@ -2076,6 +2637,10 @@ err_register_fib_notifier:
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
+ rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+err_nexthop_group_ht_init:
+ rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+err_nexthop_ht_init:
__mlxsw_sp_router_fini(mlxsw_sp);
return err;
}
@@ -2085,5 +2650,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(&mlxsw_sp->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
+ rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
__mlxsw_sp_router_fini(mlxsw_sp);
}
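
For reference, the two rhashtables added above follow the usual lifetime: rhashtable_init() during driver init, inserts and lookups keyed by a fixed-size key, rhashtable_destroy() on the error path and in fini. A hedged, minimal example with an invented key layout:

#include <linux/rhashtable.h>
#include <linux/types.h>

struct demo_nh {
	struct rhash_head ht_node;
	u32 key;
};

static const struct rhashtable_params demo_nh_ht_params = {
	.key_offset	= offsetof(struct demo_nh, key),
	.head_offset	= offsetof(struct demo_nh, ht_node),
	.key_len	= sizeof(u32),
};

static int demo_router_init(struct rhashtable *ht)
{
	return rhashtable_init(ht, &demo_nh_ht_params);
}

static int demo_nh_insert(struct rhashtable *ht, struct demo_nh *nh)
{
	return rhashtable_insert_fast(ht, &nh->ht_node, demo_nh_ht_params);
}

static struct demo_nh *demo_nh_lookup(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup_fast(ht, &key, demo_nh_ht_params);
}

static void demo_router_fini(struct rhashtable *ht)
{
	rhashtable_destroy(ht);
}
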
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index b87ba7d36bc4..598727d578c1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -71,8 +71,21 @@ mlxsw_sp_port_orig_get(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *fid;
u16 vid;
+ if (netif_is_bridge_master(dev)) {
+ fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
+ dev);
+ if (fid) {
+ mlxsw_sp_vport =
+ mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid->fid);
+ WARN_ON(!mlxsw_sp_vport);
+ return mlxsw_sp_vport;
+ }
+ }
+
if (!is_vlan_dev(dev))
return mlxsw_sp_port;
@@ -166,9 +179,10 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool uc_set,
- bool bm_set)
+static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 idx_begin, u16 idx_end,
+ enum mlxsw_sp_flood_table table,
+ bool set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@@ -186,31 +200,48 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sftr_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, uc_set);
+ mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
+ table_type, range, local_port, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 idx_begin, u16 idx_end, bool uc_set,
+ bool bc_set, bool mc_set)
+{
+ int err;
+
+ err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+ MLXSW_SP_FLOOD_TABLE_UC, uc_set);
if (err)
- goto buffer_out;
+ return err;
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
- table_type, range, local_port, bm_set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+ MLXSW_SP_FLOOD_TABLE_BC, bc_set);
if (err)
goto err_flood_bm_set;
- goto buffer_out;
+ err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+ MLXSW_SP_FLOOD_TABLE_MC, mc_set);
+ if (err)
+ goto err_flood_mc_set;
+ return 0;
+err_flood_mc_set:
+ __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+ MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bm_set:
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, !uc_set);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-buffer_out:
- kfree(sftr_pl);
+ __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+ MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
return err;
}
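
The rewritten flood helper undoes the per-table writes that already succeeded when a later one fails. A schematic of that goto-based unwind, with hypothetical demo_set_* placeholders standing in for the real register writes:

#include <linux/types.h>

/* Placeholders for the three per-table configuration writes. */
int demo_set_uc_flood(bool set);
int demo_set_bc_flood(bool set);
int demo_set_mc_flood(bool set);

static int demo_flood_set(bool uc, bool bc, bool mc)
{
	int err;

	err = demo_set_uc_flood(uc);
	if (err)
		return err;

	err = demo_set_bc_flood(bc);
	if (err)
		goto err_bc_set;

	err = demo_set_mc_flood(mc);
	if (err)
		goto err_mc_set;

	return 0;

err_mc_set:
	demo_set_bc_flood(!bc);	/* roll back in reverse order */
err_bc_set:
	demo_set_uc_flood(!uc);
	return err;
}
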
-static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool set)
+static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_sp_flood_table table,
+ bool set)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, last_visited_vid;
@@ -220,13 +251,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
u16 vfid = mlxsw_sp_fid_to_vfid(fid);
- return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
- set, true);
+ return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
+ vfid, table, set);
}
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
- true);
+ err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
+ table, set);
if (err) {
last_visited_vid = vid;
goto err_port_flood_set;
@@ -237,21 +268,53 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_flood_set:
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+ __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
+ !set);
netdev_err(dev, "Failed to configure unicast flooding\n");
return err;
}
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ bool mc_disabled)
+{
+ int set;
+ int err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
+ set = mc_disabled ?
+ mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+ err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+ MLXSW_SP_FLOOD_TABLE_MC,
+ set);
+ }
+
+ if (!err)
+ mlxsw_sp_port->mc_disabled = mc_disabled;
+
+ return err;
+}
+
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool set)
{
+ bool mc_set = set;
u16 vfid;
/* In case of vFIDs, index into the flooding table is relative to
* the start of the vFIDs range.
*/
vfid = mlxsw_sp_fid_to_vfid(fid);
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
+
+ if (set)
+ mc_set = mlxsw_sp_vport->mc_disabled ?
+ mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
+
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
+ mc_set);
}
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -297,8 +360,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
- !mlxsw_sp_port->uc_flood);
+ err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+ MLXSW_SP_FLOOD_TABLE_UC,
+ !mlxsw_sp_port->uc_flood);
if (err)
return err;
}
@@ -318,8 +382,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_learning_set:
if ((uc_flood ^ brport_flags) & BR_FLOOD)
- mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
- mlxsw_sp_port->uc_flood);
+ mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+ MLXSW_SP_FLOOD_TABLE_UC,
+ mlxsw_sp_port->uc_flood);
return err;
}
@@ -371,6 +436,22 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ bool is_port_mc_router)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ mlxsw_sp_port->mc_router = is_port_mc_router;
+ if (!mlxsw_sp_port->mc_disabled)
+ return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+ MLXSW_SP_FLOOD_TABLE_MC,
+ is_port_mc_router);
+
+ return 0;
+}
+
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -400,6 +481,14 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -545,6 +634,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_begin, u16 fid_end)
{
+ bool mc_flood;
int fid, err;
for (fid = fid_begin; fid <= fid_end; fid++) {
@@ -553,8 +643,12 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
goto err_port_fid_join;
}
+ mc_flood = mlxsw_sp_port->mc_disabled ?
+ mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- mlxsw_sp_port->uc_flood, true);
+ mlxsw_sp_port->uc_flood, true,
+ mc_flood);
if (err)
goto err_port_flood_set;
@@ -570,7 +664,7 @@ err_port_fid_map:
for (fid--; fid >= fid_begin; fid--)
mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false);
+ false, false);
err_port_flood_set:
fid = fid_end;
err_port_fid_join:
@@ -588,7 +682,7 @@ static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false);
+ false, false);
for (fid = fid_begin; fid <= fid_end; fid++)
__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 150ccf5192a9..ec1e886d4566 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
@@ -381,7 +382,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static struct rtnl_link_stats64 *
+static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -410,7 +411,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
- return stats;
}
static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
@@ -733,7 +733,7 @@ static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
}
static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -750,8 +750,8 @@ static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
}
static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
@@ -776,8 +776,9 @@ static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
return PORT_OTHER;
}
-static int mlxsw_sx_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
@@ -785,6 +786,7 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev,
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_oper;
+ u32 supported, advertising, lp_advertising;
int err;
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
@@ -796,18 +798,24 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev,
mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
- cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+ supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+ advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
- cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+ cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
+ lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
- cmd->transceiver = XCVR_INTERNAL;
return 0;
}
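
The conversion above is the standard recipe for drivers that still assemble legacy SUPPORTED_*/ADVERTISED_* u32 masks: compute the masks as before, then translate them into the link-mode bitmaps of ethtool_link_ksettings. A hedged sketch of that final step (demo_fill_ksettings() and the example base values are illustrative):

#include <linux/ethtool.h>

/* Hypothetical helper: fill a ksettings struct from legacy u32 masks. */
static void demo_fill_ksettings(struct ethtool_link_ksettings *cmd,
				u32 supported, u32 advertising,
				u32 lp_advertising)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	/* Example base values only; a real driver reads them from HW. */
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_DISABLE;
}
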
@@ -847,8 +855,9 @@ static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
return ptys_proto;
}
-static int mlxsw_sx_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
@@ -857,13 +866,17 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
+ u32 advertising;
bool is_up;
int err;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
- mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+ eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
+ mlxsw_sx_to_ptys_advert_link(advertising) :
mlxsw_sx_to_ptys_speed(speed);
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
@@ -920,8 +933,8 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
.get_strings = mlxsw_sx_port_get_strings,
.get_ethtool_stats = mlxsw_sx_port_get_stats,
.get_sset_count = mlxsw_sx_port_get_sset_count,
- .get_settings = mlxsw_sx_port_get_settings,
- .set_settings = mlxsw_sx_port_set_settings,
+ .get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};
static int mlxsw_sx_port_attr_get(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 7ab275deacac..02ea48b15eb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 20cb85bc0c5f..bd51e057e915 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
/* Relinquish the SKB to the network layer */
skb_put(skb, pktlen);
skb->protocol = eth_type_trans(skb, ndev);
- netif_receive_skb(skb);
+ napi_gro_receive(&ksp->napi, skb);
/* Record stats */
ndev->stats.rx_packets++;
@@ -561,18 +561,17 @@ rx_finished:
static int ks8695_poll(struct napi_struct *napi, int budget)
{
struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
- unsigned long work_done;
-
unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+ int work_done;
work_done = ks8695_rx(ksp, budget);
- if (work_done < budget) {
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
unsigned long flags;
+
spin_lock_irqsave(&ksp->rx_lock, flags);
- __napi_complete(napi);
- /*enable rx interrupt*/
+ /* enable rx interrupt */
writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
spin_unlock_irqrestore(&ksp->rx_lock, flags);
}
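
napi_complete_done() both reports the amount of work done and tells the driver whether NAPI really completed; when busy polling still owns the context it returns false, so interrupts are re-enabled only on a true return. A generic, hypothetical poll handler using the same shape:

#include <linux/netdevice.h>

/* Illustrative stubs for the driver-specific parts. */
int demo_clean_rx(struct napi_struct *napi, int budget);
void demo_enable_rx_irq(struct napi_struct *napi);

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = demo_clean_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		demo_enable_rx_irq(napi);	/* NAPI is really done */

	return work_done;
}
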
@@ -855,85 +854,94 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
}
/**
- * ks8695_wan_get_settings - Get device-specific settings.
+ * ks8695_wan_get_link_ksettings - Get device-specific settings.
* @ndev: The network device to read settings from
* @cmd: The ethtool structure to read into
*/
static int
-ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
+ u32 supported, advertising;
/* All ports on the KS8695 support these... */
- cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_TP | SUPPORTED_MII);
- cmd->transceiver = XCVR_INTERNAL;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- cmd->port = PORT_MII;
- cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
- cmd->phy_address = 0;
+ advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->base.port = PORT_MII;
+ supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->base.phy_address = 0;
ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
if ((ctrl & WMC_WAND) == 0) {
/* auto-negotiation is enabled */
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
if (ctrl & WMC_WANA100F)
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (ctrl & WMC_WANA100H)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (ctrl & WMC_WANA10F)
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (ctrl & WMC_WANA10H)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (ctrl & WMC_WANAP)
- cmd->advertising |= ADVERTISED_Pause;
- cmd->autoneg = AUTONEG_ENABLE;
+ advertising |= ADVERTISED_Pause;
+ cmd->base.autoneg = AUTONEG_ENABLE;
- ethtool_cmd_speed_set(cmd,
- (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
- cmd->duplex = (ctrl & WMC_WDS) ?
+ cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ cmd->base.duplex = (ctrl & WMC_WDS) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
/* auto-negotiation is disabled */
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10));
- cmd->duplex = (ctrl & WMC_WANFF) ?
+ cmd->base.speed = (ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (ctrl & WMC_WANFF) ?
DUPLEX_FULL : DUPLEX_HALF;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
/**
- * ks8695_wan_set_settings - Set device-specific settings.
+ * ks8695_wan_set_link_ksettings - Set device-specific settings.
* @ndev: The network device to configure
* @cmd: The settings to configure
*/
static int
-ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
+ u32 advertising;
- if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
- return -EINVAL;
- if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100))
return -EINVAL;
- if (cmd->port != PORT_MII)
+ if ((cmd->base.duplex != DUPLEX_HALF) &&
+ (cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
- if (cmd->transceiver != XCVR_INTERNAL)
+ if (cmd->base.port != PORT_MII)
return -EINVAL;
- if ((cmd->autoneg != AUTONEG_DISABLE) &&
- (cmd->autoneg != AUTONEG_ENABLE))
+ if ((cmd->base.autoneg != AUTONEG_DISABLE) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE))
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if ((cmd->advertising & (ADVERTISED_10baseT_Half |
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if ((advertising & (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full)) == 0)
@@ -943,13 +951,13 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
WMC_WANA10F | WMC_WANA10H);
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
ctrl |= WMC_WANA100F;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
ctrl |= WMC_WANA100H;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
ctrl |= WMC_WANA10F;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
ctrl |= WMC_WANA10H;
/* force a re-negotiation */
@@ -962,9 +970,9 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
ctrl |= WMC_WAND;
ctrl &= ~(WMC_WANF100 | WMC_WANFF);
- if (cmd->speed == SPEED_100)
+ if (cmd->base.speed == SPEED_100)
ctrl |= WMC_WANF100;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
ctrl |= WMC_WANFF;
writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
@@ -1043,12 +1051,12 @@ static const struct ethtool_ops ks8695_ethtool_ops = {
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
.get_msglevel = ks8695_get_msglevel,
.set_msglevel = ks8695_set_msglevel,
- .get_settings = ks8695_wan_get_settings,
- .set_settings = ks8695_wan_set_settings,
.nway_reset = ks8695_wan_nwayreset,
.get_link = ethtool_op_get_link,
.get_pauseparam = ks8695_wan_get_pause,
.get_drvinfo = ks8695_get_drvinfo,
+ .get_link_ksettings = ks8695_wan_get_link_ksettings,
+ .set_link_ksettings = ks8695_wan_set_link_ksettings,
};
/* Network device interface functions */
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e7e1aff40bd9..279ee4612981 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -84,7 +84,6 @@ union ks8851_tx_hdr {
* @rc_ier: Cached copy of KS_IER.
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
- * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
@@ -120,7 +119,6 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
u16 rc_ccr;
- u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -1088,16 +1086,18 @@ static void ks8851_set_msglevel(struct net_device *dev, u32 to)
ks->msg_enable = to;
}
-static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ks8851_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ks8851_net *ks = netdev_priv(dev);
- return mii_ethtool_gset(&ks->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
}
-static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ks8851_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ks8851_net *ks = netdev_priv(dev);
- return mii_ethtool_sset(&ks->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}
static u32 ks8851_get_link(struct net_device *dev)
@@ -1253,13 +1253,13 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
.get_drvinfo = ks8851_get_drvinfo,
.get_msglevel = ks8851_get_msglevel,
.set_msglevel = ks8851_set_msglevel,
- .get_settings = ks8851_get_settings,
- .set_settings = ks8851_set_settings,
.get_link = ks8851_get_link,
.nway_reset = ks8851_nway_reset,
.get_eeprom_len = ks8851_get_eeprom_len,
.get_eeprom = ks8851_get_eeprom,
.set_eeprom = ks8851_set_eeprom,
+ .get_link_ksettings = ks8851_get_link_ksettings,
+ .set_link_ksettings = ks8851_set_link_ksettings,
};
/* MII interface controls */
@@ -1533,11 +1533,6 @@ static int ks8851_probe(struct spi_device *spi)
/* cache the contents of the CCR register for EEPROM, etc. */
ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
- if (ks->rc_ccr & CCR_EEPROM)
- ks->eeprom_size = 128;
- else
- ks->eeprom_size = 0;
-
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index db628078a4e6..7647f7bdbcb8 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1311,16 +1311,18 @@ static void ks_set_msglevel(struct net_device *netdev, u32 to)
ks->msg_enable = to;
}
-static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ks_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_gset(&ks->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
}
-static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ks_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_sset(&ks->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}
static u32 ks_get_link(struct net_device *netdev)
@@ -1339,10 +1341,10 @@ static const struct ethtool_ops ks_ethtool_ops = {
.get_drvinfo = ks_get_drvinfo,
.get_msglevel = ks_get_msglevel,
.set_msglevel = ks_set_msglevel,
- .get_settings = ks_get_settings,
- .set_settings = ks_set_settings,
.get_link = ks_get_link,
.nway_reset = ks_nway_reset,
+ .get_link_ksettings = ks_get_link_ksettings,
+ .set_link_ksettings = ks_set_link_ksettings,
};
/* MII interface controls */
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 97f6ef1fa7d0..ee38c18c2d2d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5944,7 +5944,7 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 };
/* These functions use the MII functions in mii.c. */
/**
- * netdev_get_settings - get network device settings
+ * netdev_get_link_ksettings - get network device settings
* @dev: Network device.
* @cmd: Ethtool command.
*
@@ -5952,23 +5952,26 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 };
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
mutex_lock(&hw_priv->lock);
- mii_ethtool_gset(&priv->mii_if, cmd);
- cmd->advertising |= SUPPORTED_TP;
+ mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
mutex_unlock(&hw_priv->lock);
/* Save advertised settings for workaround in next function. */
- priv->advertising = cmd->advertising;
+ ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
+ cmd->link_modes.advertising);
+
return 0;
}
/**
- * netdev_set_settings - set network device settings
+ * netdev_set_link_ksettings - set network device settings
* @dev: Network device.
* @cmd: Ethtool command.
*
@@ -5976,54 +5979,65 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_port *port = &priv->port;
- u32 speed = ethtool_cmd_speed(cmd);
+ struct ethtool_link_ksettings copy_cmd;
+ u32 speed = cmd->base.speed;
+ u32 advertising;
int rc;
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
/*
* ethtool utility does not change advertised setting if auto
* negotiation is not specified explicitly.
*/
- if (cmd->autoneg && priv->advertising == cmd->advertising) {
- cmd->advertising |= ADVERTISED_ALL;
+ if (cmd->base.autoneg && priv->advertising == advertising) {
+ advertising |= ADVERTISED_ALL;
if (10 == speed)
- cmd->advertising &=
+ advertising &=
~(ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half);
else if (100 == speed)
- cmd->advertising &=
+ advertising &=
~(ADVERTISED_10baseT_Full |
ADVERTISED_10baseT_Half);
- if (0 == cmd->duplex)
- cmd->advertising &=
+ if (0 == cmd->base.duplex)
+ advertising &=
~(ADVERTISED_100baseT_Full |
ADVERTISED_10baseT_Full);
- else if (1 == cmd->duplex)
- cmd->advertising &=
+ else if (1 == cmd->base.duplex)
+ advertising &=
~(ADVERTISED_100baseT_Half |
ADVERTISED_10baseT_Half);
}
mutex_lock(&hw_priv->lock);
- if (cmd->autoneg &&
- (cmd->advertising & ADVERTISED_ALL) ==
- ADVERTISED_ALL) {
+ if (cmd->base.autoneg &&
+ (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
port->duplex = 0;
port->speed = 0;
port->force_link = 0;
} else {
- port->duplex = cmd->duplex + 1;
+ port->duplex = cmd->base.duplex + 1;
if (1000 != speed)
port->speed = speed;
- if (cmd->autoneg)
+ if (cmd->base.autoneg)
port->force_link = 0;
else
port->force_link = 1;
}
- rc = mii_ethtool_sset(&priv->mii_if, cmd);
+
+ memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
+ ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
+ advertising);
+ rc = mii_ethtool_set_link_ksettings(
+ &priv->mii_if,
+ (const struct ethtool_link_ksettings *)&copy_cmd);
mutex_unlock(&hw_priv->lock);
return rc;
}
@@ -6597,8 +6611,6 @@ static int netdev_set_features(struct net_device *dev,
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_drvinfo = netdev_get_drvinfo,
@@ -6617,6 +6629,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_strings = netdev_get_strings,
.get_sset_count = netdev_get_sset_count,
.get_ethtool_stats = netdev_get_ethtool_stats,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
/*
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 045b9106c0ff..f6ecfa778660 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1487,27 +1487,30 @@ enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static int
-enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+enc28j60_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct enc28j60_net *priv = netdev_priv(dev);
- cmd->transceiver = XCVR_INTERNAL;
- cmd->supported = SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_TP;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_TP;
- cmd->autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
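
When only a handful of link modes apply, the per-mode helpers avoid building a legacy u32 at all, as the enc28j60 conversion above does. A short, hypothetical example in the same style:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical 10 Mb/s, TP-only device. */
static int demo_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}
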
static int
-enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+enc28j60_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- return enc28j60_setlink(dev, cmd->autoneg,
- ethtool_cmd_speed(cmd), cmd->duplex);
+ return enc28j60_setlink(dev, cmd->base.autoneg,
+ cmd->base.speed, cmd->base.duplex);
}
static u32 enc28j60_get_msglevel(struct net_device *dev)
@@ -1523,11 +1526,11 @@ static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
}
static const struct ethtool_ops enc28j60_ethtool_ops = {
- .get_settings = enc28j60_get_settings,
- .set_settings = enc28j60_set_settings,
.get_drvinfo = enc28j60_get_drvinfo,
.get_msglevel = enc28j60_get_msglevel,
.set_msglevel = enc28j60_set_msglevel,
+ .get_link_ksettings = enc28j60_get_link_ksettings,
+ .set_link_ksettings = enc28j60_set_link_ksettings,
};
static int enc28j60_chipset_init(struct net_device *dev)
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index fbce6166504e..f831238d9793 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -940,29 +940,33 @@ static void encx24j600_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-static int encx24j600_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int encx24j600_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+ u32 supported;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP;
- ethtool_cmd_speed_set(cmd, priv->speed);
- cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_TP;
- cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
+ cmd->base.speed = priv->speed;
+ cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
-static int encx24j600_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+encx24j600_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- return encx24j600_setlink(dev, cmd->autoneg,
- ethtool_cmd_speed(cmd), cmd->duplex);
+ return encx24j600_setlink(dev, cmd->base.autoneg,
+ cmd->base.speed, cmd->base.duplex);
}
static u32 encx24j600_get_msglevel(struct net_device *dev)
@@ -980,13 +984,13 @@ static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
}
static const struct ethtool_ops encx24j600_ethtool_ops = {
- .get_settings = encx24j600_get_settings,
- .set_settings = encx24j600_set_settings,
.get_drvinfo = encx24j600_get_drvinfo,
.get_msglevel = encx24j600_get_msglevel,
.set_msglevel = encx24j600_set_msglevel,
.get_regs_len = encx24j600_get_regs_len,
.get_regs = encx24j600_get_regs,
+ .get_link_ksettings = encx24j600_get_link_ksettings,
+ .set_link_ksettings = encx24j600_set_link_ksettings,
};
static const struct net_device_ops encx24j600_netdev_ops = {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 9774b50cff6e..06c9f4100cb9 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -269,7 +269,7 @@ rx_next:
}
if (rx < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx);
}
priv->reg_imr |= RPKT_FINISH_M;
@@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev)
spin_unlock_irq(&priv->txlock);
}
-static struct net_device_ops moxart_netdev_ops = {
+static const struct net_device_ops moxart_netdev_ops = {
.ndo_open = moxart_mac_open,
.ndo_stop = moxart_mac_stop,
.ndo_start_xmit = moxart_mac_start_xmit,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index e506ca876d0d..b171ed2015fe 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -191,21 +191,6 @@ struct myri10ge_slice_state {
int cpu;
__be32 __iomem *dca_tag;
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define SLICE_STATE_IDLE 0
-#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
-#define SLICE_STATE_POLL 2 /* poll owns this slice */
-#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
-#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
-#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
-#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
- spinlock_t lock;
- unsigned long lock_napi_yield;
- unsigned long lock_poll_yield;
- unsigned long busy_poll_miss;
- unsigned long busy_poll_cnt;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
char irq_desc[32];
};
@@ -378,8 +363,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static void myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
@@ -925,92 +910,6 @@ abort:
return status;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
-{
- spin_lock_init(&ss->lock);
- ss->state = SLICE_STATE_IDLE;
-}
-
-static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
-{
- bool rc = true;
- spin_lock(&ss->lock);
- if ((ss->state & SLICE_LOCKED)) {
- WARN_ON((ss->state & SLICE_STATE_NAPI));
- ss->state |= SLICE_STATE_NAPI_YIELD;
- rc = false;
- ss->lock_napi_yield++;
- } else
- ss->state = SLICE_STATE_NAPI;
- spin_unlock(&ss->lock);
- return rc;
-}
-
-static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
-{
- spin_lock(&ss->lock);
- WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
- ss->state = SLICE_STATE_IDLE;
- spin_unlock(&ss->lock);
-}
-
-static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
-{
- bool rc = true;
- spin_lock_bh(&ss->lock);
- if ((ss->state & SLICE_LOCKED)) {
- ss->state |= SLICE_STATE_POLL_YIELD;
- rc = false;
- ss->lock_poll_yield++;
- } else
- ss->state |= SLICE_STATE_POLL;
- spin_unlock_bh(&ss->lock);
- return rc;
-}
-
-static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
-{
- spin_lock_bh(&ss->lock);
- WARN_ON((ss->state & SLICE_STATE_NAPI));
- ss->state = SLICE_STATE_IDLE;
- spin_unlock_bh(&ss->lock);
-}
-
-static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
-{
- WARN_ON(!(ss->state & SLICE_LOCKED));
- return (ss->state & SLICE_USER_PEND);
-}
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
-{
- return false;
-}
-
-static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
-{
- return false;
-}
-
-static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
-{
- return false;
-}
-#endif
-
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
@@ -1426,7 +1325,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
- bool polling;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
@@ -1441,15 +1339,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
- /* When busy polling in user context, allocate skb and copy headers to
- * skb's linear memory ourselves. When not busy polling, use the napi
- * gro api.
- */
- polling = myri10ge_ss_busy_polling(ss);
- if (polling)
- skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
- else
- skb = napi_get_frags(&ss->napi);
+ skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1489,27 +1379,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
- if (polling) {
- int hlen;
-
- /* myri10ge_vlan_rx might have moved the header, so compute
- * length and address again.
- */
- hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
- va = page_address(skb_frag_page(&rx_frags[0])) +
- rx_frags[0].page_offset;
- /* Copy header into the skb linear memory */
- skb_copy_to_linear_data(skb, va, hlen);
- rx_frags[0].page_offset += hlen;
- rx_frags[0].size -= hlen;
- skb->data_len -= hlen;
- skb->tail += hlen;
- skb->protocol = eth_type_trans(skb, dev);
- skb_mark_napi_id(skb, &ss->napi);
- netif_receive_skb(skb);
- }
- else
- napi_gro_frags(&ss->napi);
+ napi_gro_frags(&ss->napi);
return 1;
}
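
With the busy-poll special case removed, receive completion always uses the frag-based GRO path: napi_get_frags() returns a recycled skb, the driver attaches page fragments and fixes up the length accounting, and napi_gro_frags() hands the skb to the stack. A hedged sketch of that flow (demo_rx_one() and its bookkeeping are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical: hand one received page fragment to GRO. */
static int demo_rx_one(struct napi_struct *napi, struct page *page,
		       unsigned int offset, unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return -ENOMEM;		/* caller recycles the page */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);		/* consumes the skb */
	return 0;
}
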
@@ -1669,49 +1539,16 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
- /* Try later if the busy_poll handler is running. */
- if (!myri10ge_ss_lock_napi(ss))
- return budget;
-
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
- myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
put_be32(htonl(3), ss->irq_claim);
}
return work_done;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int myri10ge_busy_poll(struct napi_struct *napi)
-{
- struct myri10ge_slice_state *ss =
- container_of(napi, struct myri10ge_slice_state, napi);
- struct myri10ge_priv *mgp = ss->mgp;
- int work_done;
-
- /* Poll only when the link is up */
- if (mgp->link_state != MXGEFW_LINK_UP)
- return LL_FLUSH_FAILED;
-
- if (!myri10ge_ss_lock_poll(ss))
- return LL_FLUSH_BUSY;
-
- /* Process a small number of packets */
- work_done = myri10ge_clean_rx_done(ss, 4);
- if (work_done)
- ss->busy_poll_cnt += work_done;
- else
- ss->busy_poll_miss++;
-
- myri10ge_ss_unlock_poll(ss);
-
- return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
@@ -1773,15 +1610,16 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
}
static int
-myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+myri10ge_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
char *ptr;
int i;
- cmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
/*
* parse the product code to determine the interface type
@@ -1806,16 +1644,12 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
ptr++;
if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
/* We've found either an XFP, quad ribbon fiber, or SFP+ */
- cmd->port = PORT_FIBRE;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
} else {
- cmd->port = PORT_OTHER;
+ cmd->base.port = PORT_OTHER;
}
- if (*ptr == 'R' || *ptr == 'S')
- cmd->transceiver = XCVR_EXTERNAL;
- else
- cmd->transceiver = XCVR_INTERNAL;
return 0;
}
@@ -1919,10 +1753,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
-#ifdef CONFIG_NET_RX_BUSY_POLL
- "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
- "rx_busy_poll_cnt",
-#endif
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -2022,12 +1852,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- data[i++] = ss->lock_napi_yield;
- data[i++] = ss->lock_poll_yield;
- data[i++] = ss->busy_poll_miss;
- data[i++] = ss->busy_poll_cnt;
-#endif
}
}
@@ -2098,7 +1922,6 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
- .get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
.set_coalesce = myri10ge_set_coalesce,
@@ -2112,6 +1935,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
.set_phys_id = myri10ge_phys_id,
+ .get_link_ksettings = myri10ge_get_link_ksettings,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -2589,9 +2413,6 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
- /* Initialize the slice spinlock and state used for polling */
- myri10ge_ss_init_lock(ss);
-
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
@@ -2668,19 +2489,9 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
- for (i = 0; i < mgp->num_slices; i++) {
+ for (i = 0; i < mgp->num_slices; i++)
napi_disable(&mgp->ss[i].napi);
- local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
- /* Lock the slice to prevent the busy_poll handler from
- * accessing it. Later when we bring the NIC up, myri10ge_open
- * resets the slice including this lock.
- */
- while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
- pr_info("Slice %d locked\n", i);
- mdelay(1);
- }
- local_bh_enable();
- }
+
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
@@ -3119,8 +2930,8 @@ drop:
return NETDEV_TX_OK;
}
-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
const struct myri10ge_priv *mgp = netdev_priv(dev);
const struct myri10ge_slice_netstats *slice_stats;
@@ -3135,7 +2946,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
stats->rx_dropped += slice_stats->rx_dropped;
stats->tx_dropped += slice_stats->tx_dropped;
}
- return stats;
}
static void myri10ge_set_multicast_list(struct net_device *dev)
@@ -3954,9 +3764,6 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = myri10ge_busy_poll,
-#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 90eac63f9606..18af2a23a933 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -640,8 +640,10 @@ static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
-static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
-static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd);
+static int netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
@@ -2265,7 +2267,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
np->intr_status = readl(ioaddr + IntrStatus);
} while (np->intr_status);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Reenable interrupts providing nothing is trying to shut
* the chip down. */
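Alongside the ethtool changes, the polling paths here and in the s2io/vxge hunks below move from napi_complete() to napi_complete_done(), passing the amount of work actually completed so the core can account for it (for example in adaptive interrupt moderation and busy polling). A hedged sketch of the resulting idiomatic poll callback, with hypothetical foo_* helpers standing in for driver specifics:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int work_done;

	/* Process at most "budget" received packets */
	work_done = foo_clean_rx(ring, budget);

	if (work_done < budget) {
		/* All pending work handled: report it, leave polling mode
		 * and re-arm the device interrupt for this ring.
		 */
		napi_complete_done(napi, work_done);
		foo_enable_irq(ring);
	}

	return work_done;
}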
@@ -2584,7 +2586,8 @@ static int get_eeprom_len(struct net_device *dev)
return np->eeprom_size;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct netdev_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
@@ -2593,7 +2596,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return 0;
}
-static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct netdev_private *np = netdev_priv(dev);
int res;
@@ -2689,8 +2693,6 @@ static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = get_drvinfo,
.get_regs_len = get_regs_len,
.get_eeprom_len = get_eeprom_len,
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_wol = get_wol,
.set_wol = set_wol,
.get_regs = get_regs,
@@ -2699,6 +2701,8 @@ static const struct ethtool_ops ethtool_ops = {
.nway_reset = nway_reset,
.get_link = get_link,
.get_eeprom = get_eeprom,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int netdev_set_wol(struct net_device *dev, u32 newval)
@@ -2828,29 +2832,32 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data)
return 0;
}
-static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct netdev_private *np = netdev_priv(dev);
+ u32 supported, advertising;
u32 tmp;
- ecmd->port = dev->if_port;
- ethtool_cmd_speed_set(ecmd, np->speed);
- ecmd->duplex = np->duplex;
- ecmd->autoneg = np->autoneg;
- ecmd->advertising = 0;
+ ecmd->base.port = dev->if_port;
+ ecmd->base.speed = np->speed;
+ ecmd->base.duplex = np->duplex;
+ ecmd->base.autoneg = np->autoneg;
+ advertising = 0;
+
if (np->advertising & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (np->advertising & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (np->advertising & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (np->advertising & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- ecmd->supported = (SUPPORTED_Autoneg |
+ advertising |= ADVERTISED_100baseT_Full;
+ supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
- ecmd->phy_address = np->phy_addr_external;
+ ecmd->base.phy_address = np->phy_addr_external;
/*
* We intentionally report the phy address of the external
* phy, even if the internal phy is used. This is necessary
@@ -2870,62 +2877,70 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
*/
/* set information based on active port type */
- switch (ecmd->port) {
+ switch (ecmd->base.port) {
default:
case PORT_TP:
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->transceiver = XCVR_INTERNAL;
+ advertising |= ADVERTISED_TP;
break;
case PORT_MII:
- ecmd->advertising |= ADVERTISED_MII;
- ecmd->transceiver = XCVR_EXTERNAL;
+ advertising |= ADVERTISED_MII;
break;
case PORT_FIBRE:
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ advertising |= ADVERTISED_FIBRE;
break;
}
/* if autonegotiation is on, try to return the active speed/duplex */
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ if (ecmd->base.autoneg == AUTONEG_ENABLE) {
+ advertising |= ADVERTISED_Autoneg;
tmp = mii_nway_result(
np->advertising & mdio_read(dev, MII_LPA));
if (tmp == LPA_100FULL || tmp == LPA_100HALF)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ ecmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ecmd->base.speed = SPEED_10;
if (tmp == LPA_100FULL || tmp == LPA_10FULL)
- ecmd->duplex = DUPLEX_FULL;
+ ecmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ ecmd->base.duplex = DUPLEX_HALF;
}
/* ignore maxtxpkt, maxrxpkt for now */
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct netdev_private *np = netdev_priv(dev);
+ u32 advertising;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ ecmd->link_modes.advertising);
+
+ if (ecmd->base.port != PORT_TP &&
+ ecmd->base.port != PORT_MII &&
+ ecmd->base.port != PORT_FIBRE)
return -EINVAL;
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ if (ecmd->base.autoneg == AUTONEG_ENABLE) {
+ if ((advertising & (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full)) == 0) {
return -EINVAL;
}
- } else if (ecmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(ecmd);
+ } else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = ecmd->base.speed;
if (speed != SPEED_10 && speed != SPEED_100)
return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ if (ecmd->base.duplex != DUPLEX_HALF &&
+ ecmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
return -EINVAL;
@@ -2936,8 +2951,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
* transceiver are really not going to work so don't let the
* user select them.
*/
- if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
- ecmd->port == PORT_TP))
+ if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
+ ecmd->base.port == PORT_TP))
return -EINVAL;
/*
@@ -2956,30 +2971,30 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
/* WHEW! now lets bang some bits */
/* save the parms */
- dev->if_port = ecmd->port;
- np->autoneg = ecmd->autoneg;
- np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
+ dev->if_port = ecmd->base.port;
+ np->autoneg = ecmd->base.autoneg;
+ np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
if (np->autoneg == AUTONEG_ENABLE) {
/* advertise only what has been requested */
np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
np->advertising |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
np->advertising |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
np->advertising |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
np->advertising |= ADVERTISE_100FULL;
} else {
- np->speed = ethtool_cmd_speed(ecmd);
- np->duplex = ecmd->duplex;
+ np->speed = ecmd->base.speed;
+ np->duplex = ecmd->base.duplex;
/* user overriding the initial full duplex parm? */
if (np->duplex == DUPLEX_HALF)
np->full_duplex = 0;
}
/* get the right phy enabled */
- if (ecmd->port == PORT_TP)
+ if (ecmd->base.port == PORT_TP)
switch_port_internal(dev);
else
switch_port_external(dev);
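Unlike myri10ge above, natsemi keeps computing legacy u32 SUPPORTED_*/ADVERTISED_* masks internally and only translates them at the API boundary, using the ethtool conversion helpers seen in netdev_get_ecmd() and netdev_set_ecmd(). A hedged, self-contained sketch of those two helpers in isolation (the example_* wrappers are illustrative only):

#include <linux/ethtool.h>

/* legacy u32 mask -> link-mode bitmap; this direction cannot fail */
static void example_store_advertising(struct ethtool_link_ksettings *cmd,
				      u32 legacy_advertising)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						legacy_advertising);
}

/* link-mode bitmap -> legacy u32 mask; returns false if any link mode
 * beyond the first 32 bits would be lost in the conversion.
 */
static bool example_load_advertising(const struct ethtool_link_ksettings *cmd,
				     u32 *legacy_advertising)
{
	return ethtool_convert_link_mode_to_legacy_u32(legacy_advertising,
						       cmd->link_modes.advertising);
}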
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index f9d2eb9a920a..729095db3e08 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1217,12 +1217,13 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
}
/* Let ethtool retrieve info */
-static int ns83820_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int ns83820_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar, tbicr;
int fullduplex = 0;
+ u32 supported;
/*
* Here's the list of available ethtool commands from other drivers:
@@ -1244,44 +1245,47 @@ static int ns83820_get_settings(struct net_device *ndev,
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
- cmd->supported = SUPPORTED_Autoneg;
+ supported = SUPPORTED_Autoneg;
if (dev->CFG_cache & CFG_TBI_EN) {
/* we have optical interface */
- cmd->supported |= SUPPORTED_1000baseT_Half |
+ supported |= SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
} else {
/* we have copper */
- cmd->supported |= SUPPORTED_10baseT_Half |
+ supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_MII;
- cmd->port = PORT_MII;
+ cmd->base.port = PORT_MII;
}
- cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
+ cmd->base.duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
switch (cfg / CFG_SPDSTS0 & 3) {
case 2:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case 1:
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
}
- cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
+ cmd->base.autoneg = (tbicr & TBICR_MR_AN_ENABLE)
? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
/* Let ethtool change settings */
-static int ns83820_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
+static int ns83820_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar;
@@ -1306,10 +1310,10 @@ static int ns83820_set_settings(struct net_device *ndev,
spin_lock(&dev->tx_lock);
/* Set duplex */
- if (cmd->duplex != fullduplex) {
+ if (cmd->base.duplex != fullduplex) {
if (have_optical) {
/*set full duplex*/
- if (cmd->duplex == DUPLEX_FULL) {
+ if (cmd->base.duplex == DUPLEX_FULL) {
/* force full duplex */
writel(readl(dev->base + TXCFG)
| TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
@@ -1333,7 +1337,7 @@ static int ns83820_set_settings(struct net_device *ndev,
/* Set autonegotiation */
if (1) {
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
/* restart auto negotiation */
writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
dev->base + TBICR);
@@ -1348,7 +1352,7 @@ static int ns83820_set_settings(struct net_device *ndev,
}
printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name,
- cmd->autoneg ? "ENABLED" : "DISABLED");
+ cmd->base.autoneg ? "ENABLED" : "DISABLED");
}
phy_intr(ndev);
@@ -1375,10 +1379,10 @@ static u32 ns83820_get_link(struct net_device *ndev)
}
static const struct ethtool_ops ops = {
- .get_settings = ns83820_get_settings,
- .set_settings = ns83820_set_settings,
.get_drvinfo = ns83820_get_drvinfo,
- .get_link = ns83820_get_link
+ .get_link = ns83820_get_link,
+ .get_link_ksettings = ns83820_get_link_ksettings,
+ .set_link_ksettings = ns83820_set_link_ksettings,
};
static inline void ns83820_disable_interrupts(struct ns83820 *dev)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 564f682fa4dc..c5c1d0e0c16f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/*Re Enable MSI-Rx Vector*/
addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
addr += 7 - ring->ring_no;
@@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
break;
}
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the ring */
writeq(0, &bar0->rx_traffic_mask);
readl(&bar0->rx_traffic_mask);
@@ -5300,10 +5300,10 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
}
/**
- * s2io_ethtool_sset - Sets different link parameters.
+ * s2io_ethtool_set_link_ksettings - Sets different link parameters.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @info: pointer to the structure with parameters given by ethtool to set
+ * @cmd: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
* The function sets different link parameters provided by the user onto
@@ -5312,13 +5312,14 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
* 0 on success.
*/
-static int s2io_ethtool_sset(struct net_device *dev,
- struct ethtool_cmd *info)
+static int
+s2io_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct s2io_nic *sp = netdev_priv(dev);
- if ((info->autoneg == AUTONEG_ENABLE) ||
- (ethtool_cmd_speed(info) != SPEED_10000) ||
- (info->duplex != DUPLEX_FULL))
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (cmd->base.speed != SPEED_10000) ||
+ (cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
else {
s2io_close(sp->dev);
@@ -5329,10 +5330,10 @@ static int s2io_ethtool_sset(struct net_device *dev,
}
/**
- * s2io_ethtol_gset - Return link specific information.
+ * s2io_ethtool_get_link_ksettings - Return link specific information.
* @sp : private member of the device structure, pointer to the
* s2io_nic structure.
- * @info : pointer to the structure with parameters given by ethtool
+ * @cmd : pointer to the structure with parameters given by ethtool
* to return link information.
* Description:
* Returns link specific information like speed, duplex etc.. to ethtool.
@@ -5340,25 +5341,31 @@ static int s2io_ethtool_sset(struct net_device *dev,
* return 0 on success.
*/
-static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+static int
+s2io_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct s2io_nic *sp = netdev_priv(dev);
- info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- info->port = PORT_FIBRE;
- /* info->transceiver */
- info->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
if (netif_carrier_ok(sp->dev)) {
- ethtool_cmd_speed_set(info, SPEED_10000);
- info->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
- info->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- info->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -6626,8 +6633,6 @@ static int s2io_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_settings = s2io_ethtool_gset,
- .set_settings = s2io_ethtool_sset,
.get_drvinfo = s2io_ethtool_gdrvinfo,
.get_regs_len = s2io_ethtool_get_regs_len,
.get_regs = s2io_ethtool_gregs,
@@ -6643,6 +6648,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.set_phys_id = s2io_ethtool_set_led,
.get_ethtool_stats = s2io_get_ethtool_stats,
.get_sset_count = s2io_get_sset_count,
+ .get_link_ksettings = s2io_ethtool_get_link_ksettings,
+ .set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 9a2967016c18..db55e6d89cf4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -38,9 +38,9 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
};
/**
- * vxge_ethtool_sset - Sets different link parameters.
+ * vxge_ethtool_set_link_ksettings - Sets different link parameters.
* @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to set
+ * @cmd: pointer to the structure with parameters given by ethtool to set
* link information.
*
* The function sets different link parameters provided by the user onto
@@ -48,44 +48,51 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
* Return value:
* 0 on success.
*/
-static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
+static int
+vxge_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
/* We currently only support 10Gb/FULL */
- if ((info->autoneg == AUTONEG_ENABLE) ||
- (ethtool_cmd_speed(info) != SPEED_10000) ||
- (info->duplex != DUPLEX_FULL))
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (cmd->base.speed != SPEED_10000) ||
+ (cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
return 0;
}
/**
- * vxge_ethtool_gset - Return link specific information.
+ * vxge_ethtool_get_link_ksettings - Return link specific information.
* @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool
+ * @cmd: pointer to the structure with parameters given by ethtool
* to return link information.
*
* Returns link specific information like speed, duplex etc.. to ethtool.
* Return value :
* return 0 on success.
*/
-static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- info->port = PORT_FIBRE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- info->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(info, SPEED_10000);
- info->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
- info->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- info->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -1126,8 +1133,6 @@ static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
}
static const struct ethtool_ops vxge_ethtool_ops = {
- .get_settings = vxge_ethtool_gset,
- .set_settings = vxge_ethtool_sset,
.get_drvinfo = vxge_ethtool_gdrvinfo,
.get_regs_len = vxge_ethtool_get_regs_len,
.get_regs = vxge_ethtool_gregs,
@@ -1139,6 +1144,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
.flash_device = vxge_fw_flash,
+ .get_link_ksettings = vxge_ethtool_get_link_ksettings,
+ .set_link_ksettings = vxge_ethtool_set_link_ksettings,
};
void vxge_initialize_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e07b936f64ec..6a4310af5d97 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed = ring->pkts_processed;
- if (ring->pkts_processed < budget_org) {
- napi_complete(napi);
+ if (pkts_processed < budget_org) {
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
@@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
VXGE_COMPLETE_ALL_TX(vdev);
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the ring */
vxge_hw_device_unmask_all(hldev);
vxge_hw_device_flush_io(hldev);
@@ -3111,7 +3111,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
* @stats: pointer to struct rtnl_link_stats64
*
*/
-static struct rtnl_link_stats64 *
+static void
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3150,8 +3150,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->tx_bytes += bytes;
net_stats->tx_errors += txstats->tx_errors;
}
-
- return net_stats;
}
static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 9508ad782c30..967d7ca8c28c 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME
if NET_VENDOR_NETRONOME
-config NFP_NETVF
- tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver"
+config NFP
+ tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
---help---
- This driver supports SR-IOV virtual functions of
- the Netronome(R) NFP4000/NFP6000 cards working as
- a advanced Ethernet NIC.
+ This driver supports the Netronome(R) NFP4000/NFP6000 based
+ cards working as a advanced Ethernet NIC. It works with both
+ SR-IOV physical and virtual functions.
-config NFP_NET_DEBUG
- bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers"
- depends on NFP_NET || NFP_NETVF
+config NFP_DEBUG
+ bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
+ depends on NFP
---help---
Enable extra sanity checks and debugfs support in
- Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers.
+ Netronome(R) NFP4000/NFP6000 NIC drivers.
Note: selecting this option may adversely impact
performance.
diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile
index dcb7b383f634..7fb3b84b5556 100644
--- a/drivers/net/ethernet/netronome/Makefile
+++ b/drivers/net/ethernet/netronome/Makefile
@@ -2,4 +2,4 @@
# Makefile for the Netronome network device drivers
#
-obj-$(CONFIG_NFP_NETVF) += nfp/
+obj-$(CONFIG_NFP) += nfp/
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 0efb2ba9a558..6933afa69df2 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -1,15 +1,28 @@
-obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o
+obj-$(CONFIG_NFP) += nfp.o
-nfp_netvf-objs := \
+nfp-objs := \
+ nfpcore/nfp6000_pcie.o \
+ nfpcore/nfp_cppcore.o \
+ nfpcore/nfp_cpplib.o \
+ nfpcore/nfp_hwinfo.o \
+ nfpcore/nfp_mip.o \
+ nfpcore/nfp_nffw.o \
+ nfpcore/nfp_nsp.o \
+ nfpcore/nfp_nsp_eth.o \
+ nfpcore/nfp_resource.o \
+ nfpcore/nfp_rtsym.o \
+ nfpcore/nfp_target.o \
+ nfp_main.o \
nfp_net_common.o \
nfp_net_ethtool.o \
nfp_net_offload.o \
+ nfp_net_main.o \
nfp_netvf_main.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
-nfp_netvf-objs += \
+nfp-objs += \
nfp_bpf_verifier.o \
nfp_bpf_jit.o
endif
-nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
+nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
index 76a19f1796af..9513c80f7be5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -39,8 +39,6 @@
#include <linux/list.h>
#include <linux/types.h>
-#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
-
/* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
new file mode 100644
index 000000000000..dedac720fb29
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_main.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Alejandro Lucero <alejandro.lucero@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/vermagic.h>
+
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_nsp_eth.h"
+
+#include "nfpcore/nfp6000_pcie.h"
+
+#include "nfp_main.h"
+#include "nfp_net.h"
+
+static const char nfp_driver_name[] = "nfp";
+const char nfp_driver_version[] = VERMAGIC_STRING;
+
+static const struct pci_device_id nfp_pci_device_ids[] = {
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0,
+ },
+ { 0, } /* Required last entry. */
+};
+MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
+
+static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
+{
+#ifdef CONFIG_PCI_IOV
+ int err;
+
+ pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err);
+ if (!err)
+ return;
+
+ pf->limit_vfs = ~0;
+ /* Allow any setting for backwards compatibility if symbol not found */
+ if (err != -ENOENT)
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+#endif
+}
+
+static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct nfp_pf *pf = pci_get_drvdata(pdev);
+ int err;
+
+ if (num_vfs > pf->limit_vfs) {
+ nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
+ pf->limit_vfs);
+ return -EINVAL;
+ }
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
+ return err;
+ }
+
+ pf->num_vfs = num_vfs;
+
+ dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
+
+ return num_vfs;
+#endif
+ return 0;
+}
+
+static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct nfp_pf *pf = pci_get_drvdata(pdev);
+
+ /* If the VFs are assigned we cannot shut down SR-IOV without
+ * causing issues, so just leave the hardware available but
+ * disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+
+ pf->num_vfs = 0;
+
+ pci_disable_sriov(pdev);
+ dev_dbg(&pdev->dev, "Removed VFs.\n");
+#endif
+ return 0;
+}
+
+static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return nfp_pcie_sriov_disable(pdev);
+ else
+ return nfp_pcie_sriov_enable(pdev, num_vfs);
+}
+
+/**
+ * nfp_net_fw_find() - Find the correct firmware image for netdev mode
+ * @pdev: PCI Device structure
+ * @pf: NFP PF Device structure
+ *
+ * Return: firmware if found and requested successfully.
+ */
+static const struct firmware *
+nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
+{
+ const struct firmware *fw = NULL;
+ struct nfp_eth_table_port *port;
+ const char *fw_model;
+ char fw_name[256];
+ int spc, err = 0;
+ int i, j;
+
+ if (!pf->eth_tbl) {
+ dev_err(&pdev->dev, "Error: can't identify media config\n");
+ return NULL;
+ }
+
+ fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
+ if (!fw_model) {
+ dev_err(&pdev->dev, "Error: can't read part number\n");
+ return NULL;
+ }
+
+ spc = ARRAY_SIZE(fw_name);
+ spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model);
+
+ for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) {
+ port = &pf->eth_tbl->ports[i];
+ j = 1;
+ while (i + j < pf->eth_tbl->count &&
+ port->speed == port[j].speed)
+ j++;
+
+ spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc,
+ "_%dx%d", j, port->speed / 1000);
+ }
+
+ if (spc <= 0)
+ return NULL;
+
+ spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw");
+ if (spc <= 0)
+ return NULL;
+
+ err = request_firmware(&fw, fw_name, &pdev->dev);
+ if (err)
+ return NULL;
+
+ dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name);
+
+ return fw;
+}
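The name built above follows the scheme visible in the MODULE_FIRMWARE() entries at the end of this file: "netronome/nic_<assembly.partno>" followed by one "_<count>x<Gbps>" group per run of consecutive same-speed ports, then ".nffw". A hedged worked example, with a hypothetical part number and port layout:

/* For a card whose assembly.partno reads "AMDA0096-0001" and whose NSP
 * ETH table lists two 10G ports, the loop above effectively builds:
 *
 *   "netronome/nic_" + "AMDA0096-0001"        -> "netronome/nic_AMDA0096-0001"
 *   + "_2x10"  (2 ports at 10000 / 1000)      -> "netronome/nic_AMDA0096-0001_2x10"
 *   + ".nffw"                                 -> "netronome/nic_AMDA0096-0001_2x10.nffw"
 *
 * which is one of the images declared via MODULE_FIRMWARE() below and is
 * then requested with request_firmware().
 */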
+
+/**
+ * nfp_net_fw_load() - Load the firmware image
+ * @pdev: PCI Device structure
+ * @pf: NFP PF Device structure
+ * @nsp: NFP SP handle
+ *
+ * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded
+ */
+static int
+nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
+{
+ const struct firmware *fw;
+ u16 interface;
+ int err;
+
+ interface = nfp_cpp_interface(pf->cpp);
+ if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) {
+ /* Only Unit 0 should reset or load firmware */
+ dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
+ return 0;
+ }
+
+ fw = nfp_net_fw_find(pdev, pf);
+ if (!fw)
+ return 0;
+
+ dev_info(&pdev->dev, "Soft-reset, loading FW image\n");
+ err = nfp_nsp_device_soft_reset(nsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n",
+ err);
+ goto exit_release_fw;
+ }
+
+ err = nfp_nsp_load_fw(nsp, fw);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "FW loading failed: %d\n", err);
+ goto exit_release_fw;
+ }
+
+ dev_info(&pdev->dev, "Finished loading FW image\n");
+
+exit_release_fw:
+ release_firmware(fw);
+
+ return err < 0 ? err : 1;
+}
+
+static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ err = nfp_nsp_wait(nsp);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+
+ err = nfp_fw_load(pdev, pf, nsp);
+ if (err < 0) {
+ kfree(pf->eth_tbl);
+ dev_err(&pdev->dev, "Failed to load FW\n");
+ goto exit_close_nsp;
+ }
+
+ pf->fw_loaded = !!err;
+ err = 0;
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+
+ return err;
+}
+
+static void nfp_fw_unload(struct nfp_pf *pf)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
+ return;
+ }
+
+ err = nfp_nsp_device_soft_reset(nsp);
+ if (err < 0)
+ dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err);
+ else
+ dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");
+
+ nfp_nsp_close(nsp);
+}
+
+static int nfp_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct nfp_pf *pf;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0)
+ return err;
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ if (err)
+ goto err_pci_disable;
+
+ err = pci_request_regions(pdev, nfp_driver_name);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to reserve pci resources.\n");
+ goto err_pci_disable;
+ }
+
+ pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf) {
+ err = -ENOMEM;
+ goto err_rel_regions;
+ }
+ INIT_LIST_HEAD(&pf->ports);
+ pci_set_drvdata(pdev, pf);
+ pf->pdev = pdev;
+
+ pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+ if (IS_ERR_OR_NULL(pf->cpp)) {
+ err = PTR_ERR(pf->cpp);
+ if (err >= 0)
+ err = -ENOMEM;
+ goto err_disable_msix;
+ }
+
+ dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
+ nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"),
+ nfp_hwinfo_lookup(pf->cpp, "assembly.partno"),
+ nfp_hwinfo_lookup(pf->cpp, "assembly.serial"),
+ nfp_hwinfo_lookup(pf->cpp, "assembly.revision"),
+ nfp_hwinfo_lookup(pf->cpp, "cpld.version"));
+
+ err = nfp_nsp_init(pdev, pf);
+ if (err)
+ goto err_cpp_free;
+
+ nfp_pcie_sriov_read_nfd_limit(pf);
+
+ err = nfp_net_pci_probe(pf);
+ if (err)
+ goto err_fw_unload;
+
+ return 0;
+
+err_fw_unload:
+ if (pf->fw_loaded)
+ nfp_fw_unload(pf);
+ kfree(pf->eth_tbl);
+err_cpp_free:
+ nfp_cpp_free(pf->cpp);
+err_disable_msix:
+ pci_set_drvdata(pdev, NULL);
+ kfree(pf);
+err_rel_regions:
+ pci_release_regions(pdev);
+err_pci_disable:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void nfp_pci_remove(struct pci_dev *pdev)
+{
+ struct nfp_pf *pf = pci_get_drvdata(pdev);
+
+ if (!list_empty(&pf->ports))
+ nfp_net_pci_remove(pf);
+
+ nfp_pcie_sriov_disable(pdev);
+
+ if (pf->fw_loaded)
+ nfp_fw_unload(pf);
+
+ pci_set_drvdata(pdev, NULL);
+ nfp_cpp_free(pf->cpp);
+
+ kfree(pf->eth_tbl);
+ kfree(pf);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver nfp_pci_driver = {
+ .name = nfp_driver_name,
+ .id_table = nfp_pci_device_ids,
+ .probe = nfp_pci_probe,
+ .remove = nfp_pci_remove,
+ .sriov_configure = nfp_pcie_sriov_configure,
+};
+
+static int __init nfp_main_init(void)
+{
+ int err;
+
+ pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n",
+ nfp_driver_name);
+
+ nfp_net_debugfs_create();
+
+ err = pci_register_driver(&nfp_pci_driver);
+ if (err < 0)
+ goto err_destroy_debugfs;
+
+ err = pci_register_driver(&nfp_netvf_pci_driver);
+ if (err)
+ goto err_unreg_pf;
+
+ return err;
+
+err_unreg_pf:
+ pci_unregister_driver(&nfp_pci_driver);
+err_destroy_debugfs:
+ nfp_net_debugfs_destroy();
+ return err;
+}
+
+static void __exit nfp_main_exit(void)
+{
+ pci_unregister_driver(&nfp_netvf_pci_driver);
+ pci_unregister_driver(&nfp_pci_driver);
+ nfp_net_debugfs_destroy();
+}
+
+module_init(nfp_main_init);
+module_exit(nfp_main_exit);
+
+MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
+
+MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
new file mode 100644
index 000000000000..39105d0435e9
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_main.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP_MAIN_H
+#define NFP_MAIN_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+struct dentry;
+struct pci_dev;
+
+struct nfp_cpp;
+struct nfp_cpp_area;
+struct nfp_eth_table;
+
+/**
+ * struct nfp_pf - NFP PF-specific device structure
+ * @pdev: Backpointer to PCI device
+ * @cpp: Pointer to the CPP handle
+ * @ctrl_area: Pointer to the CPP area for the control BAR
+ * @tx_area: Pointer to the CPP area for the TX queues
+ * @rx_area: Pointer to the CPP area for the FL/RX queues
+ * @irq_entries: Array of MSI-X entries for all ports
+ * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
+ * @num_vfs: Number of SR-IOV VFs enabled
+ * @fw_loaded: Is the firmware loaded?
+ * @eth_tbl: NSP ETH table
+ * @ddir: Per-device debugfs directory
+ * @num_ports: Number of adapter ports
+ * @ports: Linked list of port structures (struct nfp_net)
+ */
+struct nfp_pf {
+ struct pci_dev *pdev;
+
+ struct nfp_cpp *cpp;
+
+ struct nfp_cpp_area *ctrl_area;
+ struct nfp_cpp_area *tx_area;
+ struct nfp_cpp_area *rx_area;
+
+ struct msix_entry *irq_entries;
+
+ unsigned int limit_vfs;
+ unsigned int num_vfs;
+
+ bool fw_loaded;
+
+ struct nfp_eth_table *eth_tbl;
+
+ struct dentry *ddir;
+
+ unsigned int num_ports;
+ struct list_head ports;
+};
+
+extern struct pci_driver nfp_netvf_pci_driver;
+
+int nfp_net_pci_probe(struct nfp_pf *pf);
+void nfp_net_pci_remove(struct nfp_pf *pf);
+
+#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 2115f446031e..e614a376b595 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -43,6 +43,7 @@
#define _NFP_NET_H_
#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
@@ -83,6 +84,7 @@
#define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1
+#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1)
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
@@ -109,6 +111,7 @@
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* Forward declarations */
+struct nfp_cpp;
struct nfp_net;
struct nfp_net_r_vector;
@@ -345,7 +348,7 @@ struct nfp_net_rx_ring {
* @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
- * @irq_idx: Index into MSI-X table
+ * @irq_entry: MSI-X table entry (use for talking to the device)
* @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets
* @rx_bytes: Number of received bytes
@@ -362,6 +365,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
+ * @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector
* @affinity_mask: SMP affinity mask for this vector
@@ -378,7 +382,7 @@ struct nfp_net_r_vector {
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_ring *rx_ring;
- int irq_idx;
+ u16 irq_entry;
struct u64_stats_sync rx_sync;
u64 rx_pkts;
@@ -400,6 +404,7 @@ struct nfp_net_r_vector {
u64 tx_errors;
u64 tx_busy;
+ u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
cpumask_t affinity_mask;
@@ -431,20 +436,13 @@ struct nfp_stat_pair {
* struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device
* @netdev: Backpointer to net_device structure
- * @nfp_fallback: Is the driver used in fallback mode?
* @is_vf: Is the driver attached to a VF?
- * @fw_loaded: Is the firmware loaded?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
* @xdp_prog: Installed XDP program
- * @cpp: Pointer to the CPP handle
- * @nfp_dev_cpp: Pointer to the NFP Device handle
- * @ctrl_area: Pointer to the CPP area for the control BAR
- * @tx_area: Pointer to the CPP area for the TX queues
- * @rx_area: Pointer to the CPP area for the FL/RX queues
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
* @max_mtu: Maximum supported MTU advertised by the Firmware
@@ -494,14 +492,15 @@ struct nfp_stat_pair {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
+ * @ethtool_dump_flag: Ethtool dump flag
+ * @port_list: Entry on device port list
+ * @cpp: CPP device handle if available
*/
struct nfp_net {
struct pci_dev *pdev;
struct net_device *netdev;
- unsigned nfp_fallback:1;
unsigned is_vf:1;
- unsigned fw_loaded:1;
unsigned bpf_offload_skip_sw:1;
unsigned bpf_offload_xdp:1;
@@ -515,18 +514,6 @@ struct nfp_net {
struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings;
-#ifdef CONFIG_PCI_IOV
- unsigned int num_vfs;
- struct vf_data_storage *vfinfo;
- int vf_rate_link_speed;
-#endif
-
- struct nfp_cpp *cpp;
- struct platform_device *nfp_dev_cpp;
- struct nfp_cpp_area *ctrl_area;
- struct nfp_cpp_area *tx_area;
- struct nfp_cpp_area *rx_area;
-
struct nfp_net_fw_version fw_ver;
u32 cap;
u32 max_mtu;
@@ -589,11 +576,15 @@ struct nfp_net {
u8 __iomem *qcp_cfg;
u8 __iomem *ctrl_bar;
- u8 __iomem *q_bar;
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
struct dentry *debugfs_dir;
+ u32 ethtool_dump_flag;
+
+ struct list_head port_list;
+
+ struct nfp_cpp *cpp;
};
struct nfp_net_ring_set {
@@ -770,8 +761,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
}
/* Globals */
-extern const char nfp_net_driver_name[];
-extern const char nfp_net_driver_version[];
+extern const char nfp_driver_version[];
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
@@ -789,17 +779,24 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_irqs_alloc(struct nfp_net *nn);
-void nfp_net_irqs_disable(struct nfp_net *nn);
+
+unsigned int
+nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
+ unsigned int min_irqs, unsigned int want_irqs);
+void nfp_net_irqs_disable(struct pci_dev *pdev);
+void
+nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
+ unsigned int n);
int
nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
-#ifdef CONFIG_NFP_NET_DEBUG
+#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
-void nfp_net_debugfs_adapter_add(struct nfp_net *nn);
-void nfp_net_debugfs_adapter_del(struct nfp_net *nn);
+struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
+void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
@@ -809,14 +806,20 @@ static inline void nfp_net_debugfs_destroy(void)
{
}
-static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
+static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
+{
+ return NULL;
+}
+
+static inline void
+nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}
-static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
+static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
-#endif /* CONFIG_NFP_NET_DEBUG */
+#endif /* CONFIG_NFP_DEBUG */
void nfp_net_filter_stats_timer(unsigned long data);
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e8d448109e03..074259cc8e06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,6 +42,7 @@
*/
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -280,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
}
/**
- * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
- * @nn: NFP Network structure
- * @nr_vecs: Number of MSI-X vectors to allocate
- *
- * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
+ * nfp_net_irqs_alloc() - allocates MSI-X irqs
+ * @pdev: PCI device structure
+ * @irq_entries: Array to be initialized and used to hold the irq entries
+ * @min_irqs: Minimal acceptable number of interrupts
+ * @wanted_irqs: Target number of interrupts to allocate
*
- * Return: Number of MSI-X vectors obtained or 0 on error.
+ * Return: Number of irqs obtained or 0 on error.
*/
-static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
+unsigned int
+nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
+ unsigned int min_irqs, unsigned int wanted_irqs)
{
- struct pci_dev *pdev = nn->pdev;
- int nvecs;
- int i;
+ unsigned int i;
+ int got_irqs;
- for (i = 0; i < nr_vecs; i++)
- nn->irq_entries[i].entry = i;
+ for (i = 0; i < wanted_irqs; i++)
+ irq_entries[i].entry = i;
- nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
- NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
- if (nvecs < 0) {
- nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
- NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
+ got_irqs = pci_enable_msix_range(pdev, irq_entries,
+ min_irqs, wanted_irqs);
+ if (got_irqs < 0) {
+ dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
+ min_irqs, wanted_irqs, got_irqs);
return 0;
}
- return nvecs;
+ if (got_irqs < wanted_irqs)
+ dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
+ wanted_irqs, got_irqs);
+
+ return got_irqs;
}
/**
- * nfp_net_irqs_alloc() - allocates MSI-X irqs
- * @nn: NFP Network structure
+ * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
+ * @nn: NFP Network structure
+ * @irq_entries: Table of allocated interrupts
+ * @n: Size of @irq_entries (number of entries to grab)
*
- * Return: Number of irqs obtained or 0 on error.
+ * After interrupts are allocated with nfp_net_irqs_alloc() this function
+ * should be called to assign them to a specific netdev (port).
*/
-int nfp_net_irqs_alloc(struct nfp_net *nn)
+void
+nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
+ unsigned int n)
{
- int wanted_irqs;
- unsigned int n;
-
- wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
-
- n = nfp_net_msix_alloc(nn, wanted_irqs);
- if (n == 0) {
- nn_err(nn, "Failed to allocate MSI-X IRQs\n");
- return 0;
- }
-
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
nn->num_r_vecs = nn->max_r_vecs;
- if (n < wanted_irqs)
- nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
- wanted_irqs, n);
+ memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
- return n;
+ if (nn->num_rx_rings > nn->num_r_vecs ||
+ nn->num_tx_rings > nn->num_r_vecs)
+ nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+ nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+ nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+ nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
+ nn->num_stack_tx_rings = nn->num_tx_rings;
}
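As the reworked kernel-doc states, MSI-X allocation now happens once per PCI device and the resulting entries are then handed to each port. A hedged sketch of the calling sequence a PF-level caller might use (the locals, the requested vector count, and the example_ wrapper are illustrative; error handling is elided):

static int example_setup_irqs(struct nfp_pf *pf, struct nfp_net *nn)
{
	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + 8];
	unsigned int num_irqs;

	/* Allocate the MSI-X pool once for the whole PCI device */
	num_irqs = nfp_net_irqs_alloc(pf->pdev, irq_entries,
				      NFP_NET_MIN_PORT_IRQS,
				      NFP_NET_NON_Q_VECTORS + 8);
	if (!num_irqs)
		return -ENOMEM;

	/* Hand the allocated entries to this port's nfp_net instance;
	 * nfp_net_irqs_assign() copies them into nn->irq_entries.
	 */
	nfp_net_irqs_assign(nn, irq_entries, num_irqs);

	/* Teardown is symmetric: nfp_net_irqs_disable(pf->pdev). */
	return 0;
}

Splitting allocation (per PCI device) from assignment (per port) is what allows the new PF code to share one MSI-X table across the multiple ports it now manages.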
/**
* nfp_net_irqs_disable() - Disable interrupts
- * @nn: NFP Network structure
+ * @pdev: PCI device structure
*
* Undoes what @nfp_net_irqs_alloc() does.
*/
-void nfp_net_irqs_disable(struct nfp_net *nn)
+void nfp_net_irqs_disable(struct pci_dev *pdev)
{
- pci_disable_msix(nn->pdev);
+ pci_disable_msix(pdev);
}
/**
@@ -409,10 +414,13 @@ out:
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
struct nfp_net *nn = data;
+ struct msix_entry *entry;
+
+ entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
nfp_net_read_link_status(nn);
- nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_irq_unmask(nn, entry->entry);
return IRQ_HANDLED;
}
@@ -475,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
}
/**
- * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
* @netdev: netdev structure
*/
-static void nfp_net_irqs_assign(struct net_device *netdev)
+static void nfp_net_vecs_init(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_r_vector *r_vec;
int r;
- if (nn->num_rx_rings > nn->num_r_vecs ||
- nn->num_tx_rings > nn->num_r_vecs)
- nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
- nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
-
- nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
- nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
- nn->num_stack_tx_rings = nn->num_tx_rings;
-
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
for (r = 0; r < nn->max_r_vecs; r++) {
+ struct msix_entry *entry;
+
+ entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx;
- r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
+ r_vec->irq_entry = entry->entry;
+ r_vec->irq_vector = entry->vector;
cpumask_set_cpu(r, &r_vec->affinity_mask);
}
@@ -533,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry->vector, err);
return err;
}
- nn_writeb(nn, ctrl_offset, vector_idx);
+ nn_writeb(nn, ctrl_offset, entry->entry);
return 0;
}
@@ -1459,7 +1463,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
dev_kfree_skb_any(skb);
}
-static void
+static bool
nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
@@ -1473,13 +1477,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return;
+ return false;
}
new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return;
+ return false;
}
nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
@@ -1509,6 +1513,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
+ return true;
}
static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
@@ -1613,12 +1618,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
case XDP_PASS:
break;
case XDP_TX:
- nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
- pkt_off, pkt_len);
+ if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+ tx_ring, rxbuf,
+ pkt_off, pkt_len)))
+ trace_xdp_exception(nn->netdev, xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(nn->netdev, xdp_prog, act);
case XDP_DROP:
nfp_net_rx_give_one(rx_ring, rxbuf->frag,
rxbuf->dma_addr);
@@ -1701,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (pkts_polled < budget) {
napi_complete_done(napi, pkts_polled);
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
}
return pkts_polled;
@@ -1983,7 +1991,6 @@ static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx)
{
- struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
int err;
/* Setup NAPI */
@@ -1992,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nn->netdev->name, idx);
- err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+ err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
+ r_vec);
if (err) {
netif_napi_del(&r_vec->napi);
- nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+ nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
- disable_irq(entry->vector);
+ disable_irq(r_vec->irq_vector);
- irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+ irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
- nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
+ nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
+ r_vec->irq_entry);
return 0;
}
@@ -2010,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
- struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_set_affinity_hint(r_vec->irq_vector, NULL);
netif_napi_del(&r_vec->napi);
- free_irq(entry->vector, r_vec);
+ free_irq(r_vec->irq_vector, r_vec);
}
/**
@@ -2143,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
/* Write the DMA address, size and MSI-X info to the device */
nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}
static void
@@ -2152,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
{
nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}
static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
@@ -2246,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
for (r = 0; r < nn->num_r_vecs; r++) {
napi_enable(&nn->r_vecs[r].napi);
- enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+ enable_irq(nn->r_vecs[r].irq_vector);
}
netif_tx_wake_all_queues(nn->netdev);
@@ -2370,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
nn->link_up = false;
for (r = 0; r < nn->num_r_vecs; r++) {
- disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+ disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi);
}
@@ -2638,8 +2645,8 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
}
-static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void nfp_net_stat64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct nfp_net *nn = netdev_priv(netdev);
int r;
@@ -2669,8 +2676,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
}
-
- return stats;
}
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -3256,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
- nfp_net_irqs_assign(netdev);
+ nfp_net_vecs_init(netdev);
return register_netdev(netdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index c66f3f954aa8..6e9372a18375 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = {
.llseek = seq_lseek
};
-void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
+void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
struct dentry *queues, *tx, *rx, *xdp;
- char int_name[16];
+ char name[20];
int i;
if (IS_ERR_OR_NULL(nfp_dir))
return;
- nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir);
+ sprintf(name, "port%d", id);
+ nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir))
return;
@@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
- sprintf(int_name, "%d", i);
- debugfs_create_file(int_name, S_IRUSR, rx,
+ sprintf(name, "%d", i);
+ debugfs_create_file(name, S_IRUSR, rx,
&nn->r_vecs[i], &nfp_rx_q_fops);
- debugfs_create_file(int_name, S_IRUSR, xdp,
+ debugfs_create_file(name, S_IRUSR, xdp,
&nn->r_vecs[i], &nfp_xdp_q_fops);
}
for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
- sprintf(int_name, "%d", i);
- debugfs_create_file(int_name, S_IRUSR, tx,
+ sprintf(name, "%d", i);
+ debugfs_create_file(name, S_IRUSR, tx,
&nn->r_vecs[i], &nfp_tx_q_fops);
}
}
-void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
+struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
- debugfs_remove_recursive(nn->debugfs_dir);
- nn->debugfs_dir = NULL;
+ struct dentry *dev_dir;
+
+ if (IS_ERR_OR_NULL(nfp_dir))
+ return NULL;
+
+ dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
+ if (IS_ERR_OR_NULL(dev_dir))
+ return NULL;
+
+ return dev_dir;
+}
+
+void nfp_net_debugfs_dir_clean(struct dentry **dir)
+{
+ debugfs_remove_recursive(*dir);
+ *dir = NULL;
}
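After this split there is one debugfs directory per PCI device with a port%d subdirectory per netdev beneath it. Roughly, as a sketch (the root directory name and the rx/tx/xdp parent directories are not visible in this hunk and are assumptions based on the per-ring files created earlier in this function):

/*
 *   <nfp debugfs root>/<pci dbdf>/          nfp_net_debugfs_device_add()
 *       port0/                              nfp_net_debugfs_port_add()
 *           .../rx/0  .../tx/0  .../xdp/0   per-ring files, named "%d"
 *       port1/
 *           ...
 */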
void nfp_net_debugfs_create(void)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b26e9646574..2649f7523c81 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -47,9 +47,14 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
+#include "nfpcore/nfp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+enum nfp_dump_diag {
+ NFP_DUMP_NSP_DIAG = 0,
+};
+
/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };
struct _nfp_net_et_stats {
@@ -127,19 +132,39 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
+static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version)
+{
+ struct nfp_nsp *nsp;
+
+ if (!nn->cpp)
+ return;
+
+ nsp = nfp_nsp_open(nn->cpp);
+ if (IS_ERR(nsp))
+ return;
+
+ snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu",
+ nfp_nsp_get_abi_ver_major(nsp),
+ nfp_nsp_get_abi_ver_minor(nsp));
+
+ nfp_nsp_close(nsp);
+}
+
static void nfp_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
+ char nsp_version[ETHTOOL_FWVERS_LEN] = {};
struct nfp_net *nn = netdev_priv(netdev);
- strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, nfp_net_driver_version,
- sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, nn->pdev->driver->name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
+ nfp_net_get_nspinfo(nn, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d.%d",
+ "%d.%d.%d.%d %s",
nn->fw_ver.resv, nn->fw_ver.class,
- nn->fw_ver.major, nn->fw_ver.minor);
+ nn->fw_ver.major, nn->fw_ver.minor, nsp_version);
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
@@ -558,6 +583,75 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
return 0;
}
+/* Other debug dumps
+ */
+static int
+nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
+{
+ struct nfp_resource *res;
+ int ret;
+
+ if (!nn->cpp)
+ return -EOPNOTSUPP;
+
+ dump->version = 1;
+ dump->flag = NFP_DUMP_NSP_DIAG;
+
+ res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (buffer) {
+ if (dump->len != nfp_resource_size(res)) {
+ ret = -EINVAL;
+ goto exit_release;
+ }
+
+ ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
+ nfp_resource_address(res),
+ buffer, dump->len);
+ if (ret != dump->len)
+ ret = ret < 0 ? ret : -EIO;
+ else
+ ret = 0;
+ } else {
+ dump->len = nfp_resource_size(res);
+ ret = 0;
+ }
+exit_release:
+ nfp_resource_release(res);
+
+ return ret;
+}
+
+static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (!nn->cpp)
+ return -EOPNOTSUPP;
+
+ if (val->flag != NFP_DUMP_NSP_DIAG)
+ return -EINVAL;
+
+ nn->ethtool_dump_flag = val->flag;
+
+ return 0;
+}
+
+static int
+nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL);
+}
+
+static int
+nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer);
+}
+
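Together these callbacks implement the usual two-pass ethtool dump handshake: userspace selects the dump flavour, asks for its length, allocates a buffer of exactly that size and then fetches the data, with nfp_dump_nsp_diag() rejecting any mismatch. A minimal kernel-side sketch of that sequence (simplified, error handling omitted, vzalloc() standing in for the buffer the ethtool core normally provides):

	struct ethtool_dump dump = { .flag = NFP_DUMP_NSP_DIAG };
	void *buf;

	nfp_net_set_dump(netdev, &dump);           /* select NSP diag dump */
	nfp_net_get_dump_flag(netdev, &dump);      /* fills in dump.len    */
	buf = vzalloc(dump.len);
	nfp_net_get_dump_data(netdev, &dump, buf); /* len must match       */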
static int nfp_net_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -722,6 +816,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_rxfh = nfp_net_set_rxfh,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
+ .set_dump = nfp_net_set_dump,
+ .get_dump_flag = nfp_net_get_dump_flag,
+ .get_dump_data = nfp_net_get_dump_data,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
new file mode 100644
index 000000000000..3afcdc11480c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_main.c
+ * Netronome network device driver: Main entry point
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Alejandro Lucero <alejandro.lucero@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/msi.h>
+#include <linux/random.h>
+
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp6000_pcie.h"
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+#include "nfp_main.h"
+
+#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
+
+static int nfp_is_ready(struct nfp_cpp *cpp)
+{
+ const char *cp;
+ long state;
+ int err;
+
+ cp = nfp_hwinfo_lookup(cpp, "board.state");
+ if (!cp)
+ return 0;
+
+ err = kstrtol(cp, 0, &state);
+ if (err < 0)
+ return 0;
+
+ return state == 15;
+}
+
+/**
+ * nfp_net_map_area() - Help function to map an area
+ * @cpp: NFP CPP handler
+ * @name: Name for the area
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (returned).
+ *
+ * This function is primarily to simplify the code in the main probe
+ * function. To undo the effect of this function, call

+ * @nfp_cpp_area_release_free(*area);
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
+ const char *name, int isl, int target,
+ unsigned long long addr, unsigned long size,
+ struct nfp_cpp_area **area)
+{
+ u8 __iomem *res;
+ u32 dest;
+ int err;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);
+
+ *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
+ if (!*area) {
+ err = -EIO;
+ goto err_area;
+ }
+
+ err = nfp_cpp_area_acquire(*area);
+ if (err < 0)
+ goto err_acquire;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res) {
+ err = -EIO;
+ goto err_map;
+ }
+
+ return res;
+
+err_map:
+ nfp_cpp_area_release(*area);
+err_acquire:
+ nfp_cpp_area_free(*area);
+err_area:
+ return (u8 __iomem *)ERR_PTR(err);
+}
+
+static void
+nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
+ unsigned int id)
+{
+ u8 mac_addr[ETH_ALEN];
+ const char *mac_str;
+ char name[32];
+
+ snprintf(name, sizeof(name), "eth%d.mac", id);
+
+ mac_str = nfp_hwinfo_lookup(cpp, name);
+ if (!mac_str) {
+ dev_warn(&nn->pdev->dev,
+ "Can't lookup MAC address. Generate\n");
+ eth_hw_addr_random(nn->netdev);
+ return;
+ }
+
+ if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &mac_addr[0], &mac_addr[1], &mac_addr[2],
+ &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+ dev_warn(&nn->pdev->dev,
+ "Can't parse MAC address (%s). Generate.\n", mac_str);
+ eth_hw_addr_random(nn->netdev);
+ return;
+ }
+
+ ether_addr_copy(nn->netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+}
+
+/**
+ * nfp_net_get_mac_addr() - Get the MAC address.
+ * @nn: NFP Network structure
+ * @pf: NFP PF device structure
+ * @id: NFP port id
+ *
+ * First try to get the MAC address from NSP ETH table. If that
+ * fails try HWInfo. As a last resort generate a random address.
+ */
+static void
+nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
+{
+ int i;
+
+ for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
+ if (pf->eth_tbl->ports[i].eth_index == id) {
+ const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
+
+ ether_addr_copy(nn->netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ return;
+ }
+
+ nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+}
+
+static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
+{
+ char name[256];
+ u16 interface;
+ int pcie_pf;
+ int err = 0;
+ u64 val;
+
+ interface = nfp_cpp_interface(pf->cpp);
+ pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+
+ snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
+
+ val = nfp_rtsym_read_le(pf->cpp, name, &err);
+ /* Default to one port */
+ if (err) {
+ if (err != -ENOENT)
+ nfp_err(pf->cpp, "Unable to read adapter port count\n");
+ val = 1;
+ }
+
+ return val;
+}
+
+static unsigned int
+nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ unsigned int stride, u32 start_off, u32 num_off)
+{
+ unsigned int i, min_qc, max_qc;
+
+ min_qc = readl(ctrl_bar + start_off);
+ max_qc = min_qc;
+
+ for (i = 0; i < pf->num_ports; i++) {
+ /* To make our lives simpler only accept configuration where
+ * queues are allocated to PFs in order (queues of PFn all have
+ * indexes lower than PFn+1).
+ */
+ if (max_qc > readl(ctrl_bar + start_off))
+ return 0;
+
+ max_qc = readl(ctrl_bar + start_off);
+ max_qc += readl(ctrl_bar + num_off) * stride;
+ ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+ }
+
+ return max_qc - min_qc;
+}
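A small worked example of the queue-span calculation above, with hypothetical numbers: two ports at stride 4, port 0 advertising start queue 0 with 32 rings, port 1 start queue 128 with 32 rings.

/*
 *   port 0: min_qc = max_qc = 0;  max_qc = 0 + 32 * 4   = 128
 *   port 1: 128 > 128 is false;   max_qc = 128 + 32 * 4 = 256
 *   return 256 - 0 = 256 QC structures to map
 *
 * Had port 1 started below 128, the function would return 0, which the
 * probe path further down rejects as an invalid PF QC configuration.
 */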
+
+static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
+{
+ const struct nfp_rtsym *ctrl_sym;
+ u8 __iomem *ctrl_bar;
+ char pf_symbol[256];
+ u16 interface;
+ int pcie_pf;
+
+ interface = nfp_cpp_interface(pf->cpp);
+ pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+
+ snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
+
+ ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
+ if (!ctrl_sym) {
+ dev_err(&pf->pdev->dev,
+ "Failed to find PF BAR0 symbol %s\n", pf_symbol);
+ return NULL;
+ }
+
+ if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
+ dev_err(&pf->pdev->dev,
+ "PF BAR0 too small to contain %d ports\n",
+ pf->num_ports);
+ return NULL;
+ }
+
+ ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
+ ctrl_sym->domain, ctrl_sym->target,
+ ctrl_sym->addr, ctrl_sym->size,
+ &pf->ctrl_area);
+ if (IS_ERR(ctrl_bar)) {
+ dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
+ PTR_ERR(ctrl_bar));
+ return NULL;
+ }
+
+ return ctrl_bar;
+}
+
+static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+{
+ struct nfp_net *nn;
+
+ while (!list_empty(&pf->ports)) {
+ nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
+ list_del(&nn->port_list);
+
+ nfp_net_netdev_free(nn);
+ }
+}
+
+static struct nfp_net *
+nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *tx_bar, void __iomem *rx_bar,
+ int stride, struct nfp_net_fw_version *fw_ver)
+{
+ u32 n_tx_rings, n_rx_rings;
+ struct nfp_net *nn;
+
+ n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
+ n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
+
+ /* Allocate and initialise the netdev */
+ nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
+ if (IS_ERR(nn))
+ return nn;
+
+ nn->cpp = pf->cpp;
+ nn->fw_ver = *fw_ver;
+ nn->ctrl_bar = ctrl_bar;
+ nn->tx_bar = tx_bar;
+ nn->rx_bar = rx_bar;
+ nn->is_vf = 0;
+ nn->stride_rx = stride;
+ nn->stride_tx = stride;
+
+ return nn;
+}
+
+static int
+nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
+ unsigned int id)
+{
+ int err;
+
+ /* Get MAC address */
+ nfp_net_get_mac_addr(nn, pf, id);
+
+ /* Get ME clock frequency from ctrl BAR
+ * XXX for now frequency is hardcoded until we figure out how
+ * to get the value from nfp-hwinfo into ctrl bar
+ */
+ nn->me_freq_mhz = 1200;
+
+ err = nfp_net_netdev_init(nn->netdev);
+ if (err)
+ return err;
+
+ nfp_net_debugfs_port_add(nn, pf->ddir, id);
+
+ nfp_net_info(nn);
+
+ return 0;
+}
+
+static int
+nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *tx_bar, void __iomem *rx_bar,
+ int stride, struct nfp_net_fw_version *fw_ver)
+{
+ u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
+ struct nfp_net *nn;
+ unsigned int i;
+ int err;
+
+ prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+
+ for (i = 0; i < pf->num_ports; i++) {
+ tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+ tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
+ rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
+ prev_tx_base = tgt_tx_base;
+ prev_rx_base = tgt_rx_base;
+
+ nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, fw_ver);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
+ }
+ list_add_tail(&nn->port_list, &pf->ports);
+
+ ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+ }
+
+ return 0;
+
+err_free_prev:
+ nfp_net_pf_free_netdevs(pf);
+ return err;
+}
+
+static int
+nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
+ void __iomem *ctrl_bar, void __iomem *tx_bar,
+ void __iomem *rx_bar, int stride,
+ struct nfp_net_fw_version *fw_ver)
+{
+ unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
+ struct nfp_net *nn;
+ int err;
+
+ /* Allocate the netdevs and do basic init */
+ err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, fw_ver);
+ if (err)
+ return err;
+
+ /* Get MSI-X vectors */
+ wanted_irqs = 0;
+ list_for_each_entry(nn, &pf->ports, port_list)
+ wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+ pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
+ GFP_KERNEL);
+ if (!pf->irq_entries) {
+ err = -ENOMEM;
+ goto err_nn_free;
+ }
+
+ num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
+ NFP_NET_MIN_PORT_IRQS * pf->num_ports,
+ wanted_irqs);
+ if (!num_irqs) {
+ nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
+ err = -ENOMEM;
+ goto err_vec_free;
+ }
+
+ /* Distribute IRQs to ports */
+ irqs_left = num_irqs;
+ ports_left = pf->num_ports;
+ list_for_each_entry(nn, &pf->ports, port_list) {
+ unsigned int n;
+
+ n = DIV_ROUND_UP(irqs_left, ports_left);
+ nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
+ n);
+ irqs_left -= n;
+ ports_left--;
+ }
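The DIV_ROUND_UP() loop spreads whatever was granted as evenly as possible, with earlier ports absorbing the remainder. A worked example with hypothetical counts, num_irqs = 10 across 3 ports:

/*
 *   port 0: n = DIV_ROUND_UP(10, 3) = 4   irqs_left = 6, ports_left = 2
 *   port 1: n = DIV_ROUND_UP(6, 2)  = 3   irqs_left = 3, ports_left = 1
 *   port 2: n = DIV_ROUND_UP(3, 1)  = 3
 */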
+
+ /* Finish netdev init and register */
+ id = 0;
+ list_for_each_entry(nn, &pf->ports, port_list) {
+ err = nfp_net_pf_init_port_netdev(pf, nn, id);
+ if (err)
+ goto err_prev_deinit;
+
+ id++;
+ }
+
+ return 0;
+
+err_prev_deinit:
+ list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_netdev_clean(nn->netdev);
+ }
+ nfp_net_irqs_disable(pf->pdev);
+err_vec_free:
+ kfree(pf->irq_entries);
+err_nn_free:
+ nfp_net_pf_free_netdevs(pf);
+ return err;
+}
+
+/*
+ * PCI device functions
+ */
+int nfp_net_pci_probe(struct nfp_pf *pf)
+{
+ u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
+ u32 total_tx_qcs, total_rx_qcs;
+ struct nfp_net_fw_version fw_ver;
+ u32 tx_area_sz, rx_area_sz;
+ u32 start_q;
+ int stride;
+ int err;
+
+ /* Verify that the board has completed initialization */
+ if (!nfp_is_ready(pf->cpp)) {
+ nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
+ return -EINVAL;
+ }
+
+ pf->num_ports = nfp_net_pf_get_num_ports(pf);
+
+ ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
+ if (!ctrl_bar)
+ return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+
+ nfp_net_get_fw_version(&fw_ver, ctrl_bar);
+ if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
+ fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ err = -EINVAL;
+ goto err_ctrl_unmap;
+ }
+
+ /* Determine stride */
+ if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
+ stride = 2;
+ nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
+ } else {
+ switch (fw_ver.major) {
+ case 1 ... 4:
+ stride = 4;
+ break;
+ default:
+ nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
+ fw_ver.resv, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
+ err = -EINVAL;
+ goto err_ctrl_unmap;
+ }
+ }
+
+ /* Find how many QC structs need to be mapped */
+ total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
+ NFP_NET_CFG_START_TXQ,
+ NFP_NET_CFG_MAX_TXRINGS);
+ total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
+ NFP_NET_CFG_START_RXQ,
+ NFP_NET_CFG_MAX_RXRINGS);
+ if (!total_tx_qcs || !total_rx_qcs) {
+ nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
+ total_tx_qcs, total_rx_qcs);
+ err = -EINVAL;
+ goto err_ctrl_unmap;
+ }
+
+ tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
+ rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
+
+ /* Map TX queues */
+ start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
+ NFP_PCIE_QUEUE(start_q),
+ tx_area_sz, &pf->tx_area);
+ if (IS_ERR(tx_bar)) {
+ nfp_err(pf->cpp, "Failed to map TX area.\n");
+ err = PTR_ERR(tx_bar);
+ goto err_ctrl_unmap;
+ }
+
+ /* Map RX queues */
+ start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+ rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
+ NFP_PCIE_QUEUE(start_q),
+ rx_area_sz, &pf->rx_area);
+ if (IS_ERR(rx_bar)) {
+ nfp_err(pf->cpp, "Failed to map RX area.\n");
+ err = PTR_ERR(rx_bar);
+ goto err_unmap_tx;
+ }
+
+ pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
+
+ err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, &fw_ver);
+ if (err)
+ goto err_clean_ddir;
+
+ return 0;
+
+err_clean_ddir:
+ nfp_net_debugfs_dir_clean(&pf->ddir);
+ nfp_cpp_area_release_free(pf->rx_area);
+err_unmap_tx:
+ nfp_cpp_area_release_free(pf->tx_area);
+err_ctrl_unmap:
+ nfp_cpp_area_release_free(pf->ctrl_area);
+ return err;
+}
+
+void nfp_net_pci_remove(struct nfp_pf *pf)
+{
+ struct nfp_net *nn;
+
+ list_for_each_entry(nn, &pf->ports, port_list) {
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+
+ nfp_net_netdev_clean(nn->netdev);
+ }
+
+ nfp_net_pf_free_netdevs(pf);
+
+ nfp_net_debugfs_dir_clean(&pf->ddir);
+
+ nfp_net_irqs_disable(pf->pdev);
+ kfree(pf->irq_entries);
+
+ nfp_cpp_area_release_free(pf->rx_area);
+ nfp_cpp_area_release_free(pf->tx_area);
+ nfp_cpp_area_release_free(pf->ctrl_area);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d065235034d4..39407f7cc586 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -45,9 +45,27 @@
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_main.h"
+
+/**
+ * struct nfp_net_vf - NFP VF-specific device structure
+ * @nn: NFP Net structure for this device
+ * @irq_entries: Pre-allocated array of MSI-X entries
+ * @q_bar: Pointer to mapped QC memory (NULL if TX/RX mapped directly)
+ * @ddir: Per-device debugfs directory
+ */
+struct nfp_net_vf {
+ struct nfp_net *nn;
+
+ struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
+ NFP_NET_MAX_TX_RINGS];
+ u8 __iomem *q_bar;
+
+ struct dentry *ddir;
+};
+
+static const char nfp_net_driver_name[] = "nfp_netvf";
-const char nfp_net_driver_name[] = "nfp_netvf";
-const char nfp_net_driver_version[] = "0.1";
#define PCI_DEVICE_NFP6000VF 0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
@@ -82,15 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
u32 tx_bar_off, rx_bar_off;
u32 tx_bar_sz, rx_bar_sz;
int tx_bar_no, rx_bar_no;
+ struct nfp_net_vf *vf;
+ unsigned int num_irqs;
u8 __iomem *ctrl_bar;
struct nfp_net *nn;
u32 startq;
int stride;
int err;
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, vf);
+
err = pci_enable_device_mem(pdev);
if (err)
- return err;
+ goto err_free_vf;
err = pci_request_regions(pdev, nfp_net_driver_name);
if (err) {
@@ -182,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
err = PTR_ERR(nn);
goto err_ctrl_unmap;
}
+ vf->nn = nn;
nn->fw_ver = fw_ver;
nn->ctrl_bar = ctrl_bar;
@@ -205,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
- nn->q_bar = ioremap_nocache(map_addr, bar_sz);
- if (!nn->q_bar) {
+ vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+ if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
goto err_netdev_free;
}
/* TX queues */
- nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off);
+ nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
/* RX queues */
- nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off);
+ nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
} else {
resource_size_t map_addr;
@@ -240,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nfp_netvf_get_mac_addr(nn);
- err = nfp_net_irqs_alloc(nn);
- if (!err) {
+ num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
+ NFP_NET_MIN_PORT_IRQS,
+ NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+ if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO;
goto err_unmap_rx;
}
+ nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
/* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how
@@ -257,25 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
if (err)
goto err_irqs_disable;
- pci_set_drvdata(pdev, nn);
-
nfp_net_info(nn);
- nfp_net_debugfs_adapter_add(nn);
+ vf->ddir = nfp_net_debugfs_device_add(pdev);
+ nfp_net_debugfs_port_add(nn, vf->ddir, 0);
return 0;
err_irqs_disable:
- nfp_net_irqs_disable(nn);
+ nfp_net_irqs_disable(pdev);
err_unmap_rx:
- if (!nn->q_bar)
+ if (!vf->q_bar)
iounmap(nn->rx_bar);
err_unmap_tx:
- if (!nn->q_bar)
+ if (!vf->q_bar)
iounmap(nn->tx_bar);
else
- iounmap(nn->q_bar);
+ iounmap(vf->q_bar);
err_netdev_free:
- pci_set_drvdata(pdev, NULL);
nfp_net_netdev_free(nn);
err_ctrl_unmap:
iounmap(ctrl_bar);
@@ -283,71 +310,47 @@ err_pci_regions:
pci_release_regions(pdev);
err_pci_disable:
pci_disable_device(pdev);
+err_free_vf:
+ pci_set_drvdata(pdev, NULL);
+ kfree(vf);
return err;
}
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
- struct nfp_net *nn = pci_get_drvdata(pdev);
+ struct nfp_net_vf *vf = pci_get_drvdata(pdev);
+ struct nfp_net *nn = vf->nn;
/* Note, the order is slightly different from above as we need
* to keep the nn pointer around till we have freed everything.
*/
- nfp_net_debugfs_adapter_del(nn);
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_debugfs_dir_clean(&vf->ddir);
nfp_net_netdev_clean(nn->netdev);
- nfp_net_irqs_disable(nn);
+ nfp_net_irqs_disable(pdev);
- if (!nn->q_bar) {
+ if (!vf->q_bar) {
iounmap(nn->rx_bar);
iounmap(nn->tx_bar);
} else {
- iounmap(nn->q_bar);
+ iounmap(vf->q_bar);
}
iounmap(nn->ctrl_bar);
- pci_set_drvdata(pdev, NULL);
-
nfp_net_netdev_free(nn);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(vf);
}
-static struct pci_driver nfp_netvf_pci_driver = {
+struct pci_driver nfp_netvf_pci_driver = {
.name = nfp_net_driver_name,
.id_table = nfp_netvf_pci_device_ids,
.probe = nfp_netvf_pci_probe,
.remove = nfp_netvf_pci_remove,
};
-
-static int __init nfp_netvf_init(void)
-{
- int err;
-
- pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n",
- nfp_net_driver_name);
-
- nfp_net_debugfs_create();
- err = pci_register_driver(&nfp_netvf_pci_driver);
- if (err) {
- nfp_net_debugfs_destroy();
- return err;
- }
-
- return 0;
-}
-
-static void __exit nfp_netvf_exit(void)
-{
- pci_unregister_driver(&nfp_netvf_pci_driver);
- nfp_net_debugfs_destroy();
-}
-
-module_init(nfp_netvf_init);
-module_exit(nfp_netvf_exit);
-
-MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("NFP VF network device driver");
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
new file mode 100644
index 000000000000..6cee6382deb4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_CRC32_H
+#define NFP_CRC32_H
+
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+
+/**
+ * crc32_posix_end() - Finalize POSIX CRC32 working state
+ * @crc: Current CRC32 working state
+ * @total_len: Total length of data that was CRC32'd
+ *
+ * Return: Final POSIX CRC32 value
+ */
+static inline u32 crc32_posix_end(u32 crc, size_t total_len)
+{
+ /* Extend with the length of the string. */
+ while (total_len != 0) {
+ u8 c = total_len & 0xff;
+
+ crc = crc32_be(crc, &c, 1);
+ total_len >>= 8;
+ }
+
+ return ~crc;
+}
+
+static inline u32 crc32_posix(const void *buff, size_t len)
+{
+ return crc32_posix_end(crc32_be(0, buff, len), len);
+}
+
+#endif /* NFP_CRC32_H */
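crc32_posix() is the CRC-32 flavour used by the POSIX cksum utility: the message is logically extended with its own length (least significant byte first) and the result is complemented. Its intended consumer is the resource table key derivation described in nfpcore/nfp.h below; roughly, as a sketch (the helper name is hypothetical):

static u32 nfp_resource_table_key(const char name[8])
{
	/* "All other keys are CRC32-POSIX of the 8-byte identification
	 * string" -- see nfp.h further down.
	 */
	return crc32_posix(name, 8);
}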
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
new file mode 100644
index 000000000000..42cb720b696d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp.h
+ * Interface for NFP device access and query functions.
+ */
+
+#ifndef __NFP_H__
+#define __NFP_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include "nfp_cpp.h"
+
+/* Implemented in nfp_hwinfo.c */
+
+const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
+
+/* Implemented in nfp_nsp.c */
+
+struct nfp_nsp;
+struct firmware;
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_write_eth_table(struct nfp_nsp *state,
+ const void *buf, unsigned int size);
+
+/* Implemented in nfp_resource.c */
+
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
+/* All other keys are CRC32-POSIX of the 8-byte identification string */
+
+/* ARM/PCI vNIC Interfaces 0..3 */
+#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0"
+#define NFP_RESOURCE_VNIC_PCI_1 "vnic.p1"
+#define NFP_RESOURCE_VNIC_PCI_2 "vnic.p2"
+#define NFP_RESOURCE_VNIC_PCI_3 "vnic.p3"
+
+/* NFP Hardware Info Database */
+#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
+
+/* Service Processor */
+#define NFP_RESOURCE_NSP "nfp.sp"
+#define NFP_RESOURCE_NSP_DIAG "arm.diag"
+
+/* Netronome Flow Firmware Table */
+#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
+
+/* MAC Statistics Accumulator */
+#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
+
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
+
+void nfp_resource_release(struct nfp_resource *res);
+
+u32 nfp_resource_cpp_id(struct nfp_resource *res);
+
+const char *nfp_resource_name(struct nfp_resource *res);
+
+u64 nfp_resource_address(struct nfp_resource *res);
+
+u64 nfp_resource_size(struct nfp_resource *res);
+
+#endif /* !__NFP_H__ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
new file mode 100644
index 000000000000..0e497a6154db
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP6000_NFP6000_H
+#define NFP6000_NFP6000_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* CPP Target IDs */
+#define NFP_CPP_TARGET_INVALID 0
+#define NFP_CPP_TARGET_NBI 1
+#define NFP_CPP_TARGET_QDR 2
+#define NFP_CPP_TARGET_ILA 6
+#define NFP_CPP_TARGET_MU 7
+#define NFP_CPP_TARGET_PCIE 9
+#define NFP_CPP_TARGET_ARM 10
+#define NFP_CPP_TARGET_CRYPTO 12
+#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */
+#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */
+#define NFP_CPP_TARGET_CT_XPB 14
+#define NFP_CPP_TARGET_LOCAL_SCRATCH 15
+#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH
+
+#define NFP_ISL_EMEM0 24
+
+#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL
+#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
+
+#define PUSHPULL(_pull, _push) ((_pull) << 4 | (_push) << 0)
+#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
+
+static inline int pushpull_width(int pp)
+{
+ pp &= 0xf;
+
+ if (pp == 0)
+ return -EINVAL;
+ return 2 << pp;
+}
+
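pushpull_width() turns one 4-bit width code into a byte count, 2 << code, with a code of 0 rejected as -EINVAL. For example:

/*
 *   u32 pp = PUSHPULL(2, 3);
 *   PULL_WIDTH(pp) == 2 << 2 == 8  bytes
 *   PUSH_WIDTH(pp) == 2 << 3 == 16 bytes
 */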
+static inline int nfp_cppat_mu_locality_lsb(int mode, bool addr40)
+{
+ switch (mode) {
+ case 0 ... 3:
+ return addr40 ? 38 : 30;
+ default:
+ return -EINVAL;
+ }
+}
+
+int nfp_target_pushpull(u32 cpp_id, u64 address);
+int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
+ u32 *cpp_target_id, u64 *cpp_target_address,
+ const u32 *imb_table);
+
+#endif /* NFP6000_NFP6000_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
new file mode 100644
index 000000000000..40fb19939505
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_xpb.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP6000_XPB_H
+#define NFP6000_XPB_H
+
+/* For use with NFP6000 Databook "XPB Addressing" section
+ */
+#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
+
+#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
+
+#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
+
+/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter
+ */
+#define NFP_XPB_DEVICE(island, slave, device) \
+ (NFP_XPB_OVERLAY(island) | \
+ (((slave) & 3) << 22) | \
+ (((device) & 0x3f) << 16))
+
+#endif /* NFP6000_XPB_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
new file mode 100644
index 000000000000..15cc3e77cf6a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -0,0 +1,1364 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp6000_pcie.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ *
+ * Multiplexes the NFP BARs between NFP internal resources and
+ * implements the PCIe specific interface for generic CPP bus access.
+ *
+ * The BARs are managed with refcounts and are allocated/acquired
+ * using target, token and offset/size matching. The generic CPP bus
+ * abstraction builds upon this BAR interface.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sort.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "nfp_cpp.h"
+
+#include "nfp6000/nfp6000.h"
+
+#include "nfp6000_pcie.h"
+
+#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
+ (0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
+ (0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
+ (0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
+
+#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16)
+#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
+#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0)
+#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff)
+#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27)
+#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3)
+#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0
+#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1
+#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3
+#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29)
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7)
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6
+#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7
+#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23)
+#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
+#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21)
+#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
+#define NFP_PCIE_EM 0x020000
+#define NFP_PCIE_SRAM 0x000000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
+ (0x400 + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
+ (((bar) * 8 + (slot)) * 4)
+
+/* The number of explicit BARs to reserve.
+ * Minimum is 0, maximum is 4 on the NFP6000.
+ */
+#define NFP_PCIE_EXPLICIT_BARS 2
+
+struct nfp6000_pcie;
+struct nfp6000_area_priv;
+
+/**
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp: backlink to owner
+ * @barcfg: cached contents of BAR config CSR
+ * @base: the BAR's base CPP offset
+ * @mask: mask for the BAR aperture (read only)
+ * @bitsize: bitsize of BAR aperture (read only)
+ * @index: index of the BAR
+ * @refcnt: number of current users
+ * @iomem: mapped IO memory
+ * @resource: iomem resource window
+ */
+struct nfp_bar {
+ struct nfp6000_pcie *nfp;
+ u32 barcfg;
+ u64 base; /* CPP address base */
+ u64 mask; /* Bit mask of the bar */
+ u32 bitsize; /* Bit size of the bar */
+ int index;
+ atomic_t refcnt;
+
+ void __iomem *iomem;
+ struct resource *resource;
+};
+
+#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8)
+
+struct nfp6000_pcie {
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* PCI BAR management */
+ spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */
+ int bars;
+ struct nfp_bar bar[NFP_PCI_BAR_MAX];
+ wait_queue_head_t bar_waiters;
+
+ /* Reserved BAR access */
+ struct {
+ void __iomem *csr;
+ void __iomem *em;
+ void __iomem *expl[4];
+ } iomem;
+
+ /* Explicit IO access */
+ struct {
+ struct mutex mutex; /* Lock access to this explicit group */
+ u8 master_id;
+ u8 signal_ref;
+ void __iomem *data;
+ struct {
+ void __iomem *addr;
+ int bitsize;
+ int free[4];
+ } group[4];
+ } expl;
+};
+
+static u32 nfp_bar_maptype(struct nfp_bar *bar)
+{
+ return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
+}
+
+static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
+{
+ return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
+}
+
+static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
+{
+ return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
+ + nfp_bar_resource_len(bar) * (bar->index & 7);
+}
+
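The two helpers above carve each 64-bit PCI BAR into eight equal sub-BARs, matching NFP_PCI_BAR_MAX = PCI_64BIT_BAR_COUNT * 8: index / 8 picks the physical BAR (doubled because a 64-bit BAR occupies two register slots) and index & 7 picks the slice. For example:

/*
 *   bar->index = 11:
 *     physical BAR = (11 / 8) * 2 = 2      (64-bit BARs use two slots)
 *     slice        = 11 & 7      = 3
 *     start        = pci_resource_start(pdev, 2) + 3 * (BAR2 len / 8)
 */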
+#define TARGET_WIDTH_32 4
+#define TARGET_WIDTH_64 8
+
+static int
+compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+ u32 *bar_config, u64 *bar_base,
+ int tgt, int act, int tok, u64 offset, size_t size, int width)
+{
+ int bitsize;
+ u32 newcfg;
+
+ if (tgt >= NFP_CPP_NUM_TARGETS)
+ return -EINVAL;
+
+ switch (width) {
+ case 8:
+ newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
+ break;
+ case 4:
+ newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
+ break;
+ case 0:
+ newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (act != NFP_CPP_ACTION_RW && act != 0) {
+ /* Fixed CPP mapping with specific action */
+ u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask))
+ return -EINVAL;
+ offset &= mask;
+
+ bitsize = 40 - 16;
+ } else {
+ u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+ /* Bulk mapping */
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask))
+ return -EINVAL;
+
+ offset &= mask;
+
+ bitsize = 40 - 21;
+ }
+
+ if (bar->bitsize < bitsize)
+ return -EINVAL;
+
+ newcfg |= offset >> bitsize;
+
+ if (bar_base)
+ *bar_base = offset;
+
+ if (bar_config)
+ *bar_config = newcfg;
+
+ return 0;
+}
+
+static int
+nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
+{
+ int base, slot;
+ int xbar;
+
+ base = bar->index >> 3;
+ slot = bar->index & 7;
+
+ if (nfp->iomem.csr) {
+ xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+ writel(newcfg, nfp->iomem.csr + xbar);
+ /* Readback to ensure BAR is flushed */
+ readl(nfp->iomem.csr + xbar);
+ } else {
+ xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+ pci_write_config_dword(nfp->pdev, xbar, newcfg);
+ }
+
+ bar->barcfg = newcfg;
+
+ return 0;
+}
+
+static int
+reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+ int tgt, int act, int tok, u64 offset, size_t size, int width)
+{
+ u64 newbase;
+ u32 newcfg;
+ int err;
+
+ err = compute_bar(nfp, bar, &newcfg, &newbase,
+ tgt, act, tok, offset, size, width);
+ if (err)
+ return err;
+
+ bar->base = newbase;
+
+ return nfp6000_bar_write(nfp, bar, newcfg);
+}
+
+/* Check if BAR can be used with the given parameters. */
+static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
+ u64 offset, size_t size, int width)
+{
+ int bartgt, baract, bartok;
+ int barwidth;
+ u32 maptype;
+
+ maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
+ bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
+ bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
+ baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);
+
+ barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
+ switch (barwidth) {
+ case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
+ barwidth = 4;
+ break;
+ case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
+ barwidth = 8;
+ break;
+ case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
+ barwidth = 0;
+ break;
+ default:
+ barwidth = -1;
+ break;
+ }
+
+ switch (maptype) {
+ case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
+ bartok = -1;
+ /* FALLTHROUGH */
+ case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
+ baract = NFP_CPP_ACTION_RW;
+ if (act == 0)
+ act = NFP_CPP_ACTION_RW;
+ /* FALLTHROUGH */
+ case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
+ break;
+ default:
+ /* We don't match explicit bars through the area interface */
+ return 0;
+ }
+
+ /* Make sure to match up the width */
+ if (barwidth != width)
+ return 0;
+
+ if ((bartgt < 0 || bartgt == tgt) &&
+ (bartok < 0 || bartok == tok) &&
+ (baract == act) &&
+ bar->base <= offset &&
+ (bar->base + (1 << bar->bitsize)) >= (offset + size))
+ return 1;
+
+ /* No match */
+ return 0;
+}
+
+static int
+find_matching_bar(struct nfp6000_pcie *nfp,
+ u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
+{
+ int n;
+
+ for (n = 0; n < nfp->bars; n++) {
+ struct nfp_bar *bar = &nfp->bar[n];
+
+ if (matching_bar(bar, tgt, act, tok, offset, size, width))
+ return n;
+ }
+
+ return -1;
+}
+
+/* Return -EAGAIN if no resource is available */
+static int
+find_unused_bar_noblock(struct nfp6000_pcie *nfp,
+ int tgt, int act, int tok,
+ u64 offset, size_t size, int width)
+{
+ int n, invalid = 0;
+
+ for (n = 0; n < nfp->bars; n++) {
+ struct nfp_bar *bar = &nfp->bar[n];
+ int err;
+
+ if (bar->bitsize == 0) {
+ invalid++;
+ continue;
+ }
+
+ if (atomic_read(&bar->refcnt) != 0)
+ continue;
+
+ /* Just check to see if we can make it fit... */
+ err = compute_bar(nfp, bar, NULL, NULL,
+ tgt, act, tok, offset, size, width);
+
+ if (err < 0)
+ invalid++;
+ else
+ return n;
+ }
+
+ return (n == invalid) ? -EINVAL : -EAGAIN;
+}
+
+static int
+find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
+ int tgt, int act, int tok,
+ u64 offset, size_t size, int width)
+{
+ unsigned long flags;
+ int n;
+
+ spin_lock_irqsave(&nfp->bar_lock, flags);
+
+ n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
+ if (n < 0)
+ spin_unlock_irqrestore(&nfp->bar_lock, flags);
+ else
+ __release(&nfp->bar_lock);
+
+ return n;
+}
+
+static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
+{
+ atomic_inc(&bar->refcnt);
+}
+
+static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
+{
+ if (atomic_dec_and_test(&bar->refcnt))
+ wake_up_interruptible(&nfp->bar_waiters);
+}
+
+static int
+nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
+ u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
+{
+ return wait_event_interruptible(nfp->bar_waiters,
+ (*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
+ offset, size, width))
+ != -EAGAIN);
+}
+
+static int
+nfp_alloc_bar(struct nfp6000_pcie *nfp,
+ u32 tgt, u32 act, u32 tok,
+ u64 offset, size_t size, int width, int nonblocking)
+{
+ unsigned long irqflags;
+ int barnum, retval;
+
+ if (size > (1 << 24))
+ return -EINVAL;
+
+ spin_lock_irqsave(&nfp->bar_lock, irqflags);
+ barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
+ if (barnum >= 0) {
+ /* Found a perfect match. */
+ nfp_bar_get(nfp, &nfp->bar[barnum]);
+ spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+ return barnum;
+ }
+
+ barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
+ offset, size, width);
+ if (barnum < 0) {
+ if (nonblocking)
+ goto err_nobar;
+
+ /* Wait until a BAR becomes available. The
+ * find_unused_bar function will reclaim the bar_lock
+ * if a free BAR is found.
+ */
+ spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+ retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
+ offset, size, width);
+ if (retval)
+ return retval;
+ __acquire(&nfp->bar_lock);
+ }
+
+ nfp_bar_get(nfp, &nfp->bar[barnum]);
+ retval = reconfigure_bar(nfp, &nfp->bar[barnum],
+ tgt, act, tok, offset, size, width);
+ if (retval < 0) {
+ nfp_bar_put(nfp, &nfp->bar[barnum]);
+ barnum = retval;
+ }
+
+err_nobar:
+ spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+ return barnum;
+}
+
+static void disable_bars(struct nfp6000_pcie *nfp);
+
+static int bar_cmp(const void *aptr, const void *bptr)
+{
+ const struct nfp_bar *a = aptr, *b = bptr;
+
+ if (a->bitsize == b->bitsize)
+ return a->index - b->index;
+ else
+ return a->bitsize - b->bitsize;
+}
+
+/* Map all PCI bars and fetch the actual BAR configurations from the
+ * board. We assume that the BAR with the PCIe config block is
+ * already mapped.
+ *
+ * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
+ * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
+ * BAR0.2: --
+ * BAR0.3: --
+ * BAR0.4: Reserved for Explicit 0.0-0.3 access
+ * BAR0.5: Reserved for Explicit 1.0-1.3 access
+ * BAR0.6: Reserved for Explicit 2.0-2.3 access
+ * BAR0.7: Reserved for Explicit 3.0-3.3 access
+ *
+ * BAR1.0-BAR1.7: --
+ * BAR2.0-BAR2.7: --
+ */
+static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
+{
+ const u32 barcfg_msix_general =
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ const u32 barcfg_msix_xpb =
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
+ NFP_CPP_TARGET_ISLAND_XPB);
+ const u32 barcfg_explicit[4] = {
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
+ };
+ struct nfp_bar *bar;
+ int i, bars_free;
+ int expl_groups;
+
+ bar = &nfp->bar[0];
+ for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
+ struct resource *res;
+
+ res = &nfp->pdev->resource[(i >> 3) * 2];
+
+ /* Skip over BARs that are not IORESOURCE_MEM */
+ if (!(resource_type(res) & IORESOURCE_MEM)) {
+ bar--;
+ continue;
+ }
+
+ bar->resource = res;
+ bar->barcfg = 0;
+
+ bar->nfp = nfp;
+ bar->index = i;
+ bar->mask = nfp_bar_resource_len(bar) - 1;
+ bar->bitsize = fls(bar->mask);
+ bar->base = 0;
+ bar->iomem = NULL;
+ }
+
+ nfp->bars = bar - &nfp->bar[0];
+ if (nfp->bars < 8) {
+ dev_err(nfp->dev, "No usable BARs found!\n");
+ return -EINVAL;
+ }
+
+ bars_free = nfp->bars;
+
+ /* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
+ */
+ mutex_init(&nfp->expl.mutex);
+
+ nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
+ << 4;
+ nfp->expl.signal_ref = 0x10;
+
+ /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
+ bar = &nfp->bar[0];
+ bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ nfp_bar_resource_len(bar));
+ if (bar->iomem) {
+ dev_info(nfp->dev,
+ "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
+ atomic_inc(&bar->refcnt);
+ bars_free--;
+
+ nfp6000_bar_write(nfp, bar, barcfg_msix_general);
+
+ nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+ }
+
+ if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
+ nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ expl_groups = 4;
+ } else {
+ int pf = nfp->pdev->devfn & 7;
+
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ expl_groups = 1;
+ }
+ nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
+
+ /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
+ bar = &nfp->bar[1];
+ dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
+ atomic_inc(&bar->refcnt);
+ bars_free--;
+
+ nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);
+
+ /* Use BAR0.4..BAR0.7 for EXPL IO */
+ for (i = 0; i < 4; i++) {
+ int j;
+
+ if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
+ nfp->expl.group[i].bitsize = 0;
+ continue;
+ }
+
+ bar = &nfp->bar[4 + i];
+ bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ nfp_bar_resource_len(bar));
+ if (bar->iomem) {
+ dev_info(nfp->dev,
+ "BAR0.%d RESERVED: Explicit%d Mapping\n",
+ 4 + i, i);
+ atomic_inc(&bar->refcnt);
+ bars_free--;
+
+ nfp->expl.group[i].bitsize = bar->bitsize;
+ nfp->expl.group[i].addr = bar->iomem;
+ nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);
+
+ for (j = 0; j < 4; j++)
+ nfp->expl.group[i].free[j] = true;
+ }
+ nfp->iomem.expl[i] = bar->iomem;
+ }
+
+ /* Sort bars by bit size - use the smallest possible first. */
+ sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
+ bar_cmp, NULL);
+
+ dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
+ nfp->bars, bars_free);
+
+ return 0;
+}
+
+static void disable_bars(struct nfp6000_pcie *nfp)
+{
+ struct nfp_bar *bar = &nfp->bar[0];
+ int n;
+
+ for (n = 0; n < nfp->bars; n++, bar++) {
+ if (bar->iomem) {
+ iounmap(bar->iomem);
+ bar->iomem = NULL;
+ }
+ }
+}
+
+/*
+ * Generic CPP bus access interface.
+ */
+
+struct nfp6000_area_priv {
+ atomic_t refcnt;
+
+ struct nfp_bar *bar;
+ u32 bar_offset;
+
+ u32 target;
+ u32 action;
+ u32 token;
+ u64 offset;
+ struct {
+ int read;
+ int write;
+ int bar;
+ } width;
+ size_t size;
+
+ void __iomem *iomem;
+ phys_addr_t phys;
+ struct resource resource;
+};
+
+static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ u32 target = NFP_CPP_ID_TARGET_of(dest);
+ u32 action = NFP_CPP_ID_ACTION_of(dest);
+ u32 token = NFP_CPP_ID_TOKEN_of(dest);
+ int pp;
+
+ pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
+ if (pp < 0)
+ return pp;
+
+ priv->width.read = PUSH_WIDTH(pp);
+ priv->width.write = PULL_WIDTH(pp);
+ if (priv->width.read > 0 &&
+ priv->width.write > 0 &&
+ priv->width.read != priv->width.write) {
+ return -EINVAL;
+ }
+
+ if (priv->width.read > 0)
+ priv->width.bar = priv->width.read;
+ else
+ priv->width.bar = priv->width.write;
+
+ atomic_set(&priv->refcnt, 0);
+ priv->bar = NULL;
+
+ priv->target = target;
+ priv->action = action;
+ priv->token = token;
+ priv->offset = address;
+ priv->size = size;
+ memset(&priv->resource, 0, sizeof(priv->resource));
+
+ return 0;
+}
+
+static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
+{
+}
+
+static void priv_area_get(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ atomic_inc(&priv->refcnt);
+}
+
+static int priv_area_put(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ if (WARN_ON(!atomic_read(&priv->refcnt)))
+ return 0;
+
+ return atomic_dec_and_test(&priv->refcnt);
+}
+
+static int nfp6000_area_acquire(struct nfp_cpp_area *area)
+{
+ struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ int barnum, err;
+
+ if (priv->bar) {
+ /* Already allocated. */
+ priv_area_get(area);
+ return 0;
+ }
+
+ barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
+ priv->offset, priv->size, priv->width.bar, 1);
+
+ if (barnum < 0) {
+ err = barnum;
+ goto err_alloc_bar;
+ }
+ priv->bar = &nfp->bar[barnum];
+
+ /* Calculate offset into BAR. */
+ if (nfp_bar_maptype(priv->bar) ==
+ NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
+ priv->bar_offset = priv->offset &
+ (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
+ priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
+ priv->bar, priv->target);
+ priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
+ priv->bar, priv->token);
+ } else {
+ priv->bar_offset = priv->offset & priv->bar->mask;
+ }
+
+ /* We don't actually try to acquire the resource area using
+ * request_resource. This would prevent sharing the mapped
+ * BAR between multiple CPP areas and prevent us from
+ * effectively utilizing the limited amount of BAR resources.
+ */
+ priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
+ priv->resource.name = nfp_cpp_area_name(area);
+ priv->resource.start = priv->phys;
+ priv->resource.end = priv->resource.start + priv->size - 1;
+ priv->resource.flags = IORESOURCE_MEM;
+
+ /* If the bar is already mapped in, use its mapping */
+ if (priv->bar->iomem)
+ priv->iomem = priv->bar->iomem + priv->bar_offset;
+ else
+ /* Must have been too big. Sub-allocate. */
+ priv->iomem = ioremap_nocache(priv->phys, priv->size);
+
+ if (IS_ERR_OR_NULL(priv->iomem)) {
+ dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
+ (int)priv->size, priv->bar->index);
+ err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
+ priv->iomem = NULL;
+ goto err_iomem_remap;
+ }
+
+ priv_area_get(area);
+ return 0;
+
+err_iomem_remap:
+ nfp_bar_put(nfp, priv->bar);
+ priv->bar = NULL;
+err_alloc_bar:
+ return err;
+}
+
+static void nfp6000_area_release(struct nfp_cpp_area *area)
+{
+ struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ if (!priv_area_put(area))
+ return;
+
+ if (!priv->bar->iomem)
+ iounmap(priv->iomem);
+
+ nfp_bar_put(nfp, priv->bar);
+
+ priv->bar = NULL;
+ priv->iomem = NULL;
+}
+
+static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ return priv->phys;
+}
+
+static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ return priv->iomem;
+}
+
+static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
+{
+ /* Use the BAR resource as the resource for the CPP area.
+ * This enables us to share the BAR among multiple CPP areas
+ * without resource conflicts.
+ */
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ return priv->bar->resource;
+}
+
+static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ u64 __maybe_unused *wrptr64 = kernel_vaddr;
+ const u64 __iomem __maybe_unused *rdptr64;
+ struct nfp6000_area_priv *priv;
+ u32 *wrptr32 = kernel_vaddr;
+ const u32 __iomem *rdptr32;
+ int n, width;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ rdptr64 = priv->iomem + offset;
+ rdptr32 = priv->iomem + offset;
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.read;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1))
+ return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
+ NFP_CPP_ID(priv->target,
+ priv->action,
+ priv->token),
+ priv->offset + offset,
+ kernel_vaddr, length, width);
+
+ is_64 = width == TARGET_WIDTH_64;
+
+	/* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW)
+ is_64 = false;
+
+ if (is_64) {
+ if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
+ return -EINVAL;
+ }
+
+ if (WARN_ON(!priv->bar))
+ return -EFAULT;
+
+ if (is_64)
+#ifndef __raw_readq
+ return -EINVAL;
+#else
+ for (n = 0; n < length; n += sizeof(u64))
+ *wrptr64++ = __raw_readq(rdptr64++);
+#endif
+ else
+ for (n = 0; n < length; n += sizeof(u32))
+ *wrptr32++ = __raw_readl(rdptr32++);
+
+ return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area,
+ const void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ const u64 __maybe_unused *rdptr64 = kernel_vaddr;
+ u64 __iomem __maybe_unused *wrptr64;
+ const u32 *rdptr32 = kernel_vaddr;
+ struct nfp6000_area_priv *priv;
+ u32 __iomem *wrptr32;
+ int n, width;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ wrptr64 = priv->iomem + offset;
+ wrptr32 = priv->iomem + offset;
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.write;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1))
+ return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
+ NFP_CPP_ID(priv->target,
+ priv->action,
+ priv->token),
+ priv->offset + offset,
+ kernel_vaddr, length, width);
+
+ is_64 = width == TARGET_WIDTH_64;
+
+	/* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW)
+ is_64 = false;
+
+ if (is_64) {
+ if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
+ return -EINVAL;
+ }
+
+ if (WARN_ON(!priv->bar))
+ return -EFAULT;
+
+ if (is_64)
+#ifndef __raw_writeq
+ return -EINVAL;
+#else
+ for (n = 0; n < length; n += sizeof(u64)) {
+ __raw_writeq(*rdptr64++, wrptr64++);
+ wmb();
+ }
+#endif
+ else
+ for (n = 0; n < length; n += sizeof(u32)) {
+ __raw_writel(*rdptr32++, wrptr32++);
+ wmb();
+ }
+
+ return n;
+}
+
+struct nfp6000_explicit_priv {
+ struct nfp6000_pcie *nfp;
+ struct {
+ int group;
+ int area;
+ } bar;
+ int bitsize;
+ void __iomem *data;
+ void __iomem *addr;
+};
+
+static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
+{
+ struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
+ struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+ int i, j;
+
+ mutex_lock(&nfp->expl.mutex);
+ for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
+ if (!nfp->expl.group[i].bitsize)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
+ u16 data_offset;
+
+ if (!nfp->expl.group[i].free[j])
+ continue;
+
+ priv->nfp = nfp;
+ priv->bar.group = i;
+ priv->bar.area = j;
+ priv->bitsize = nfp->expl.group[i].bitsize - 2;
+
+ data_offset = (priv->bar.group << 9) +
+ (priv->bar.area << 7);
+ priv->data = nfp->expl.data + data_offset;
+ priv->addr = nfp->expl.group[i].addr +
+ (priv->bar.area << priv->bitsize);
+ nfp->expl.group[i].free[j] = false;
+
+ mutex_unlock(&nfp->expl.mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&nfp->expl.mutex);
+
+ return -EAGAIN;
+}
+
+static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
+{
+ struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+ struct nfp6000_pcie *nfp = priv->nfp;
+
+ mutex_lock(&nfp->expl.mutex);
+ nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
+ mutex_unlock(&nfp->expl.mutex);
+}
+
+static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
+ const void *buff, size_t len)
+{
+ struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+ const u32 *src = buff;
+ size_t i;
+
+ for (i = 0; i < len; i += sizeof(u32))
+ writel(*(src++), priv->data + i);
+
+ return i;
+}
+
+static int
+nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
+ const struct nfp_cpp_explicit_command *cmd, u64 address)
+{
+ struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+ u8 signal_master, signal_ref, data_master;
+ struct nfp6000_pcie *nfp = priv->nfp;
+ int sigmask = 0;
+ u16 data_ref;
+ u32 csr[3];
+
+ if (cmd->siga_mode)
+ sigmask |= 1 << cmd->siga;
+ if (cmd->sigb_mode)
+ sigmask |= 1 << cmd->sigb;
+
+ signal_master = cmd->signal_master;
+ if (!signal_master)
+ signal_master = nfp->expl.master_id;
+
+ signal_ref = cmd->signal_ref;
+ if (signal_master == nfp->expl.master_id)
+ signal_ref = nfp->expl.signal_ref +
+ ((priv->bar.group * 4 + priv->bar.area) << 1);
+
+ data_master = cmd->data_master;
+ if (!data_master)
+ data_master = nfp->expl.master_id;
+
+ data_ref = cmd->data_ref;
+ if (data_master == nfp->expl.master_id)
+ data_ref = 0x1000 +
+ (priv->bar.group << 9) + (priv->bar.area << 7);
+
+ csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
+ NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
+ NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
+ NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);
+
+ csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
+ NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
+ NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);
+
+ csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
+ NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
+ NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
+ NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
+ NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
+ NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
+ NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);
+
+ if (nfp->iomem.csr) {
+ writel(csr[0], nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
+ priv->bar.area));
+ writel(csr[1], nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
+ priv->bar.area));
+ writel(csr[2], nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
+ priv->bar.area));
+ /* Readback to ensure BAR is flushed */
+ readl(nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
+ priv->bar.area));
+ readl(nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
+ priv->bar.area));
+ readl(nfp->iomem.csr +
+ NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
+ priv->bar.area));
+ } else {
+ pci_write_config_dword(nfp->pdev, 0x400 +
+ NFP_PCIE_BAR_EXPLICIT_BAR0(
+ priv->bar.group, priv->bar.area),
+ csr[0]);
+
+ pci_write_config_dword(nfp->pdev, 0x400 +
+ NFP_PCIE_BAR_EXPLICIT_BAR1(
+ priv->bar.group, priv->bar.area),
+ csr[1]);
+
+ pci_write_config_dword(nfp->pdev, 0x400 +
+ NFP_PCIE_BAR_EXPLICIT_BAR2(
+ priv->bar.group, priv->bar.area),
+ csr[2]);
+ }
+
+ /* Issue the 'kickoff' transaction */
+ readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));
+
+ return sigmask;
+}
+
+static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
+ void *buff, size_t len)
+{
+ struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+ u32 *dst = buff;
+ size_t i;
+
+ for (i = 0; i < len; i += sizeof(u32))
+ *(dst++) = readl(priv->data + i);
+
+ return i;
+}
+
+static int nfp6000_init(struct nfp_cpp *cpp)
+{
+ nfp_cpp_area_cache_add(cpp, SZ_64K);
+ nfp_cpp_area_cache_add(cpp, SZ_64K);
+ nfp_cpp_area_cache_add(cpp, SZ_256K);
+
+ return 0;
+}
+
+static void nfp6000_free(struct nfp_cpp *cpp)
+{
+ struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);
+
+ disable_bars(nfp);
+ kfree(nfp);
+}
+
+static void nfp6000_read_serial(struct device *dev, u8 *serial)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int pos;
+ u32 reg;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!pos) {
+ memset(serial, 0, NFP_SERIAL_LEN);
+ return;
+ }
+
+ pci_read_config_dword(pdev, pos + 4, &reg);
+ put_unaligned_be16(reg >> 16, serial + 4);
+ pci_read_config_dword(pdev, pos + 8, &reg);
+ put_unaligned_be32(reg, serial);
+}
+
+static u16 nfp6000_get_interface(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int pos;
+ u32 reg;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!pos)
+ return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+
+ pci_read_config_dword(pdev, pos + 4, &reg);
+
+ return reg & 0xffff;
+}
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+ .owner = THIS_MODULE,
+
+ .init = nfp6000_init,
+ .free = nfp6000_free,
+
+ .read_serial = nfp6000_read_serial,
+ .get_interface = nfp6000_get_interface,
+
+ .area_priv_size = sizeof(struct nfp6000_area_priv),
+ .area_init = nfp6000_area_init,
+ .area_cleanup = nfp6000_area_cleanup,
+ .area_acquire = nfp6000_area_acquire,
+ .area_release = nfp6000_area_release,
+ .area_phys = nfp6000_area_phys,
+ .area_iomem = nfp6000_area_iomem,
+ .area_resource = nfp6000_area_resource,
+ .area_read = nfp6000_area_read,
+ .area_write = nfp6000_area_write,
+
+ .explicit_priv_size = sizeof(struct nfp6000_explicit_priv),
+ .explicit_acquire = nfp6000_explicit_acquire,
+ .explicit_release = nfp6000_explicit_release,
+ .explicit_put = nfp6000_explicit_put,
+ .explicit_do = nfp6000_explicit_do,
+ .explicit_get = nfp6000_explicit_get,
+};
+
+/**
+ * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
+ * @pdev: NFP6000 PCI device
+ *
+ * Return: NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+{
+ struct nfp6000_pcie *nfp;
+ u16 interface;
+ int err;
+
+ /* Finished with card initialization. */
+ dev_info(&pdev->dev,
+ "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+
+ nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
+ if (!nfp) {
+ err = -ENOMEM;
+ goto err_ret;
+ }
+
+ nfp->dev = &pdev->dev;
+ nfp->pdev = pdev;
+ init_waitqueue_head(&nfp->bar_waiters);
+ spin_lock_init(&nfp->bar_lock);
+
+ interface = nfp6000_get_interface(&pdev->dev);
+
+ if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
+ NFP_CPP_INTERFACE_TYPE_PCI) {
+ dev_err(&pdev->dev,
+ "Interface type %d is not the expected %d\n",
+ NFP_CPP_INTERFACE_TYPE_of(interface),
+ NFP_CPP_INTERFACE_TYPE_PCI);
+ err = -ENODEV;
+ goto err_free_nfp;
+ }
+
+ if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
+ NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
+ dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
+ NFP_CPP_INTERFACE_CHANNEL_of(interface),
+ NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
+ err = -ENODEV;
+ goto err_free_nfp;
+ }
+
+ err = enable_bars(nfp, interface);
+ if (err)
+ goto err_free_nfp;
+
+ /* Probe for all the common NFP devices */
+ return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);
+
+err_free_nfp:
+ kfree(nfp);
+err_ret:
+ dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
+ return ERR_PTR(err);
+}
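+
+/* Example usage (illustrative sketch only): a PCI probe handler could create
+ * the CPP handle like this. The probe function and error handling shown here
+ * are hypothetical and not part of this driver:
+ *
+ *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ *	{
+ *		struct nfp_cpp *cpp;
+ *
+ *		cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+ *		if (IS_ERR(cpp))
+ *			return PTR_ERR(cpp);
+ *
+ *		pci_set_drvdata(pdev, cpp);
+ *		return 0;
+ *	}
+ *
+ * The handle is released again with nfp_cpp_free() on remove.
+ */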
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
new file mode 100644
index 000000000000..245d8aaaa97d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp6000_pcie.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP6000_PCIE_H
+#define NFP6000_PCIE_H
+
+#include "nfp_cpp.h"
+
+struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+
+#endif /* NFP6000_PCIE_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
new file mode 100644
index 000000000000..31fe92247f51
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_arm.h
+ * Definitions for ARM-based registers and memory spaces
+ */
+
+#ifndef NFP_ARM_H
+#define NFP_ARM_H
+
+#define NFP_ARM_QUEUE(_q) (0x100000 + (0x800 * ((_q) & 0xff)))
+#define NFP_ARM_IM 0x200000
+#define NFP_ARM_EM 0x300000
+#define NFP_ARM_GCSR 0x400000
+#define NFP_ARM_MPCORE 0x800000
+#define NFP_ARM_PL310 0xa00000
+/* Register Type: BulkBARConfig */
+#define NFP_ARM_GCSR_BULK_BAR(_bar) (0x0 + (0x4 * ((_bar) & 0x7)))
+#define NFP_ARM_GCSR_BULK_BAR_TYPE (0x1 << 31)
+#define NFP_ARM_GCSR_BULK_BAR_TYPE_BULK (0x0)
+#define NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA (0x80000000)
+#define NFP_ARM_GCSR_BULK_BAR_TGT(_x) (((_x) & 0xf) << 27)
+#define NFP_ARM_GCSR_BULK_BAR_TGT_of(_x) (((_x) >> 27) & 0xf)
+#define NFP_ARM_GCSR_BULK_BAR_TOK(_x) (((_x) & 0x3) << 25)
+#define NFP_ARM_GCSR_BULK_BAR_TOK_of(_x) (((_x) >> 25) & 0x3)
+#define NFP_ARM_GCSR_BULK_BAR_LEN (0x1 << 24)
+#define NFP_ARM_GCSR_BULK_BAR_LEN_32BIT (0x0)
+#define NFP_ARM_GCSR_BULK_BAR_LEN_64BIT (0x1000000)
+#define NFP_ARM_GCSR_BULK_BAR_ADDR(_x) ((_x) & 0x7ff)
+#define NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x) ((_x) & 0x7ff)
+/* Register Type: ExpansionBARConfig */
+#define NFP_ARM_GCSR_EXPA_BAR(_bar) (0x20 + (0x4 * ((_bar) & 0xf)))
+#define NFP_ARM_GCSR_EXPA_BAR_TYPE (0x1 << 31)
+#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA (0x0)
+#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL (0x80000000)
+#define NFP_ARM_GCSR_EXPA_BAR_TGT(_x) (((_x) & 0xf) << 27)
+#define NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x) (((_x) >> 27) & 0xf)
+#define NFP_ARM_GCSR_EXPA_BAR_TOK(_x) (((_x) & 0x3) << 25)
+#define NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x) (((_x) >> 25) & 0x3)
+#define NFP_ARM_GCSR_EXPA_BAR_LEN (0x1 << 24)
+#define NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT (0x0)
+#define NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT (0x1000000)
+#define NFP_ARM_GCSR_EXPA_BAR_ACT(_x) (((_x) & 0x1f) << 19)
+#define NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x) (((_x) >> 19) & 0x1f)
+#define NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED (0)
+#define NFP_ARM_GCSR_EXPA_BAR_ADDR(_x) ((_x) & 0x7fff)
+#define NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x) ((_x) & 0x7fff)
+/* Register Type: ExplicitBARConfig0_Reg */
+#define NFP_ARM_GCSR_EXPL0_BAR(_bar) (0x60 + (0x4 * ((_bar) & 0x7)))
+#define NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x) ((_x) & 0x3ffff)
+#define NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x) ((_x) & 0x3ffff)
+/* Register Type: ExplicitBARConfig1_Reg */
+#define NFP_ARM_GCSR_EXPL1_BAR(_bar) (0x80 + (0x4 * ((_bar) & 0x7)))
+#define NFP_ARM_GCSR_EXPL1_BAR_POSTED (0x1 << 31)
+#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x) (((_x) & 0x7f) << 24)
+#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x) (((_x) >> 24) & 0x7f)
+#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x) (((_x) & 0xff) << 16)
+#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x) (((_x) >> 16) & 0xff)
+#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x) ((_x) & 0x3fff)
+#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x) ((_x) & 0x3fff)
+/* Register Type: ExplicitBARConfig2_Reg */
+#define NFP_ARM_GCSR_EXPL2_BAR(_bar) (0xa0 + (0x4 * ((_bar) & 0x7)))
+#define NFP_ARM_GCSR_EXPL2_BAR_TGT(_x) (((_x) & 0xf) << 28)
+#define NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x) (((_x) >> 28) & 0xf)
+#define NFP_ARM_GCSR_EXPL2_BAR_ACT(_x) (((_x) & 0x1f) << 23)
+#define NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x) (((_x) >> 23) & 0x1f)
+#define NFP_ARM_GCSR_EXPL2_BAR_LEN(_x) (((_x) & 0x1f) << 18)
+#define NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x) (((_x) >> 18) & 0x1f)
+#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x) (((_x) & 0xff) << 10)
+#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x) (((_x) >> 10) & 0xff)
+#define NFP_ARM_GCSR_EXPL2_BAR_TOK(_x) (((_x) & 0x3) << 8)
+#define NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x) (((_x) >> 8) & 0x3)
+#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x) ((_x) & 0xff)
+#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x) ((_x) & 0xff)
+/* Register Type: PostedCommandSignal */
+#define NFP_ARM_GCSR_EXPL_POST(_bar) (0xc0 + (0x4 * ((_bar) & 0x7)))
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B(_x) (((_x) & 0x7f) << 25)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x) (((_x) >> 25) & 0x7f)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS (0x1 << 24)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL (0x0)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH (0x1000000)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A(_x) (((_x) & 0x7f) << 17)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x) (((_x) >> 17) & 0x7f)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS (0x1 << 16)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL (0x0)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH (0x10000)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD (0x1 << 7)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID (0x1 << 6)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD (0x1 << 5)
+#define NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID (0x1 << 4)
+#define NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE (0x1)
+/* Register Type: MPCoreBaseAddress */
+#define NFP_ARM_GCSR_MPCORE_BASE 0x00e0
+#define NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x) (((_x) & 0x7ffff) << 13)
+#define NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x) (((_x) >> 13) & 0x7ffff)
+/* Register Type: PL310BaseAddress */
+#define NFP_ARM_GCSR_PL310_BASE 0x00e4
+#define NFP_ARM_GCSR_PL310_BASE_ADDR(_x) (((_x) & 0xfffff) << 12)
+#define NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x) (((_x) >> 12) & 0xfffff)
+/* Register Type: MPCoreConfig */
+#define NFP_ARM_GCSR_MP0_CFG 0x00e8
+#define NFP_ARM_GCSR_MP0_CFG_SPI_BOOT (0x1 << 14)
+#define NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x) (((_x) & 0x3) << 12)
+#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x) (((_x) >> 12) & 0x3)
+#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE (0)
+#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG (1)
+#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR (0x1 << 8)
+#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO (0x0)
+#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI (0x100)
+#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x) (((_x) & 0xf) << 4)
+#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x) (((_x) >> 4) & 0xf)
+#define NFP_ARM_GCSR_MP0_CFG_ARMID(_x) ((_x) & 0xf)
+#define NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x) ((_x) & 0xf)
+/* Register Type: MPCoreIDCacheDataError */
+#define NFP_ARM_GCSR_MP0_CACHE_ERR 0x00ec
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7 (0x1 << 15)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6 (0x1 << 14)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5 (0x1 << 13)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4 (0x1 << 12)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3 (0x1 << 11)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2 (0x1 << 10)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1 (0x1 << 9)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0 (0x1 << 8)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7 (0x1 << 7)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6 (0x1 << 6)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5 (0x1 << 5)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4 (0x1 << 4)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3 (0x1 << 3)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2 (0x1 << 2)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1 (0x1 << 1)
+#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0 (0x1)
+/* Register Type: ARMDFT */
+#define NFP_ARM_GCSR_DFT 0x0100
+#define NFP_ARM_GCSR_DFT_DBG_REQ (0x1 << 20)
+#define NFP_ARM_GCSR_DFT_DBG_EN (0x1 << 19)
+#define NFP_ARM_GCSR_DFT_WFE_EVT_TRG (0x1 << 18)
+#define NFP_ARM_GCSR_DFT_ETM_WFI_RDY (0x1 << 17)
+#define NFP_ARM_GCSR_DFT_ETM_PWR_ON (0x1 << 16)
+#define NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x) (((_x) >> 8) & 0xf)
+#define NFP_ARM_GCSR_DFT_BIST_DONE_of(_x) (((_x) >> 4) & 0xf)
+#define NFP_ARM_GCSR_DFT_BIST_RUN(_x) ((_x) & 0x7)
+#define NFP_ARM_GCSR_DFT_BIST_RUN_of(_x) ((_x) & 0x7)
+
+/* Gasket CSRs */
+/* NOTE: These cannot be remapped, and are always at this location.
+ */
+#define NFP_ARM_GCSR_START (0xd6000000 + NFP_ARM_GCSR)
+#define NFP_ARM_GCSR_SIZE SZ_64K
+
+/* BAR CSRs
+ */
+#define NFP_ARM_GCSR_BULK_BITS 11
+#define NFP_ARM_GCSR_EXPA_BITS 15
+#define NFP_ARM_GCSR_EXPL_BITS 18
+
+#define NFP_ARM_GCSR_BULK_SHIFT (40 - 11)
+#define NFP_ARM_GCSR_EXPA_SHIFT (40 - 15)
+#define NFP_ARM_GCSR_EXPL_SHIFT (40 - 18)
+
+#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT)
+#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT)
+#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT)
+
+#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \
+ byte_mask, token, signal_master) \
+ (NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \
+ NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \
+ NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \
+ NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \
+ NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \
+ NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master))
+#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \
+ (((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \
+ NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \
+ NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \
+ NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref))
+#define NFP_ARM_GCSR_EXPL0_CSR(address) \
+ NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT)
+#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \
+ (NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \
+ ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \
+ NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \
+ ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0))
+#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \
+ (NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \
+ ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \
+ NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \
+ ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0))
+
+#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \
+ (((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \
+ NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \
+ NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \
+ NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \
+ ((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \
+ NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \
+ NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \
+ NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT))
+
+#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \
+ (((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \
+ NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \
+ NFP_ARM_GCSR_BULK_BAR_TGT(target) | \
+ NFP_ARM_GCSR_BULK_BAR_TOK(token) | \
+ ((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \
+ NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \
+ NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT))
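+
+/* Example (illustrative sketch): composing a bulk BAR CSR value for an
+ * arbitrary 40-bit CPP address on target 7, token 0, with 64-bit length
+ * select. The values are placeholders, not taken from real configuration:
+ *
+ *	u32 csr = NFP_ARM_GCSR_BULK_CSR(0, 7, 0, 1, 0x2000000000ULL);
+ *
+ * Only the upper 11 bits of the 40-bit address survive the
+ * NFP_ARM_GCSR_BULK_SHIFT (40 - 11) shift; here the ADDR field ends up
+ * as 0x100.
+ */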
+
+ /* MP Core CSRs */
+#define NFP_ARM_MPCORE_SIZE SZ_128K
+
+ /* PL320 CSRs */
+#define NFP_ARM_PCSR_SIZE SZ_64K
+
+#endif /* NFP_ARM_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
new file mode 100644
index 000000000000..edecc0a27485
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cpp.h
+ * Interface for low-level NFP CPP access.
+ * Authors: Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+#ifndef __NFP_CPP_H__
+#define __NFP_CPP_H__
+
+#include <linux/ctype.h>
+#include <linux/types.h>
+
+#ifndef NFP_SUBSYS
+#define NFP_SUBSYS "nfp"
+#endif
+
+#define nfp_err(cpp, fmt, args...) \
+ dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_warn(cpp, fmt, args...) \
+ dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_info(cpp, fmt, args...) \
+ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_dbg(cpp, fmt, args...) \
+ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+
+#define PCI_64BIT_BAR_COUNT 3
+
+#define NFP_CPP_NUM_TARGETS 16
+
+struct device;
+
+struct nfp_cpp_area;
+struct nfp_cpp;
+struct resource;
+
+/* Wildcard indicating a CPP read or write action
+ *
+ * The action used will be either read or write depending on whether a
+ * read or write instruction/call is performed on the NFP_CPP_ID. It
+ * is recommended that the RW action is used even if all actions to be
+ * performed on an NFP_CPP_ID are known to be only reads or writes.
+ * Doing so will in many cases save NFP CPP internal software
+ * resources.
+ */
+#define NFP_CPP_ACTION_RW 32
+
+#define NFP_CPP_TARGET_ID_MASK 0x1f
+
+/**
+ * NFP_CPP_ID() - pack target, token, and action into a CPP ID.
+ * @target: NFP CPP target id
+ * @action: NFP CPP action id
+ * @token: NFP CPP token id
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP
+ * functions. Some CPP devices may allow wildcard identifiers to be
+ * specified.
+ *
+ * Return: NFP CPP ID
+ */
+#define NFP_CPP_ID(target, action, token) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8))
+
+/**
+ * NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID.
+ * @target: NFP CPP target id
+ * @action: NFP CPP action id
+ * @token: NFP CPP token id
+ * @island: NFP CPP island id
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP
+ * functions. Some CPP devices may allow wildcard identifiers to be
+ * specified.
+ *
+ * Return: NFP CPP ID
+ */
+#define NFP_CPP_ISLAND_ID(target, action, token, island) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8) | (((island) & 0xff) << 0))
+
+/**
+ * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID
+ * @id: NFP CPP ID
+ *
+ * Return: NFP CPP target
+ */
+static inline u8 NFP_CPP_ID_TARGET_of(u32 id)
+{
+ return (id >> 24) & NFP_CPP_TARGET_ID_MASK;
+}
+
+/**
+ * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID
+ * @id: NFP CPP ID
+ * Return: NFP CPP token
+ */
+static inline u8 NFP_CPP_ID_TOKEN_of(u32 id)
+{
+ return (id >> 16) & 0xff;
+}
+
+/**
+ * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID
+ * @id: NFP CPP ID
+ *
+ * Return: NFP CPP action
+ */
+static inline u8 NFP_CPP_ID_ACTION_of(u32 id)
+{
+ return (id >> 8) & 0xff;
+}
+
+/**
+ * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID
+ * @id: NFP CPP ID
+ *
+ * Return: NFP CPP island
+ */
+static inline u8 NFP_CPP_ID_ISLAND_of(u32 id)
+{
+ return (id >> 0) & 0xff;
+}
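+
+/* Example (illustrative sketch): composing a CPP ID with the read/write
+ * wildcard action and pulling the fields back out. Target 7 and token 0 are
+ * arbitrary example values:
+ *
+ *	u32 id = NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0);
+ *
+ *	NFP_CPP_ID_TARGET_of(id) == 7
+ *	NFP_CPP_ID_ACTION_of(id) == NFP_CPP_ACTION_RW
+ *	NFP_CPP_ID_TOKEN_of(id)  == 0
+ */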
+
+/* NFP Interface types - logical interface for this CPP connection
+ * 4 bits are reserved for interface type.
+ */
+#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0
+#define NFP_CPP_INTERFACE_TYPE_PCI 0x1
+#define NFP_CPP_INTERFACE_TYPE_ARM 0x2
+#define NFP_CPP_INTERFACE_TYPE_RPC 0x3
+#define NFP_CPP_INTERFACE_TYPE_ILA 0x4
+
+/**
+ * NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID
+ * @type: NFP Interface Type
+ * @unit: Unit identifier for the interface type
+ * @channel: Channel identifier for the interface unit
+ *
+ * Interface IDs consist of 4 bits of interface type,
+ * 4 bits of unit identifier, and 8 bits of channel identifier.
+ *
+ * The NFP Interface ID is used in the implementation of
+ * NFP CPP API mutexes, which use the MU Atomic CompareAndWrite
+ * operation - hence the limit to 16 bits to be able to
+ * use the NFP Interface ID as a lock owner.
+ *
+ * Return: Interface ID
+ */
+#define NFP_CPP_INTERFACE(type, unit, channel) \
+ ((((type) & 0xf) << 12) | \
+ (((unit) & 0xf) << 8) | \
+ (((channel) & 0xff) << 0))
+
+/**
+ * NFP_CPP_INTERFACE_TYPE_of() - Get the interface type
+ * @interface: NFP Interface ID
+ * Return: NFP Interface ID's type
+ */
+#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf)
+
+/**
+ * NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit
+ * @interface: NFP Interface ID
+ * Return: NFP Interface ID's unit
+ */
+#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf)
+
+/**
+ * NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel
+ * @interface: NFP Interface ID
+ * Return: NFP Interface ID's channel
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff)
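+
+/* Example (illustrative sketch): a PCIe interface ID for unit 0 using the
+ * per-opener channel, and its decomposition:
+ *
+ *	u16 ifc = NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0,
+ *				    NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
+ *
+ *	NFP_CPP_INTERFACE_TYPE_of(ifc)    == NFP_CPP_INTERFACE_TYPE_PCI
+ *	NFP_CPP_INTERFACE_UNIT_of(ifc)    == 0
+ *	NFP_CPP_INTERFACE_CHANNEL_of(ifc) == 0xff
+ */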
+
+/* Implemented in nfp_cppcore.c */
+void nfp_cpp_free(struct nfp_cpp *cpp);
+u32 nfp_cpp_model(struct nfp_cpp *cpp);
+u16 nfp_cpp_interface(struct nfp_cpp *cpp);
+int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
+
+void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
+void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
+void *nfp_rtsym_cache(struct nfp_cpp *cpp);
+void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
+
+void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
+
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+ u32 cpp_id,
+ const char *name,
+ unsigned long long address,
+ unsigned long size);
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address,
+ unsigned long size);
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *buffer, size_t length);
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *buffer, size_t length);
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+ unsigned long long offset, unsigned long size);
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area);
+phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area);
+void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ u32 *value);
+int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ u32 value);
+int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ u64 *value);
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ u64 value);
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ u32 value, size_t length);
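+
+/* Example (illustrative sketch): typical lifecycle of a CPP area. The CPP ID,
+ * address and size are arbitrary placeholders:
+ *
+ *	struct nfp_cpp_area *area;
+ *	u32 val;
+ *	int err;
+ *
+ *	area = nfp_cpp_area_alloc_with_name(cpp,
+ *					    NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+ *					    "example.area", 0x100000, 4096);
+ *	if (!area)
+ *		return -ENOMEM;
+ *
+ *	err = nfp_cpp_area_acquire(area);
+ *	if (err) {
+ *		nfp_cpp_area_free(area);
+ *		return err;
+ *	}
+ *
+ *	err = nfp_cpp_area_readl(area, 0, &val);
+ *
+ *	nfp_cpp_area_release_free(area);
+ */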
+
+int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value);
+int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value);
+int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value);
+
+/* Implemented in nfp_cpplib.c */
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, void *kernel_vaddr, size_t length);
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length);
+int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u32 *value);
+int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u32 value);
+int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u64 *value);
+int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u64 value);
+
+struct nfp_cpp_mutex;
+
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+ unsigned long long address, u32 key_id);
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address,
+ u32 key_id);
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+
+struct nfp_cpp_explicit;
+
+struct nfp_cpp_explicit_command {
+ u32 cpp_id;
+ u16 data_ref;
+ u8 data_master;
+ u8 len;
+ u8 byte_mask;
+ u8 signal_master;
+ u8 signal_ref;
+ u8 posted;
+ u8 siga;
+ u8 sigb;
+ s8 siga_mode;
+ s8 sigb_mode;
+};
+
+#define NFP_SERIAL_LEN 6
+
+/**
+ * struct nfp_cpp_operations - NFP CPP operations structure
+ * @area_priv_size: Size of the nfp_cpp_area private data
+ * @owner: Owner module
+ * @init: Initialize the NFP CPP bus
+ * @free: Free the bus
+ * @read_serial: Read serial number to memory provided
+ * @get_interface: Return CPP interface
+ * @area_init: Initialize a new NFP CPP area (not serialized)
+ * @area_cleanup: Clean up a NFP CPP area (not serialized)
+ * @area_acquire: Acquire the NFP CPP area (serialized)
+ * @area_release: Release area (serialized)
+ * @area_resource: Get resource range of area (not serialized)
+ * @area_phys: Get physical address of area (not serialized)
+ * @area_iomem: Get iomem of area (not serialized)
+ * @area_read: Perform a read from a NFP CPP area (serialized)
+ * @area_write: Perform a write to a NFP CPP area (serialized)
+ * @explicit_priv_size: Size of an explicit's private area
+ * @explicit_acquire: Acquire an explicit area
+ * @explicit_release: Release an explicit area
+ * @explicit_put: Write data to send
+ * @explicit_get: Read data received
+ * @explicit_do: Perform the transaction
+ */
+struct nfp_cpp_operations {
+ size_t area_priv_size;
+ struct module *owner;
+
+ int (*init)(struct nfp_cpp *cpp);
+ void (*free)(struct nfp_cpp *cpp);
+
+ void (*read_serial)(struct device *dev, u8 *serial);
+ u16 (*get_interface)(struct device *dev);
+
+ int (*area_init)(struct nfp_cpp_area *area,
+ u32 dest, unsigned long long address,
+ unsigned long size);
+ void (*area_cleanup)(struct nfp_cpp_area *area);
+ int (*area_acquire)(struct nfp_cpp_area *area);
+ void (*area_release)(struct nfp_cpp_area *area);
+ struct resource *(*area_resource)(struct nfp_cpp_area *area);
+ phys_addr_t (*area_phys)(struct nfp_cpp_area *area);
+ void __iomem *(*area_iomem)(struct nfp_cpp_area *area);
+ int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr,
+ unsigned long offset, unsigned int length);
+ int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr,
+ unsigned long offset, unsigned int length);
+
+ size_t explicit_priv_size;
+ int (*explicit_acquire)(struct nfp_cpp_explicit *expl);
+ void (*explicit_release)(struct nfp_cpp_explicit *expl);
+ int (*explicit_put)(struct nfp_cpp_explicit *expl,
+ const void *buff, size_t len);
+ int (*explicit_get)(struct nfp_cpp_explicit *expl,
+ void *buff, size_t len);
+ int (*explicit_do)(struct nfp_cpp_explicit *expl,
+ const struct nfp_cpp_explicit_command *cmd,
+ u64 address);
+};
+
+struct nfp_cpp *
+nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
+ struct device *parent, void *priv);
+void *nfp_cpp_priv(struct nfp_cpp *priv);
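+
+/* Example (illustrative sketch): how a transport might register itself. The
+ * operations table and helpers below are placeholders; the PCIe code in
+ * nfp6000_pcie.c is the real in-tree instance:
+ *
+ *	static const struct nfp_cpp_operations my_ops = {
+ *		.owner		= THIS_MODULE,
+ *		.area_priv_size	= sizeof(struct my_area_priv),
+ *		.area_init	= my_area_init,
+ *		.area_acquire	= my_area_acquire,
+ *		.area_release	= my_area_release,
+ *		.area_read	= my_area_read,
+ *		.area_write	= my_area_write,
+ *	};
+ *
+ *	cpp = nfp_cpp_from_operations(&my_ops, parent_dev, my_priv);
+ */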
+
+int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size);
+
+/* The following section contains extensions to the
+ * NFP CPP API, to be used in a Linux kernel-space context.
+ */
+
+/* Use this channel ID for multiple virtual channel interfaces
+ * (i.e. ARM and PCIe) when setting up the interface field.
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255
+struct device *nfp_cpp_device(struct nfp_cpp *cpp);
+
+/* Return code masks for nfp_cpp_explicit_do()
+ */
+#define NFP_SIGNAL_MASK_A BIT(0) /* Signal A fired */
+#define NFP_SIGNAL_MASK_B BIT(1) /* Signal B fired */
+
+enum nfp_cpp_explicit_signal_mode {
+ NFP_SIGNAL_NONE = 0,
+ NFP_SIGNAL_PUSH = 1,
+ NFP_SIGNAL_PUSH_OPTIONAL = -1,
+ NFP_SIGNAL_PULL = 2,
+ NFP_SIGNAL_PULL_OPTIONAL = -2,
+};
+
+struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp);
+int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id,
+ u8 len, u8 mask);
+int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
+ u8 data_master, u16 data_ref);
+int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
+ u8 signal_master, u8 signal_ref);
+int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
+ u8 siga,
+ enum nfp_cpp_explicit_signal_mode siga_mode,
+ u8 sigb,
+ enum nfp_cpp_explicit_signal_mode sigb_mode);
+int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
+ const void *buff, size_t len);
+int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address);
+int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len);
+void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl);
+struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl);
+void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit);
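+
+/* Example (illustrative sketch): rough call ordering for a pull (read-like)
+ * explicit transaction. The CPP ID, address, and the len/mask arguments are
+ * placeholders; real users also set up data/signal references as needed:
+ *
+ *	struct nfp_cpp_explicit *expl;
+ *	u64 data = 0;
+ *	int err;
+ *
+ *	expl = nfp_cpp_explicit_acquire(cpp);
+ *	if (!expl)
+ *		return -EBUSY;
+ *
+ *	nfp_cpp_explicit_set_target(expl, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+ *				    0, 0xff);
+ *	err = nfp_cpp_explicit_do(expl, 0x100000);
+ *	if (err >= 0)
+ *		err = nfp_cpp_explicit_get(expl, &data, sizeof(data));
+ *
+ *	nfp_cpp_explicit_release(expl);
+ */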
+
+/* Implemented in nfp_cpplib.c */
+
+int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model);
+
+int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
+ u64 addr, void *buff, size_t len,
+ int width_read);
+
+int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id,
+ u64 addr, const void *buff, size_t len,
+ int width_write);
+
+#endif /* !__NFP_CPP_H__ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
new file mode 100644
index 000000000000..40108e66c654
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -0,0 +1,1746 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cppcore.c
+ * Provides low-level access to the NFP's internal CPP bus
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "nfp_arm.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_ARM_GCSR_SOFTMODEL2 0x0000014c
+#define NFP_ARM_GCSR_SOFTMODEL3 0x00000150
+
+struct nfp_cpp_resource {
+ struct list_head list;
+ const char *name;
+ u32 cpp_id;
+ u64 start;
+ u64 end;
+};
+
+struct nfp_cpp_mutex {
+ struct list_head list;
+ struct nfp_cpp *cpp;
+ int target;
+ u16 usage;
+ u16 depth;
+ unsigned long long address;
+ u32 key;
+};
+
+struct nfp_cpp {
+ struct device dev;
+
+ void *priv; /* Private data of the low-level implementation */
+
+ u32 model;
+ u16 interface;
+ u8 serial[NFP_SERIAL_LEN];
+
+ const struct nfp_cpp_operations *op;
+ struct list_head resource_list; /* NFP CPP resource list */
+ struct list_head mutex_cache; /* Mutex cache */
+ rwlock_t resource_lock;
+ wait_queue_head_t waitq;
+
+ /* NFP6000 CPP Mapping Table */
+ u32 imb_cat_table[16];
+
+ /* Cached areas for cpp/xpb readl/writel speedups */
+ struct mutex area_cache_mutex; /* Lock for the area cache */
+ struct list_head area_cache_list;
+
+ /* Cached information */
+ void *hwinfo;
+ void *rtsym;
+};
+
+/* Element of the area_cache_list */
+struct nfp_cpp_area_cache {
+ struct list_head entry;
+ u32 id;
+ u64 addr;
+ u32 size;
+ struct nfp_cpp_area *area;
+};
+
+struct nfp_cpp_area {
+ struct nfp_cpp *cpp;
+ struct kref kref;
+ atomic_t refcount;
+ struct mutex mutex; /* Lock for the area's refcount */
+ unsigned long long offset;
+ unsigned long size;
+ struct nfp_cpp_resource resource;
+ void __iomem *iomem;
+ /* Here follows the 'priv' part of nfp_cpp_area. */
+};
+
+struct nfp_cpp_explicit {
+ struct nfp_cpp *cpp;
+ struct nfp_cpp_explicit_command cmd;
+ /* Here follows the 'priv' part of nfp_cpp_explicit. */
+};
+
+static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
+{
+ struct nfp_cpp_resource *tmp;
+ struct list_head *pos;
+
+ list_for_each(pos, head) {
+ tmp = container_of(pos, struct nfp_cpp_resource, list);
+
+ if (tmp->cpp_id > res->cpp_id)
+ break;
+
+ if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
+ break;
+ }
+
+ list_add_tail(&res->list, pos);
+}
+
+static void __resource_del(struct nfp_cpp_resource *res)
+{
+ list_del_init(&res->list);
+}
+
+static void __release_cpp_area(struct kref *kref)
+{
+ struct nfp_cpp_area *area =
+ container_of(kref, struct nfp_cpp_area, kref);
+ struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
+
+ if (area->cpp->op->area_cleanup)
+ area->cpp->op->area_cleanup(area);
+
+ write_lock(&cpp->resource_lock);
+ __resource_del(&area->resource);
+ write_unlock(&cpp->resource_lock);
+ kfree(area);
+}
+
+static void nfp_cpp_area_put(struct nfp_cpp_area *area)
+{
+ kref_put(&area->kref, __release_cpp_area);
+}
+
+static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
+{
+ kref_get(&area->kref);
+
+ return area;
+}
+
+/**
+ * nfp_cpp_free() - free the CPP handle
+ * @cpp: CPP handle
+ */
+void nfp_cpp_free(struct nfp_cpp *cpp)
+{
+ struct nfp_cpp_area_cache *cache, *ctmp;
+ struct nfp_cpp_resource *res, *rtmp;
+ struct nfp_cpp_mutex *mutex, *mtmp;
+
+ /* There should be no mutexes in the cache at this point. */
+ WARN_ON(!list_empty(&cpp->mutex_cache));
+ /* .. but if there are, unlock them and complain. */
+ list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
+ dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
+ mutex->target, (unsigned long long)mutex->address,
+ mutex->depth, mutex->usage);
+
+ /* Forcing an unlock */
+ mutex->depth = 1;
+ nfp_cpp_mutex_unlock(mutex);
+
+ /* Forcing a free */
+ mutex->usage = 1;
+ nfp_cpp_mutex_free(mutex);
+ }
+
+ /* Remove all caches */
+ list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
+ list_del(&cache->entry);
+ if (cache->id)
+ nfp_cpp_area_release(cache->area);
+ nfp_cpp_area_free(cache->area);
+ kfree(cache);
+ }
+
+ /* There should be no dangling areas at this point */
+ WARN_ON(!list_empty(&cpp->resource_list));
+
+ /* .. but if they weren't, try to clean up. */
+ list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
+ struct nfp_cpp_area *area = container_of(res,
+ struct nfp_cpp_area,
+ resource);
+
+ dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
+ NFP_CPP_ID_TARGET_of(res->cpp_id),
+ NFP_CPP_ID_ACTION_of(res->cpp_id),
+ NFP_CPP_ID_TOKEN_of(res->cpp_id),
+ res->start, res->end,
+ res->name ? " " : "",
+ res->name ? res->name : "");
+
+ if (area->cpp->op->area_release)
+ area->cpp->op->area_release(area);
+
+ __release_cpp_area(&area->kref);
+ }
+
+ if (cpp->op->free)
+ cpp->op->free(cpp);
+
+ kfree(cpp->hwinfo);
+ kfree(cpp->rtsym);
+
+ device_unregister(&cpp->dev);
+
+ kfree(cpp);
+}
+
+/**
+ * nfp_cpp_model() - Retrieve the Model ID of the NFP
+ * @cpp: NFP CPP handle
+ *
+ * Return: NFP CPP Model ID
+ */
+u32 nfp_cpp_model(struct nfp_cpp *cpp)
+{
+ return cpp->model;
+}
+
+/**
+ * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
+ * @cpp: NFP CPP handle
+ *
+ * Return: NFP CPP Interface ID
+ */
+u16 nfp_cpp_interface(struct nfp_cpp *cpp)
+{
+ return cpp->interface;
+}
+
+/**
+ * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
+ * @cpp: NFP CPP handle
+ * @serial: Pointer to NFP serial number
+ *
+ * Return: Length of NFP serial number
+ */
+int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
+{
+ *serial = &cpp->serial[0];
+ return sizeof(cpp->serial);
+}
+
+void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
+{
+ return cpp->hwinfo;
+}
+
+void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
+{
+ cpp->hwinfo = val;
+}
+
+void *nfp_rtsym_cache(struct nfp_cpp *cpp)
+{
+ return cpp->rtsym;
+}
+
+void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
+{
+ cpp->rtsym = val;
+}
+
+/**
+ * nfp_nffw_cache_flush() - Flush cached firmware information
+ * @cpp: NFP CPP handle
+ *
+ * Flush cached firmware information. This function should be called
+ * every time firmware is loaded or unloaded.
+ */
+void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
+{
+ kfree(nfp_rtsym_cache(cpp));
+ nfp_rtsym_cache_set(cpp, NULL);
+}
+
+/**
+ * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
+ * @cpp: CPP device handle
+ * @dest: NFP CPP ID
+ * @name: Name of region
+ * @address: Address of region
+ * @size: Size of region
+ *
+ * Allocate and initialize a CPP area structure. The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * Return: NFP CPP area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+ u64 tmp64 = address;
+ int err, name_len;
+
+ /* Remap from cpp_island to cpp_target */
+ err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+ if (err < 0)
+ return NULL;
+
+ address = tmp64;
+
+ if (!name)
+ name = "(reserved)";
+
+ name_len = strlen(name) + 1;
+ area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
+ GFP_KERNEL);
+ if (!area)
+ return NULL;
+
+ area->cpp = cpp;
+ area->resource.name = (void *)area + sizeof(*area) +
+ cpp->op->area_priv_size;
+ memcpy((char *)area->resource.name, name, name_len);
+
+ area->resource.cpp_id = dest;
+ area->resource.start = address;
+ area->resource.end = area->resource.start + size - 1;
+ INIT_LIST_HEAD(&area->resource.list);
+
+ atomic_set(&area->refcount, 0);
+ kref_init(&area->kref);
+ mutex_init(&area->mutex);
+
+ if (cpp->op->area_init) {
+ int err;
+
+ err = cpp->op->area_init(area, dest, address, size);
+ if (err < 0) {
+ kfree(area);
+ return NULL;
+ }
+ }
+
+ write_lock(&cpp->resource_lock);
+ __resource_add(&cpp->resource_list, &area->resource);
+ write_unlock(&cpp->resource_lock);
+
+ area->offset = address;
+ area->size = size;
+
+ return area;
+}
+
+/**
+ * nfp_cpp_area_alloc() - allocate a new CPP area
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: Start address on CPP target
+ * @size: Size of area in bytes
+ *
+ * Allocate and initialize a CPP area structure. The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * Return: NFP CPP Area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
+ unsigned long long address, unsigned long size)
+{
+ return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/**
+ * nfp_cpp_area_free() - free up the CPP area
+ * @area: CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+ nfp_cpp_area_put(area);
+}
+
+/**
+ * nfp_cpp_area_acquire() - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ mutex_lock(&area->mutex);
+ if (atomic_inc_return(&area->refcount) == 1) {
+ int (*a_a)(struct nfp_cpp_area *);
+
+ a_a = area->cpp->op->area_acquire;
+ if (a_a) {
+ int err;
+
+ wait_event_interruptible(area->cpp->waitq,
+ (err = a_a(area)) != -EAGAIN);
+ if (err < 0) {
+ atomic_dec(&area->refcount);
+ mutex_unlock(&area->mutex);
+ return err;
+ }
+ }
+ }
+ mutex_unlock(&area->mutex);
+
+ nfp_cpp_area_get(area);
+ return 0;
+}
+
+/**
+ * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ *
+ * NOTE: Returns -EAGAIN if no area is available
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
+{
+ mutex_lock(&area->mutex);
+ if (atomic_inc_return(&area->refcount) == 1) {
+ if (area->cpp->op->area_acquire) {
+ int err;
+
+ err = area->cpp->op->area_acquire(area);
+ if (err < 0) {
+ atomic_dec(&area->refcount);
+ mutex_unlock(&area->mutex);
+ return err;
+ }
+ }
+ }
+ mutex_unlock(&area->mutex);
+
+ nfp_cpp_area_get(area);
+ return 0;
+}
+
+/**
+ * nfp_cpp_area_release() - release a locked down CPP area
+ * @area: CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area)
+{
+ mutex_lock(&area->mutex);
+ /* Only call the release on refcount == 0 */
+ if (atomic_dec_and_test(&area->refcount)) {
+ if (area->cpp->op->area_release) {
+ area->cpp->op->area_release(area);
+ /* Let anyone waiting for a BAR try to get one.. */
+ wake_up_interruptible_all(&area->cpp->waitq);
+ }
+ }
+ mutex_unlock(&area->mutex);
+
+ nfp_cpp_area_put(area);
+}
+
+/**
+ * nfp_cpp_area_release_free() - release CPP area and free it
+ * @area: CPP area handle
+ *
+ * Releases the CPP area and frees up the memory resources held by it.
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+ nfp_cpp_area_release(area);
+ nfp_cpp_area_free(area);
+}
+
+/**
+ * nfp_cpp_area_read() - read data from CPP area
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to put data into
+ * @length: number of bytes to read
+ *
+ * Read data from indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area,
+ unsigned long offset, void *kernel_vaddr,
+ size_t length)
+{
+ return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
+}
+
+/**
+ * nfp_cpp_area_write() - write data to CPP area
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to read data from
+ * @length: number of bytes to write
+ *
+ * Write data to indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area,
+ unsigned long offset, const void *kernel_vaddr,
+ size_t length)
+{
+ return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
+}
+
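+/* Illustrative usage sketch of the area API above; the CPP ID, address and
+ * buffer size below are hypothetical values, not taken from real hardware:
+ *
+ *	struct nfp_cpp_area *area;
+ *	u8 buf[8];
+ *	int err;
+ *
+ *	area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+ *				  0x1000, sizeof(buf));
+ *	if (!area)
+ *		return -ENOMEM;
+ *	err = nfp_cpp_area_acquire(area);
+ *	if (err) {
+ *		nfp_cpp_area_free(area);
+ *		return err;
+ *	}
+ *	err = nfp_cpp_area_read(area, 0, buf, sizeof(buf));
+ *	nfp_cpp_area_release_free(area);
+ */
+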
+/**
+ * nfp_cpp_area_check_range() - check if address range fits in CPP area
+ * @area: CPP area handle
+ * @offset: offset into CPP target
+ * @length: size of address range in bytes
+ *
+ * Check if address range fits within CPP area. Return 0 if the range
+ * fits, or -EFAULT if it does not.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+ unsigned long long offset, unsigned long length)
+{
+ if (offset < area->offset ||
+ offset + length > area->offset + area->size)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_area_name() - return name of a CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: Name of the area, or NULL
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->resource.name;
+}
+
+/**
+ * nfp_cpp_area_priv() - return private struct for CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: Private data for the CPP area
+ */
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
+{
+ return &cpp_area[1];
+}
+
+/**
+ * nfp_cpp_area_cpp() - return CPP handle for CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->cpp;
+}
+
+/**
+ * nfp_cpp_area_resource() - get resource
+ * @area: CPP area handle
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: struct resource pointer, or NULL
+ */
+struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
+{
+ struct resource *res = NULL;
+
+ if (area->cpp->op->area_resource)
+ res = area->cpp->op->area_resource(area);
+
+ return res;
+}
+
+/**
+ * nfp_cpp_area_phys() - get physical address of CPP area
+ * @area: CPP area handle
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: phys_addr_t of the area, or ~0 if not available
+ */
+phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
+{
+ phys_addr_t addr = ~0;
+
+ if (area->cpp->op->area_phys)
+ addr = area->cpp->op->area_phys(area);
+
+ return addr;
+}
+
+/**
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style
+ * operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: __iomem pointer to the area, or NULL
+ */
+void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
+{
+ void __iomem *iomem = NULL;
+
+ if (area->cpp->op->area_iomem)
+ iomem = area->cpp->op->area_iomem(area);
+
+ return iomem;
+}
+
+/**
+ * nfp_cpp_area_readl() - Read a u32 word from an area
+ * @area: CPP Area handle
+ * @offset: Offset into area
+ * @value: Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area,
+ unsigned long offset, u32 *value)
+{
+ u8 tmp[4];
+ int err;
+
+ err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = get_unaligned_le32(tmp);
+
+ return err;
+}
+
+/**
+ * nfp_cpp_area_writel() - Write a u32 word to an area
+ * @area: CPP Area handle
+ * @offset: Offset into area
+ * @value: Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_writel(struct nfp_cpp_area *area,
+ unsigned long offset, u32 value)
+{
+ u8 tmp[4];
+
+ put_unaligned_le32(value, tmp);
+
+ return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+}
+
+/**
+ * nfp_cpp_area_readq() - Read a u64 word from an area
+ * @area: CPP Area handle
+ * @offset: Offset into area
+ * @value: Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_readq(struct nfp_cpp_area *area,
+ unsigned long offset, u64 *value)
+{
+ u8 tmp[8];
+ int err;
+
+ err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = get_unaligned_le64(tmp);
+
+ return err;
+}
+
+/**
+ * nfp_cpp_area_writeq() - Write a u64 word to an area
+ * @area: CPP Area handle
+ * @offset: Offset into area
+ * @value: Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
+ unsigned long offset, u64 value)
+{
+ u8 tmp[8];
+
+ put_unaligned_le64(value, tmp);
+
+ return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+}
+
+/**
+ * nfp_cpp_area_fill() - fill a CPP area with a value
+ * @area: CPP area
+ * @offset: offset into CPP area
+ * @value: value to fill with
+ * @length: length of area to fill
+ *
+ * Fill indicated area with given value.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area,
+ unsigned long offset, u32 value, size_t length)
+{
+ u8 tmp[4];
+ size_t i;
+ int k;
+
+ put_unaligned_le32(value, tmp);
+
+ if (offset % sizeof(tmp) || length % sizeof(tmp))
+ return -EINVAL;
+
+ for (i = 0; i < length; i += sizeof(tmp)) {
+ k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
+ if (k < 0)
+ return k;
+ }
+
+ return i;
+}
+
+/**
+ * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
+ * @cpp: NFP CPP handle
+ * @size: Size of the area - MUST BE A POWER OF 2.
+ *
+ * Return: 0, or -ENOMEM on failure
+ */
+int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
+{
+ struct nfp_cpp_area_cache *cache;
+ struct nfp_cpp_area *area;
+
+ /* Allocate an area - we use the MU target's base as a placeholder,
+ * as all supported chips have a MU.
+ */
+ area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+ 0, size);
+ if (!area)
+ return -ENOMEM;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache) {
+ /* Don't leak the area we just allocated */
+ nfp_cpp_area_free(area);
+ return -ENOMEM;
+ }
+
+ cache->id = 0;
+ cache->addr = 0;
+ cache->size = size;
+ cache->area = area;
+ mutex_lock(&cpp->area_cache_mutex);
+ list_add_tail(&cache->entry, &cpp->area_cache_list);
+ mutex_unlock(&cpp->area_cache_mutex);
+
+ return 0;
+}
+
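+/* A bus implementation would typically call nfp_cpp_area_cache_add() once at
+ * probe time to pre-reserve hot-cache entries; the size below is illustrative
+ * (it must be a power of two):
+ *
+ *	err = nfp_cpp_area_cache_add(cpp, SZ_64K);
+ */
+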
+static struct nfp_cpp_area_cache *
+area_cache_get(struct nfp_cpp *cpp, u32 id,
+ u64 addr, unsigned long *offset, size_t length)
+{
+ struct nfp_cpp_area_cache *cache;
+ int err;
+
+ /* Early exit when length == 0, which prevents
+ * the need for special case code below when
+ * checking against available cache size.
+ */
+ if (length == 0)
+ return NULL;
+
+ if (list_empty(&cpp->area_cache_list) || id == 0)
+ return NULL;
+
+ /* Remap from cpp_island to cpp_target */
+ err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
+ if (err < 0)
+ return NULL;
+
+ addr += *offset;
+
+ mutex_lock(&cpp->area_cache_mutex);
+
+ /* See if we have a match */
+ list_for_each_entry(cache, &cpp->area_cache_list, entry) {
+ if (id == cache->id &&
+ addr >= cache->addr &&
+ addr + length <= cache->addr + cache->size)
+ goto exit;
+ }
+
+ /* No matches - inspect the tail of the LRU */
+ cache = list_entry(cpp->area_cache_list.prev,
+ struct nfp_cpp_area_cache, entry);
+
+ /* Can we fit in the cache entry? */
+ if (round_down(addr + length - 1, cache->size) !=
+ round_down(addr, cache->size)) {
+ mutex_unlock(&cpp->area_cache_mutex);
+ return NULL;
+ }
+
+ /* If id != 0, we will need to release it */
+ if (cache->id) {
+ nfp_cpp_area_release(cache->area);
+ cache->id = 0;
+ cache->addr = 0;
+ }
+
+ /* Adjust the start address to be cache size aligned */
+ cache->id = id;
+ cache->addr = addr & ~(u64)(cache->size - 1);
+
+ /* Re-init to the new ID and address */
+ if (cpp->op->area_init) {
+ err = cpp->op->area_init(cache->area,
+ id, cache->addr, cache->size);
+ if (err < 0) {
+ mutex_unlock(&cpp->area_cache_mutex);
+ return NULL;
+ }
+ }
+
+ /* Attempt to acquire */
+ err = nfp_cpp_area_acquire(cache->area);
+ if (err < 0) {
+ mutex_unlock(&cpp->area_cache_mutex);
+ return NULL;
+ }
+
+exit:
+ /* Adjust offset */
+ *offset = addr - cache->addr;
+ return cache;
+}
+
+static void
+area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
+{
+ if (!cache)
+ return;
+
+ /* Move to front of LRU */
+ list_del(&cache->entry);
+ list_add(&cache->entry, &cpp->area_cache_list);
+
+ mutex_unlock(&cpp->area_cache_mutex);
+}
+
+/**
+ * nfp_cpp_read() - read from CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address, void *kernel_vaddr, size_t length)
+{
+ struct nfp_cpp_area_cache *cache;
+ struct nfp_cpp_area *area;
+ unsigned long offset = 0;
+ int err;
+
+ cache = area_cache_get(cpp, destination, address, &offset, length);
+ if (cache) {
+ area = cache->area;
+ } else {
+ area = nfp_cpp_area_alloc(cpp, destination, address, length);
+ if (!area)
+ return -ENOMEM;
+
+ err = nfp_cpp_area_acquire(area);
+ if (err)
+ goto out;
+ }
+
+ err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
+out:
+ if (cache)
+ area_cache_put(cpp, cache);
+ else
+ nfp_cpp_area_release_free(area);
+
+ return err;
+}
+
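+/* Usage sketch for the buffered read above; the CPP ID and address are
+ * illustrative (target 7 is the MU, as noted in nfp_cpp_area_cache_add()):
+ *
+ *	u8 buf[16];
+ *	int ret;
+ *
+ *	ret = nfp_cpp_read(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+ *			   0x8000, buf, sizeof(buf));
+ *	if (ret != sizeof(buf))
+ *		return ret < 0 ? ret : -EIO;
+ */
+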
+/**
+ * nfp_cpp_write() - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address,
+ const void *kernel_vaddr, size_t length)
+{
+ struct nfp_cpp_area_cache *cache;
+ struct nfp_cpp_area *area;
+ unsigned long offset = 0;
+ int err;
+
+ cache = area_cache_get(cpp, destination, address, &offset, length);
+ if (cache) {
+ area = cache->area;
+ } else {
+ area = nfp_cpp_area_alloc(cpp, destination, address, length);
+ if (!area)
+ return -ENOMEM;
+
+ err = nfp_cpp_area_acquire(area);
+ if (err)
+ goto out;
+ }
+
+ err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
+
+out:
+ if (cache)
+ area_cache_put(cpp, cache);
+ else
+ nfp_cpp_area_release_free(area);
+
+ return err;
+}
+
+/* Return the correct CPP address, and fixup xpb_addr as needed. */
+static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
+{
+ int island;
+ u32 xpb;
+
+ xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
+ /* Ensure that non-local XPB accesses go
+ * out through the global XPBM bus.
+ */
+ island = (*xpb_addr >> 24) & 0x3f;
+ if (!island)
+ return xpb;
+
+ if (island != 1) {
+ *xpb_addr |= 1 << 30;
+ return xpb;
+ }
+
+ /* Accesses to the ARM Island overlay uses Island 0 / Global Bit */
+ *xpb_addr &= ~0x7f000000;
+ if (*xpb_addr < 0x60000) {
+ *xpb_addr |= 1 << 30;
+ } else {
+ /* And only non-ARM interfaces use the island id = 1 */
+ if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
+ != NFP_CPP_INTERFACE_TYPE_ARM)
+ *xpb_addr |= 1 << 24;
+ }
+
+ return xpb;
+}
+
+/**
+ * nfp_xpb_readl() - Read a u32 word from a XPB location
+ * @cpp: CPP device handle
+ * @xpb_addr: Address for operation
+ * @value: Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
+{
+ u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+/**
+ * nfp_xpb_writel() - Write a u32 word to a XPB location
+ * @cpp: CPP device handle
+ * @xpb_addr: Address for operation
+ * @value: Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
+{
+ u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+/**
+ * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
+ * @cpp: NFP CPP device handle
+ * @xpb_tgt: XPB target and address
+ * @mask: mask of bits to alter
+ * @value: new value for the bits selected by @mask
+ *
+ * KERNEL: This operation is safe to call in interrupt or softirq context.
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
+ u32 mask, u32 value)
+{
+ int err;
+ u32 tmp;
+
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ return err;
+
+ tmp &= ~mask;
+ tmp |= mask & value;
+ return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
+
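+/* Read-modify-write sketch for nfp_xpb_writelm() with a hypothetical CSR at
+ * xpb_tgt: set bit 3 and clear bit 4, leaving all other bits untouched:
+ *
+ *	err = nfp_xpb_writelm(cpp, xpb_tgt, BIT(3) | BIT(4), BIT(3));
+ */
+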
+/* Lockdep markers */
+static struct lock_class_key nfp_cpp_resource_lock_key;
+
+static void nfp_cpp_dev_release(struct device *dev)
+{
+ /* Nothing to do here - it just makes the kernel happy */
+}
+
+/**
+ * nfp_cpp_from_operations() - Create a NFP CPP handle
+ * from an operations structure
+ * @ops: NFP CPP operations structure
+ * @parent: Parent device
+ * @priv: Private data of low-level implementation
+ *
+ * NOTE: On failure, cpp_ops->free will be called!
+ *
+ * Return: NFP CPP handle on success, ERR_PTR on failure
+ */
+struct nfp_cpp *
+nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
+ struct device *parent, void *priv)
+{
+ const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
+ struct nfp_cpp *cpp;
+ u32 mask[2];
+ u32 xpbaddr;
+ size_t tgt;
+ int err;
+
+ cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
+ if (!cpp) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ cpp->op = ops;
+ cpp->priv = priv;
+ cpp->interface = ops->get_interface(parent);
+ if (ops->read_serial)
+ ops->read_serial(parent, cpp->serial);
+ rwlock_init(&cpp->resource_lock);
+ init_waitqueue_head(&cpp->waitq);
+ lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
+ INIT_LIST_HEAD(&cpp->mutex_cache);
+ INIT_LIST_HEAD(&cpp->resource_list);
+ INIT_LIST_HEAD(&cpp->area_cache_list);
+ mutex_init(&cpp->area_cache_mutex);
+ cpp->dev.init_name = "cpp";
+ cpp->dev.parent = parent;
+ cpp->dev.release = nfp_cpp_dev_release;
+ err = device_register(&cpp->dev);
+ if (err < 0) {
+ put_device(&cpp->dev);
+ goto err_dev;
+ }
+
+ dev_set_drvdata(&cpp->dev, cpp);
+
+ /* NOTE: cpp_lock is NOT locked for op->init,
+ * since it may call NFP CPP API operations
+ */
+ if (cpp->op->init) {
+ err = cpp->op->init(cpp);
+ if (err < 0) {
+ dev_err(parent,
+ "NFP interface initialization failed\n");
+ goto err_out;
+ }
+ }
+
+ err = nfp_cpp_model_autodetect(cpp, &cpp->model);
+ if (err < 0) {
+ dev_err(parent, "NFP model detection failed\n");
+ goto err_out;
+ }
+
+ for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + (tgt * 4);
+ err = nfp_xpb_readl(cpp, xpbaddr,
+ &cpp->imb_cat_table[tgt]);
+ if (err < 0) {
+ dev_err(parent,
+ "Can't read CPP mapping from device\n");
+ goto err_out;
+ }
+ }
+
+ nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
+ &mask[0]);
+ nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
+ &mask[1]);
+
+ dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
+ nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
+
+ return cpp;
+
+err_out:
+ device_unregister(&cpp->dev);
+err_dev:
+ kfree(cpp);
+err_malloc:
+ return ERR_PTR(err);
+}
+
+/**
+ * nfp_cpp_priv() - Get the operations private data of a CPP handle
+ * @cpp: CPP handle
+ *
+ * Return: Private data for the NFP CPP handle
+ */
+void *nfp_cpp_priv(struct nfp_cpp *cpp)
+{
+ return cpp->priv;
+}
+
+/**
+ * nfp_cpp_device() - Get the Linux device handle of a CPP handle
+ * @cpp: CPP handle
+ *
+ * Return: Device for the NFP CPP bus
+ */
+struct device *nfp_cpp_device(struct nfp_cpp *cpp)
+{
+ return &cpp->dev;
+}
+
+#define NFP_EXPL_OP(func, expl, args...) \
+ ({ \
+ struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
+ int err = -ENODEV; \
+ \
+ if (cpp->op->func) \
+ err = cpp->op->func(expl, ##args); \
+ err; \
+ })
+
+#define NFP_EXPL_OP_NR(func, expl, args...) \
+ ({ \
+ struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
+ \
+ if (cpp->op->func) \
+ cpp->op->func(expl, ##args); \
+ \
+ })
+
+/**
+ * nfp_cpp_explicit_acquire() - Acquire explicit access handle
+ * @cpp: NFP CPP handle
+ *
+ * The 'data_ref' and 'signal_ref' values are useful when
+ * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
+ *
+ * Return: NFP CPP explicit handle
+ */
+struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
+{
+ struct nfp_cpp_explicit *expl;
+ int err;
+
+ expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
+ if (!expl)
+ return NULL;
+
+ expl->cpp = cpp;
+ err = NFP_EXPL_OP(explicit_acquire, expl);
+ if (err < 0) {
+ kfree(expl);
+ return NULL;
+ }
+
+ return expl;
+}
+
+/**
+ * nfp_cpp_explicit_set_target() - Set target fields for explicit
+ * @expl: Explicit handle
+ * @cpp_id: CPP ID field
+ * @len: CPP Length field
+ * @mask: CPP Mask field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
+ u32 cpp_id, u8 len, u8 mask)
+{
+ expl->cmd.cpp_id = cpp_id;
+ expl->cmd.len = len;
+ expl->cmd.byte_mask = mask;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_data() - Set data fields for explicit
+ * @expl: Explicit handle
+ * @data_master: CPP Data Master field
+ * @data_ref: CPP Data Ref field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
+ u8 data_master, u16 data_ref)
+{
+ expl->cmd.data_master = data_master;
+ expl->cmd.data_ref = data_ref;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
+ * @expl: Explicit handle
+ * @signal_master: CPP Signal Master field
+ * @signal_ref: CPP Signal Ref field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
+ u8 signal_master, u8 signal_ref)
+{
+ expl->cmd.signal_master = signal_master;
+ expl->cmd.signal_ref = signal_ref;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
+ * @expl: Explicit handle
+ * @posted: True for signaled completion, false otherwise
+ * @siga: CPP Signal A field
+ * @siga_mode: CPP Signal A Mode field
+ * @sigb: CPP Signal B field
+ * @sigb_mode: CPP Signal B Mode field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
+ u8 siga,
+ enum nfp_cpp_explicit_signal_mode siga_mode,
+ u8 sigb,
+ enum nfp_cpp_explicit_signal_mode sigb_mode)
+{
+ expl->cmd.posted = posted;
+ expl->cmd.siga = siga;
+ expl->cmd.sigb = sigb;
+ expl->cmd.siga_mode = siga_mode;
+ expl->cmd.sigb_mode = sigb_mode;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_explicit_put() - Set up the write (pull) data for an explicit access
+ * @expl: NFP CPP Explicit handle
+ * @buff: Data to have the target pull in the transaction
+ * @len: Length of data, in bytes
+ *
+ * The 'len' parameter must be less than or equal to 128 bytes.
+ *
+ * If this function is called before the configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
+ const void *buff, size_t len)
+{
+ return NFP_EXPL_OP(explicit_put, expl, buff, len);
+}
+
+/**
+ * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
+ * @expl: NFP CPP Explicit handle
+ * @address: Address to send in the explicit transaction
+ *
+ * If this function is called before the configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
+{
+ return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
+}
+
+/**
+ * nfp_cpp_explicit_get() - Get the 'push' (read) data from an explicit access
+ * @expl: NFP CPP Explicit handle
+ * @buff: Data that the target pushed in the transaction
+ * @len: Length of data, in bytes
+ *
+ * The 'len' parameter must be less than or equal to 128 bytes.
+ *
+ * If this function is called before all three configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * If this function is called before nfp_cpp_explicit_do()
+ * has completed, it will return -EBUSY.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
+{
+ return NFP_EXPL_OP(explicit_get, expl, buff, len);
+}
+
+/**
+ * nfp_cpp_explicit_release() - Release explicit access handle
+ * @expl: NFP CPP Explicit handle
+ */
+void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
+{
+ NFP_EXPL_OP_NR(explicit_release, expl);
+ kfree(expl);
+}
+
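+/* A complete explicit transaction is normally the sequence
+ * nfp_cpp_explicit_acquire(), the nfp_cpp_explicit_set_*() helpers,
+ * optionally nfp_cpp_explicit_put() for pull data, nfp_cpp_explicit_do(),
+ * optionally nfp_cpp_explicit_get() for push data, and finally
+ * nfp_cpp_explicit_release().  nfp_cpp_explicit_read() and
+ * nfp_cpp_explicit_write() in nfp_cpplib.c follow this pattern.
+ */
+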
+/**
+ * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
+ * @cpp_explicit: CPP explicit handle
+ *
+ * Return: NFP CPP handle of the explicit
+ */
+struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
+{
+ return cpp_explicit->cpp;
+}
+
+/**
+ * nfp_cpp_explicit_priv() - return private struct for CPP explicit
+ * @cpp_explicit: CPP explicit handle
+ *
+ * Return: private data of the explicit, or NULL
+ */
+void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
+{
+ return &cpp_explicit[1];
+}
+
+/* THIS FUNCTION IS NOT EXPORTED */
+static u32 nfp_mutex_locked(u16 interface)
+{
+ return (u32)interface << 16 | 0x000f;
+}
+
+static u32 nfp_mutex_unlocked(u16 interface)
+{
+ return (u32)interface << 16 | 0x0000;
+}
+
+static bool nfp_mutex_is_locked(u32 val)
+{
+ return (val & 0xffff) == 0x000f;
+}
+
+static bool nfp_mutex_is_unlocked(u32 val)
+{
+ return (val & 0xffff) == 0x0000;
+}
+
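+/* The 64-bit mutex location holds a 32-bit lock word at the base address and
+ * a 32-bit key at address + 4.  In the lock word the upper 16 bits carry the
+ * owning interface ID and the lower 16 bits carry the lock state, e.g.
+ * (interface value illustrative):
+ *
+ *	0x1234000f	locked by interface 0x1234
+ *	0x12340000	unlocked, last written by interface 0x1234
+ */
+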
+/* If you need more than 65536 recursive locks, please rethink your code. */
+#define MUTEX_DEPTH_MAX 0xffff
+
+static int
+nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
+{
+ /* Not permitted on invalid interfaces */
+ if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
+ NFP_CPP_INTERFACE_TYPE_INVALID)
+ return -EINVAL;
+
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return -EINVAL;
+
+ if (*target != NFP_CPP_TARGET_MU)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_init() - Initialize a mutex location
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (must be NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: Unique 32-bit value for this mutex
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
+ int target, unsigned long long address, u32 key)
+{
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_alloc() - Create a mutex handle
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (must be NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: 32-bit unique key (must match the key at this location)
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine's CmpAndSwap32 command are supported.
+ *
+ * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, u32 key)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ u16 interface = nfp_cpp_interface(cpp);
+ struct nfp_cpp_mutex *mutex;
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return NULL;
+
+ /* Look for mutex on cache list */
+ list_for_each_entry(mutex, &cpp->mutex_cache, list) {
+ if (mutex->target == target && mutex->address == address) {
+ mutex->usage++;
+ return mutex;
+ }
+ }
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NULL;
+
+ mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
+ if (!mutex)
+ return NULL;
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+ mutex->usage = 1;
+
+ /* Add mutex to cache list */
+ list_add(&mutex->list, &cpp->mutex_cache);
+
+ return mutex;
+}
+
+/**
+ * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
+ * @mutex: NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ if (--mutex->usage)
+ return;
+
+ /* Remove mutex from cache */
+ list_del(&mutex->list);
+ kfree(mutex);
+}
+
+/**
+ * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned int timeout_ms = 1;
+ int err;
+
+ /* We can't use a waitqueue here, because the unlocker
+ * might be on a separate CPU.
+ *
+ * So just wait for now.
+ */
+ for (;;) {
+ err = nfp_cpp_mutex_trylock(mutex);
+ if (err != -EBUSY)
+ break;
+
+ err = msleep_interruptible(timeout_ms);
+ if (err != 0)
+ return -ERESTARTSYS;
+
+ if (time_is_before_eq_jiffies(warn_at)) {
+ warn_at = jiffies + 60 * HZ;
+ dev_warn(mutex->cpp->dev.parent,
+ "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
+ mutex->usage, mutex->depth,
+ mutex->target, mutex->address, mutex->key);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value;
+ u16 interface;
+ int err;
+
+ interface = nfp_cpp_interface(cpp);
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ if (value != nfp_mutex_locked(interface))
+ return -EACCES;
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address,
+ nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ mutex->depth = 0;
+ return 0;
+}
+
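+/* Typical locking sketch; the address and key values are illustrative, and
+ * the location must have been set up once via nfp_cpp_mutex_init():
+ *
+ *	struct nfp_cpp_mutex *m;
+ *	int err;
+ *
+ *	m = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x400000, 0xabcd0001);
+ *	if (!m)
+ *		return -ENOENT;
+ *	err = nfp_cpp_mutex_lock(m);
+ *	if (!err) {
+ *		... access the shared resource ...
+ *		nfp_cpp_mutex_unlock(m);
+ *	}
+ *	nfp_cpp_mutex_free(m);
+ */
+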
+/**
+ * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 if the lock succeeded, -errno on failure
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value, tmp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == MUTEX_DEPTH_MAX)
+ return -E2BIG;
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ /* Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = nfp_mutex_locked(nfp_cpp_interface(cpp));
+
+ /* We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ return err;
+
+ /* Was it unlocked? */
+ if (nfp_mutex_is_unlocked(tmp)) {
+ /* The read value can only be 0x....0000 in the unlocked state.
+ * If there was another contending for this lock, then
+ * the lock state would be 0x....000f
+ */
+
+ /* Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ return err;
+
+ mutex->depth = 1;
+ return 0;
+ }
+
+ /* Already locked by us? Success! */
+ if (tmp == value) {
+ mutex->depth = 1;
+ return 0;
+ }
+
+ return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
new file mode 100644
index 000000000000..0ba0379b8f75
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cpplib.c
+ * Library of functions to access the NFP's CPP bus
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp6000/nfp_xpb.h"
+
+/* NFP6000 PL */
+#define NFP_PL_DEVICE_ID 0x00000004
+#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
+
+#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
+
+/**
+ * nfp_cpp_readl() - Read a u32 word from a CPP location
+ * @cpp: CPP device handle
+ * @cpp_id: CPP ID for operation
+ * @address: Address for operation
+ * @value: Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u32 *value)
+{
+ u8 tmp[4];
+ int err;
+
+ err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ *value = get_unaligned_le32(tmp);
+
+ return err;
+}
+
+/**
+ * nfp_cpp_writel() - Write a u32 word to a CPP location
+ * @cpp: CPP device handle
+ * @cpp_id: CPP ID for operation
+ * @address: Address for operation
+ * @value: Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u32 value)
+{
+ u8 tmp[4];
+
+ put_unaligned_le32(value, tmp);
+ return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+}
+
+/**
+ * nfp_cpp_readq() - Read a u64 word from a CPP location
+ * @cpp: CPP device handle
+ * @cpp_id: CPP ID for operation
+ * @address: Address for operation
+ * @value: Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u64 *value)
+{
+ u8 tmp[8];
+ int err;
+
+ err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ *value = get_unaligned_le64(tmp);
+
+ return err;
+}
+
+/**
+ * nfp_cpp_writeq() - Write a u64 word to a CPP location
+ * @cpp: CPP device handle
+ * @cpp_id: CPP ID for operation
+ * @address: Address for operation
+ * @value: Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
+ unsigned long long address, u64 value)
+{
+ u8 tmp[8];
+
+ put_unaligned_le64(value, tmp);
+ return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+}
+
+/* NOTE: This code should not use nfp_xpb_* functions,
+ * as those are model-specific
+ */
+int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
+{
+ const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
+ u32 reg;
+ int err;
+
+ err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model);
+ if (err < 0)
+ return err;
+
+ /* The PL's PluDeviceID revision code is authoritative */
+ *model &= ~0xff;
+ err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID,
+ &reg);
+ if (err < 0)
+ return err;
+
+ *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10;
+
+ return 0;
+}
+
+static u8 nfp_bytemask(int width, u64 addr)
+{
+ if (width == 8)
+ return 0xff;
+ else if (width == 4)
+ return 0x0f << (addr & 4);
+ else if (width == 2)
+ return 0x03 << (addr & 6);
+ else if (width == 1)
+ return 0x01 << (addr & 7);
+ else
+ return 0;
+}
+
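+/* Worked examples of the byte mask above (addresses illustrative):
+ *
+ *	nfp_bytemask(8, addr)   == 0xff  for any addr
+ *	nfp_bytemask(4, 0x1004) == 0xf0  (addr & 4 == 4)
+ *	nfp_bytemask(2, 0x1006) == 0xc0  (addr & 6 == 6)
+ *	nfp_bytemask(1, 0x1007) == 0x80  (addr & 7 == 7)
+ */
+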
+int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
+ u64 addr, void *buff, size_t len, int width_read)
+{
+ struct nfp_cpp_explicit *expl;
+ char *tmp = buff;
+ int err, i, incr;
+ u8 byte_mask;
+
+ if (len & (width_read - 1))
+ return -EINVAL;
+
+ expl = nfp_cpp_explicit_acquire(cpp);
+ if (!expl)
+ return -EBUSY;
+
+ incr = min_t(int, 16 * width_read, 128);
+ incr = min_t(int, incr, len);
+
+ /* Translate a NFP_CPP_ACTION_RW to action 0 */
+ if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
+ cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0,
+ NFP_CPP_ID_TOKEN_of(cpp_id));
+
+ byte_mask = nfp_bytemask(width_read, addr);
+
+ nfp_cpp_explicit_set_target(expl, cpp_id,
+ incr / width_read - 1, byte_mask);
+ nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
+ 0, NFP_SIGNAL_NONE);
+
+ for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
+ if (i + incr > len) {
+ incr = len - i;
+ nfp_cpp_explicit_set_target(expl, cpp_id,
+ incr / width_read - 1,
+ 0xff);
+ }
+
+ err = nfp_cpp_explicit_do(expl, addr);
+ if (err < 0)
+ goto exit_release;
+
+ err = nfp_cpp_explicit_get(expl, tmp, incr);
+ if (err < 0)
+ goto exit_release;
+ }
+ err = len;
+exit_release:
+ nfp_cpp_explicit_release(expl);
+
+ return err;
+}
+
+int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr,
+ const void *buff, size_t len, int width_write)
+{
+ struct nfp_cpp_explicit *expl;
+ const char *tmp = buff;
+ int err, i, incr;
+ u8 byte_mask;
+
+ if (len & (width_write - 1))
+ return -EINVAL;
+
+ expl = nfp_cpp_explicit_acquire(cpp);
+ if (!expl)
+ return -EBUSY;
+
+ incr = min_t(int, 16 * width_write, 128);
+ incr = min_t(int, incr, len);
+
+ /* Translate a NFP_CPP_ACTION_RW to action 1 */
+ if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
+ cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1,
+ NFP_CPP_ID_TOKEN_of(cpp_id));
+
+ byte_mask = nfp_bytemask(width_write, addr);
+
+ nfp_cpp_explicit_set_target(expl, cpp_id,
+ incr / width_write - 1, byte_mask);
+ nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL,
+ 0, NFP_SIGNAL_NONE);
+
+ for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
+ if (i + incr > len) {
+ incr = len - i;
+ nfp_cpp_explicit_set_target(expl, cpp_id,
+ incr / width_write - 1,
+ 0xff);
+ }
+
+ err = nfp_cpp_explicit_put(expl, tmp, incr);
+ if (err < 0)
+ goto exit_release;
+
+ err = nfp_cpp_explicit_do(expl, addr);
+ if (err < 0)
+ goto exit_release;
+ }
+ err = len;
+exit_release:
+ nfp_cpp_explicit_release(expl);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644
index 000000000000..8d8f311ffa6e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ *
+ * Examples of the fields:
+ * me.count = 40
+ * me.mask = 0x7f_ffff_ffff
+ *
+ * me.count is the total number of MEs on the system.
+ * me.mask is the bitmask of MEs that are available for application usage.
+ *
+ * (i.e., in this example, ME 39 has been reserved by boardconfig.)
+ */
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define NFP_SUBSYS "nfp_hwinfo"
+
+#include "crc32.h"
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define HWINFO_SIZE_MIN 0x100
+#define HWINFO_WAIT 20 /* seconds */
+
+/* The Hardware Info Table defines the properties of the system.
+ *
+ * HWInfo v1 Table (fixed size)
+ *
+ * 0x0000: u32 version Hardware Info Table version (1.0)
+ * 0x0004: u32 size Total size of the table, including
+ * the CRC32 (IEEE 802.3)
+ * 0x0008: u32 jumptab Offset of key/value table
+ * 0x000c: u32 keys Total number of keys in the key/value table
+ * NNNNNN: Key/value jump table and string data
+ * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * HWInfo v2 Table (variable size)
+ *
+ * 0x0000: u32 version Hardware Info Table version (2.0)
+ * 0x0004: u32 size Current size of the data area, excluding CRC32
+ * 0x0008: u32 limit Maximum size of the table
+ * 0x000c: u32 reserved Unused, set to zero
+ * NNNNNN: Key/value data
+ * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * If the HWInfo table is in the process of being updated, the low bit
+ * of version will be set.
+ *
+ * HWInfo v1 Key/Value Table
+ * -------------------------
+ *
+ * The key/value table is a set of offsets to ASCIIZ strings which have
+ * been strcmp(3) sorted (yes, please use bsearch(3) on the table).
+ *
+ * All keys are guaranteed to be unique.
+ *
+ * N+0: u32 key_1 Offset to the first key
+ * N+4: u32 val_1 Offset to the first value
+ * N+8: u32 key_2 Offset to the second key
+ * N+c: u32 val_2 Offset to the second value
+ * ...
+ *
+ * HWInfo v2 Key/Value Table
+ * -------------------------
+ *
+ * Packed UTF8Z strings, i.e. 'key1\000value1\000key2\000value2\000'
+ *
+ * Unsorted.
+ */
+
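+/* For example, an HWInfo v2 data area holding the two keys shown at the top
+ * of this file would contain the packed strings (values illustrative):
+ *
+ *	"me.count\0" "40\0" "me.mask\0" "0x7f_ffff_ffff\0"
+ */
+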
+#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_UPDATING BIT(0)
+
+struct nfp_hwinfo {
+ u8 start[0];
+
+ __le32 version;
+ __le32 size;
+
+ /* v2 specific fields */
+ __le32 limit;
+ __le32 resv;
+
+ char data[];
+};
+
+static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
+{
+ return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING;
+}
+
+static int
+hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size)
+{
+ const char *key, *val, *end = hwinfo->data + size;
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+
+ val = key + strlen(key) + 1;
+ if (val >= end) {
+ nfp_warn(cpp, "Bad HWINFO - overflowing key\n");
+ return -EINVAL;
+ }
+
+ if (val + strlen(val) + 1 > end) {
+ nfp_warn(cpp, "Bad HWINFO - overflowing value\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
+{
+ u32 size, crc;
+
+ size = le32_to_cpu(db->size);
+ if (size > len) {
+ nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len);
+ return -EINVAL;
+ }
+
+ size -= sizeof(u32);
+ crc = crc32_posix(db, size);
+ if (crc != get_unaligned_le32(db->start + size)) {
+ nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n",
+ crc, get_unaligned_le32(db->start + size));
+
+ return -EINVAL;
+ }
+
+ return hwinfo_db_walk(cpp, db, size);
+}
+
+static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+{
+ struct nfp_hwinfo *header;
+ struct nfp_resource *res;
+ u64 cpp_addr;
+ u32 cpp_id;
+ int err;
+ u8 *db;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
+ if (!IS_ERR(res)) {
+ cpp_id = nfp_resource_cpp_id(res);
+ cpp_addr = nfp_resource_address(res);
+ *cpp_size = nfp_resource_size(res);
+
+ nfp_resource_release(res);
+
+ if (*cpp_size < HWINFO_SIZE_MIN)
+ return -ENOENT;
+ } else if (PTR_ERR(res) == -ENOENT) {
+ /* Try getting the HWInfo table from the 'classic' location */
+ cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
+ NFP_CPP_ACTION_RW, 0, 1);
+ cpp_addr = 0x30000;
+ *cpp_size = 0x0e000;
+ } else {
+ return PTR_ERR(res);
+ }
+
+ db = kmalloc(*cpp_size + 1, GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
+ if (err != *cpp_size) {
+ kfree(db);
+ return err < 0 ? err : -EIO;
+ }
+
+ header = (void *)db;
+ if (nfp_hwinfo_is_updating(header)) {
+ kfree(db);
+ return -EBUSY;
+ }
+
+ if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
+ nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
+ le32_to_cpu(header->version));
+ kfree(db);
+ return -EINVAL;
+ }
+
+ /* NULL-terminate for safety */
+ db[*cpp_size] = '\0';
+
+ nfp_hwinfo_cache_set(cpp, db);
+
+ return 0;
+}
+
+static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+{
+ const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
+ int err;
+
+ for (;;) {
+ const unsigned long start_time = jiffies;
+
+ err = hwinfo_try_fetch(cpp, hwdb_size);
+ if (!err)
+ return 0;
+
+ err = msleep_interruptible(100);
+ if (err || time_after(start_time, wait_until)) {
+ nfp_err(cpp, "NFP access error\n");
+ return -EIO;
+ }
+ }
+}
+
+static int nfp_hwinfo_load(struct nfp_cpp *cpp)
+{
+ struct nfp_hwinfo *db;
+ size_t hwdb_size = 0;
+ int err;
+
+ err = hwinfo_fetch(cpp, &hwdb_size);
+ if (err)
+ return err;
+
+ db = nfp_hwinfo_cache(cpp);
+ err = hwinfo_db_validate(cpp, db, hwdb_size);
+ if (err) {
+ kfree(db);
+ nfp_hwinfo_cache_set(cpp, NULL);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
+ * @cpp: NFP CPP handle
+ * @lookup: HWInfo name to search for
+ *
+ * Return: Value of the HWInfo name, or NULL
+ */
+const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
+{
+ const char *key, *val, *end;
+ struct nfp_hwinfo *hwinfo;
+ int err;
+
+ hwinfo = nfp_hwinfo_cache(cpp);
+ if (!hwinfo) {
+ err = nfp_hwinfo_load(cpp);
+ if (err)
+ return NULL;
+ hwinfo = nfp_hwinfo_cache(cpp);
+ }
+
+ if (!hwinfo || !lookup)
+ return NULL;
+
+ end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32);
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+
+ val = key + strlen(key) + 1;
+
+ if (strcmp(key, lookup) == 0)
+ return val;
+ }
+
+ return NULL;
+}
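+
+/* Usage sketch; the key name comes from the example at the top of this file
+ * and parsing of the returned ASCII value is up to the caller:
+ *
+ *	const char *count = nfp_hwinfo_lookup(cpp, "me.count");
+ *
+ *	if (count)
+ *		... e.g. kstrtou32(count, 0, &val) ...
+ */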
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
new file mode 100644
index 000000000000..3d15dd03647e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_mip.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Espen Skoglund <espen.skoglund@netronome.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+
+#define NFP_MIP_SIGNATURE cpu_to_le32(0x0050494d) /* "MIP\0" */
+#define NFP_MIP_VERSION cpu_to_le32(1)
+#define NFP_MIP_MAX_OFFSET (256 * 1024)
+
+struct nfp_mip {
+ __le32 signature;
+ __le32 mip_version;
+ __le32 mip_size;
+ __le32 first_entry;
+
+ __le32 version;
+ __le32 buildnum;
+ __le32 buildtime;
+ __le32 loadtime;
+
+ __le32 symtab_addr;
+ __le32 symtab_size;
+ __le32 strtab_addr;
+ __le32 strtab_size;
+
+ char name[16];
+ char toolchain[32];
+};
+
+/* Read memory and check if it could be a valid MIP */
+static int
+nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip)
+{
+ int ret;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
+ if (ret != sizeof(*mip)) {
+ nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n",
+ ret, sizeof(*mip));
+ return -EIO;
+ }
+ if (mip->signature != NFP_MIP_SIGNATURE) {
+ nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n",
+ le32_to_cpu(mip->signature));
+ return -EINVAL;
+ }
+ if (mip->mip_version != NFP_MIP_VERSION) {
+ nfp_warn(cpp, "Unsupported MIP version (%d)\n",
+ le32_to_cpu(mip->mip_version));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Try to locate MIP using the resource table */
+static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
+{
+ struct nfp_nffw_info *nffw_info;
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ nffw_info = nfp_nffw_info_open(cpp);
+ if (IS_ERR(nffw_info))
+ return PTR_ERR(nffw_info);
+
+ err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
+ if (err)
+ goto exit_close_nffw;
+
+ err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
+exit_close_nffw:
+ nfp_nffw_info_close(nffw_info);
+ return err;
+}
+
+/**
+ * nfp_mip_open() - Get device MIP structure
+ * @cpp: NFP CPP Handle
+ *
+ * Copy MIP structure from NFP device and return it. The returned
+ * structure is handled internally by the library and should be
+ * freed by calling nfp_mip_close().
+ *
+ * Return: pointer to mip, NULL on failure.
+ */
+const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
+{
+ struct nfp_mip *mip;
+ int err;
+
+ mip = kmalloc(sizeof(*mip), GFP_KERNEL);
+ if (!mip)
+ return NULL;
+
+ err = nfp_mip_read_resource(cpp, mip);
+ if (err) {
+ kfree(mip);
+ return NULL;
+ }
+
+ return mip;
+}
+
+void nfp_mip_close(const struct nfp_mip *mip)
+{
+ kfree(mip);
+}
+
+/**
+ * nfp_mip_symtab() - Get the address and size of the MIP symbol table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol table
+ * @size: Location for size of MIP symbol table
+ */
+void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
+{
+ *addr = le32_to_cpu(mip->symtab_addr);
+ *size = le32_to_cpu(mip->symtab_size);
+}
+
+/**
+ * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol name table
+ * @size: Location for size of MIP symbol name table
+ */
+void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
+{
+ *addr = le32_to_cpu(mip->strtab_addr);
+ *size = le32_to_cpu(mip->strtab_size);
+}
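+
+/* Editor's illustrative usage sketch (not part of this patch), showing the
+ * intended open/query/close sequence:
+ *
+ *	const struct nfp_mip *mip = nfp_mip_open(cpp);
+ *	u32 addr, size;
+ *
+ *	if (!mip)
+ *		return -EIO;
+ *	nfp_mip_symtab(mip, &addr, &size);
+ *	nfp_mip_close(mip);
+ */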
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
new file mode 100644
index 000000000000..cd34097b79f1
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nffw.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp6000/nfp6000.h"
+
+/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ * changed to 200.
+ * 1: First versioned struct, with
+ * FWINFO_CNT = 120
+ * MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ * MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+/* Work in 32-bit words to make cross-platform endianness easier to handle */
+
+/** nfp.nffw meinfo **/
+struct nffw_meinfo {
+ __le32 ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+ __le32 loaded__mu_da__mip_off_hi;
+ __le32 mip_cppid; /* 0 means no MIP */
+ __le32 mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+/** Resource: nfp.nffw main **/
+struct nfp_nffw_info_data {
+ __le32 flags[2];
+ union {
+ struct nfp_nffw_info_v1 v1;
+ struct nfp_nffw_info_v2 v2;
+ } info;
+};
+
+struct nfp_nffw_info {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+
+ struct nfp_nffw_info_data fwinf;
+};
+
+/* flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+ return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+ return (le32_to_cpu(res->flags[0]) >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+ return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+ return le32_to_cpu(fi->mip_cppid);
+}
+
+/* mu_da = loaded__mu_da__mip_off_hi<8:8> */
+static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+ return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
+static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+ u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi);
+
+ return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo);
+}
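+
+/* Editor's worked example: loaded__mu_da__mip_off_hi = 0x80000002 and
+ * mip_offset_lo = 0x00001000 decode to loaded = 1 (bit 31), mu_da = 0
+ * (bit 8), and mip_offset = ((0x02 & 0xFF) << 32) | 0x1000 = 0x200001000.
+ */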
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
+
+static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ unsigned int mode, addr40;
+ u32 xpbaddr, imbcppat;
+ int err;
+
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+ err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+ if (err < 0)
+ return err;
+
+ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+ addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+ return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+	/* For this parsing code, version 0 is most likely really version 1.
+	 * Since the kernel driver does not take responsibility for
+	 * initialising the nfp.nffw resource, any previous code (CA firmware
+	 * or userspace) that left the version at 0 but did set the init flag
+	 * is effectively version 1.
+	 */
+ switch (nffw_res_info_version_get(fwinf)) {
+ case 0:
+ case 1:
+ *arr = &fwinf->info.v1.fwinfo[0];
+ return NFFW_FWINFO_CNT_V1;
+ case 2:
+ *arr = &fwinf->info.v2.fwinfo[0];
+ return NFFW_FWINFO_CNT_V2;
+ default:
+ *arr = NULL;
+ return 0;
+ }
+}
+
+/**
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp: NFP CPP handle
+ *
+ * Return: pointer to struct nfp_nffw_info state, or ERR_PTR()
+ */
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+ struct nfp_nffw_info_data *fwinf;
+ struct nfp_nffw_info *state;
+ u32 info_ver;
+ int err;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+ if (IS_ERR(state->res))
+ goto err_free;
+
+ fwinf = &state->fwinf;
+
+ if (sizeof(*fwinf) > nfp_resource_size(state->res))
+ goto err_release;
+
+ err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+ nfp_resource_address(state->res),
+ fwinf, sizeof(*fwinf));
+ if (err < sizeof(*fwinf))
+ goto err_release;
+
+ if (!nffw_res_flg_init_get(fwinf))
+ goto err_release;
+
+ info_ver = nffw_res_info_version_get(fwinf);
+ if (info_ver > NFFW_INFO_VERSION_CURRENT)
+ goto err_release;
+
+ state->cpp = cpp;
+ return state;
+
+err_release:
+ nfp_resource_release(state->res);
+err_free:
+ kfree(state);
+ return ERR_PTR(-EIO);
+}
+
+/**
+ * nfp_nffw_info_close() - Release the lock on the NFFW table
+ * @state: NFP FW info state
+ */
+void nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+ nfp_resource_release(state->res);
+ kfree(state);
+}
+
+/**
+ * nfp_nffw_info_fwid_first() - Return the first loaded firmware info in the NFFW
+ * @state: NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+ struct nffw_fwinfo *fwinfo;
+ unsigned int cnt, i;
+
+ cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+ if (!cnt)
+ return NULL;
+
+ for (i = 0; i < cnt; i++)
+ if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+ return &fwinfo[i];
+
+ return NULL;
+}
+
+/**
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state: NFP FW info state
+ * @cpp_id: Pointer to the CPP ID of the MIP
+ * @off: Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off)
+{
+ struct nffw_fwinfo *fwinfo;
+
+ fwinfo = nfp_nffw_info_fwid_first(state);
+ if (!fwinfo)
+ return -EINVAL;
+
+ *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+ *off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+ if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+ int locality_off;
+
+ if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+ return 0;
+
+ locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+ if (locality_off < 0)
+ return locality_off;
+
+ *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+ *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 000000000000..988badd230d1
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nffw.h
+ * Authors: Jason McMullan <jason.mcmullan@netronome.com>
+ * Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#ifndef NFP_NFFW_H
+#define NFP_NFFW_H
+
+/* Implemented in nfp_nffw.c */
+
+struct nfp_nffw_info;
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off);
+
+/* Implemented in nfp_mip.c */
+
+struct nfp_mip;
+
+const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
+void nfp_mip_close(const struct nfp_mip *mip);
+
+void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
+void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
+
+/* Implemented in nfp_rtsym.c */
+
+#define NFP_RTSYM_TYPE_NONE 0
+#define NFP_RTSYM_TYPE_OBJECT 1
+#define NFP_RTSYM_TYPE_FUNCTION 2
+#define NFP_RTSYM_TYPE_ABS 3
+
+#define NFP_RTSYM_TARGET_NONE 0
+#define NFP_RTSYM_TARGET_LMEM -1
+#define NFP_RTSYM_TARGET_EMU_CACHE -7
+
+/**
+ * struct nfp_rtsym - RTSYM descriptor
+ * @name: Symbol name
+ * @addr: Address in the domain/target's address space
+ * @size: Size (in bytes) of the symbol
+ * @type: NFP_RTSYM_TYPE_* of the symbol
+ * @target: CPP Target identifier, or NFP_RTSYM_TARGET_*
+ * @domain: CPP Target Domain (island)
+ */
+struct nfp_rtsym {
+ const char *name;
+ u64 addr;
+ u64 size;
+ int type;
+ int target;
+ int domain;
+};
+
+int nfp_rtsym_count(struct nfp_cpp *cpp);
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
+const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
+u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
+
+#endif /* NFP_NFFW_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 000000000000..34c50987c377
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nsp.c
+ * Author: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS 0x00
+#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
+#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44)
+#define NSP_STATUS_MINOR GENMASK_ULL(43, 32)
+#define NSP_STATUS_CODE GENMASK_ULL(31, 16)
+#define NSP_STATUS_RESULT GENMASK_ULL(15, 8)
+#define NSP_STATUS_BUSY BIT_ULL(0)
+
+#define NSP_COMMAND 0x08
+#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
+#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
+#define NSP_COMMAND_START BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER 0x10
+#define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
+#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
+#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER 0x18
+
+#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC 0xab10
+#define NSP_MAJOR 0
+#define NSP_MINOR (__MAX_SPCODE - 1)
+
+#define NSP_CODE_MAJOR GENMASK(15, 12)
+#define NSP_CODE_MINOR GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+ SPCODE_NOOP = 0, /* No operation */
+ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
+ SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */
+ SPCODE_PHY_INIT = 3, /* Initialize the PHY */
+ SPCODE_MAC_INIT = 4, /* Initialize the MAC */
+ SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */
+ SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
+ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
+ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+
+ __MAX_SPCODE,
+};
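+
+/* Editor's note: nfp_nsp_command() below composes the NSP_COMMAND register
+ * from these codes, e.g. for a no-op:
+ *
+ *	FIELD_PREP(NSP_COMMAND_OPTION, 0) |
+ *	FIELD_PREP(NSP_COMMAND_CODE, SPCODE_NOOP) |
+ *	FIELD_PREP(NSP_COMMAND_START, 1)
+ */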
+
+struct nfp_nsp {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+ struct {
+ u16 major;
+ u16 minor;
+ } ver;
+};
+
+static int nfp_nsp_check(struct nfp_nsp *state)
+{
+ struct nfp_cpp *cpp = state->cpp;
+ u64 nsp_status, reg;
+ u32 nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+ if (err < 0)
+ return err;
+
+ if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+ nfp_err(cpp, "Cannot detect NFP Service Processor\n");
+ return -ENODEV;
+ }
+
+ state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
+ state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
+
+ if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ nfp_err(cpp, "Unsupported ABI %hu.%hu\n",
+ state->ver.major, state->ver.minor);
+ return -EINVAL;
+ }
+
+ if (reg & NSP_STATUS_BUSY) {
+ nfp_err(cpp, "Service processor busy!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp: NFP CPP Handle
+ *
+ * Return: NFP NSP handle on success, or ERR_PTR()
+ */
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
+{
+ struct nfp_resource *res;
+ struct nfp_nsp *state;
+ int err;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+ if (IS_ERR(res))
+ return (void *)res;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ nfp_resource_release(res);
+ return ERR_PTR(-ENOMEM);
+ }
+ state->cpp = cpp;
+ state->res = res;
+
+ err = nfp_nsp_check(state);
+ if (err) {
+ nfp_nsp_close(state);
+ return ERR_PTR(err);
+ }
+
+ return state;
+}
+
+/**
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.
+ * @state: NFP SP state
+ */
+void nfp_nsp_close(struct nfp_nsp *state)
+{
+ nfp_resource_release(state->res);
+ kfree(state);
+}
+
+u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
+{
+ return state->ver.major;
+}
+
+u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
+{
+ return state->ver.minor;
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
+ u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+{
+ const unsigned long wait_until = jiffies + 30 * HZ;
+ int err;
+
+ for (;;) {
+ const unsigned long start_time = jiffies;
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+ if (err < 0)
+ return err;
+
+ if ((*reg & mask) == val)
+ return 0;
+
+ err = msleep_interruptible(100);
+ if (err)
+ return err;
+
+ if (time_after(start_time, wait_until))
+ return -ETIMEDOUT;
+ }
+}
+
+/**
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state: NFP SP state
+ * @code: NFP SP Command Code
+ * @option: NFP SP Command Argument
+ * @buff_cpp: NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ * 1..255 for NSP completion with a result code
+ *
+ * -EAGAIN if the NSP is not yet present
+ * -ENODEV if the NSP is not a supported model
+ * -EBUSY if the NSP is stuck
+ * -EINTR if interrupted while waiting for completion
+ * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ */
+static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
+ u32 buff_cpp, u64 buff_addr)
+{
+ u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ struct nfp_cpp *cpp = state->cpp;
+ u32 nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_base = nfp_resource_address(state->res);
+ nsp_status = nsp_base + NSP_STATUS;
+ nsp_command = nsp_base + NSP_COMMAND;
+ nsp_buffer = nsp_base + NSP_BUFFER;
+
+ err = nfp_nsp_check(state);
+ if (err)
+ return err;
+
+ if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+ nfp_err(cpp, "Host buffer out of reach %08x %016llx\n",
+ buff_cpp, buff_addr);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+ FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+ FIELD_PREP(NSP_COMMAND_OPTION, option) |
+ FIELD_PREP(NSP_COMMAND_CODE, code) |
+ FIELD_PREP(NSP_COMMAND_START, 1));
+ if (err < 0)
+ return err;
+
+ /* Wait for NSP_COMMAND_START to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg,
+ nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+ if (err) {
+ nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
+ err, code);
+ return err;
+ }
+
+ /* Wait for NSP_STATUS_BUSY to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg,
+ nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+ if (err) {
+ nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
+ err, code);
+ return err;
+ }
+
+ err = FIELD_GET(NSP_STATUS_RESULT, reg);
+ if (err) {
+ nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
+ -err, code);
+ return -err;
+ }
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
+ if (err < 0)
+ return err;
+
+ return FIELD_GET(NSP_COMMAND_OPTION, reg);
+}
+
+static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size,
+ void *out_buf, unsigned int out_size)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ unsigned int max_size;
+ u64 reg, cpp_buf;
+ int ret, err;
+ u32 cpp_id;
+
+ if (nsp->ver.minor < 13) {
+ nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n",
+ code, nsp->ver.major, nsp->ver.minor);
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER_CONFIG,
+ &reg);
+ if (err < 0)
+ return err;
+
+ max_size = max(in_size, out_size);
+ if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+ nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n",
+ code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+ max_size);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER,
+ &reg);
+ if (err < 0)
+ return err;
+
+ cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+ cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+ if (in_buf && in_size) {
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+ if (err < 0)
+ return err;
+ }
+
+ ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ if (ret < 0)
+ return ret;
+
+ if (out_buf && out_size) {
+ err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+ if (err < 0)
+ return err;
+ }
+
+ return ret;
+}
+
+int nfp_nsp_wait(struct nfp_nsp *state)
+{
+ const unsigned long wait_until = jiffies + 30 * HZ;
+ int err;
+
+ nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+
+ for (;;) {
+ const unsigned long start_time = jiffies;
+
+ err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+ if (err != -EAGAIN)
+ break;
+
+ err = msleep_interruptible(100);
+ if (err)
+ break;
+
+ if (time_after(start_time, wait_until)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (err)
+ nfp_err(state->cpp, "NSP failed to respond %d\n", err);
+
+ return err;
+}
+
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+ int err;
+
+ err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+
+ nfp_nffw_cache_flush(state->cpp);
+
+ return err;
+}
+
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
+{
+ return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
+ fw->size, NULL, 0);
+}
+
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+ buf, size);
+}
+
+int nfp_nsp_write_eth_table(struct nfp_nsp *state,
+ const void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+ NULL, 0);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644
index 000000000000..1ece1f8ae4b3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Authors: David Brunecz <david.brunecz@netronome.com>
+ * Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason Mcmullan <jason.mcmullan@netronome.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "nfp.h"
+#include "nfp_nsp_eth.h"
+#include "nfp6000/nfp6000.h"
+
+#define NSP_ETH_NBI_PORT_COUNT 24
+#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
+ sizeof(struct eth_table_entry))
+
+#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+
+#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+
+#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+
+enum nfp_eth_rate {
+ RATE_INVALID = 0,
+ RATE_10M,
+ RATE_100M,
+ RATE_1G,
+ RATE_10G,
+ RATE_25G,
+};
+
+struct eth_table_entry {
+ __le64 port;
+ __le64 state;
+ u8 mac_addr[6];
+ u8 resv[2];
+ __le64 control;
+};
+
+static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
+{
+ unsigned int rate_xlate[] = {
+ [RATE_INVALID] = 0,
+ [RATE_10M] = SPEED_10,
+ [RATE_100M] = SPEED_100,
+ [RATE_1G] = SPEED_1000,
+ [RATE_10G] = SPEED_10000,
+ [RATE_25G] = SPEED_25000,
+ };
+
+ if (rate >= ARRAY_SIZE(rate_xlate))
+ return 0;
+
+ return rate_xlate[rate];
+}
+
+static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[ETH_ALEN - i - 1] = src[i];
+}
+
+static void
+nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
+ struct nfp_eth_table_port *dst)
+{
+ unsigned int rate;
+ u64 port, state;
+
+ port = le64_to_cpu(src->port);
+ state = le64_to_cpu(src->state);
+
+ dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
+ dst->index = index;
+ dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
+ dst->base = index % NSP_ETH_NBI_PORT_COUNT;
+ dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
+
+ dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
+ dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
+ dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
+
+ rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ dst->speed = dst->lanes * rate;
+
+ nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
+
+ snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
+ FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
+ FIELD_GET(NSP_ETH_PORT_LABEL, port));
+}
+
+/**
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp: NFP CPP handle
+ *
+ * Read the port information from the device. Returned structure should
+ * be freed with kfree() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
+{
+ struct nfp_eth_table *ret;
+ struct nfp_nsp *nsp;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp))
+ return NULL;
+
+ ret = __nfp_eth_read_ports(cpp, nsp);
+ nfp_nsp_close(nsp);
+
+ return ret;
+}
+
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
+{
+ struct eth_table_entry *entries;
+ struct nfp_eth_table *table;
+ unsigned int cnt;
+ int i, j, ret;
+
+ entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
+ if (!entries)
+ return NULL;
+
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ nfp_err(cpp, "reading port table failed %d\n", ret);
+ kfree(entries);
+ return NULL;
+ }
+
+ /* Some versions of flash will give us 0 instead of port count */
+ cnt = ret;
+ if (!cnt) {
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+ }
+
+ table = kzalloc(sizeof(*table) +
+ sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
+ if (!table) {
+ kfree(entries);
+ return NULL;
+ }
+
+ table->count = cnt;
+ for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ nfp_eth_port_translate(&entries[i], i,
+ &table->ports[j++]);
+
+ kfree(entries);
+
+ return table;
+}
+
+/**
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+{
+ struct eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+ int ret;
+
+ entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp)) {
+ kfree(entries);
+ return PTR_ERR(nsp);
+ }
+
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ nfp_err(cpp, "reading port table failed %d\n", ret);
+ goto exit_close_nsp;
+ }
+
+ if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
+ nfp_warn(cpp, "trying to set port state on disabled port %d\n",
+ idx);
+ ret = -EINVAL;
+ goto exit_close_nsp;
+ }
+
+ /* Check if we are already in requested state */
+ reg = le64_to_cpu(entries[idx].state);
+ if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ ret = 0;
+ goto exit_close_nsp;
+ }
+
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = cpu_to_le64(reg);
+
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ kfree(entries);
+
+ return ret < 0 ? ret : 0;
+}
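+
+/* Editor's illustrative usage sketch (not part of this patch):
+ *
+ *	struct nfp_eth_table *eth_tbl = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	if (!eth_tbl)
+ *		return -EIO;
+ *	for (i = 0; i < eth_tbl->count; i++)
+ *		pr_info("port %u: speed %u Mbps\n",
+ *			eth_tbl->ports[i].eth_index, eth_tbl->ports[i].speed);
+ *	kfree(eth_tbl);
+ */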
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
new file mode 100644
index 000000000000..edf703d319c8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NSP_NSP_ETH_H
+#define NSP_NSP_ETH_H 1
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @mac_addr: interface MAC address
+ * @label: interface id string
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ u8 mac_addr[ETH_ALEN];
+ char label[8];
+
+ bool enabled;
+ bool tx_enabled;
+ bool rx_enabled;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
new file mode 100644
index 000000000000..a2850344f8b4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_resource.c
+ * Author: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "crc32.h"
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ 8
+
+/**
+ * struct nfp_resource_entry - Resource table entry
+ * @owner: NFP CPP Lock, interface owner
+ * @key: NFP CPP Lock, posix_crc32(name, 8)
+ * @region: Memory region descriptor
+ * @name: ASCII, zero padded name
+ * @reserved:	Reserved for future use
+ * @cpp_action: CPP Action
+ * @cpp_token: CPP Token
+ * @cpp_target: CPP Target ID
+ * @page_offset: 256-byte page offset into target's CPP address
+ * @page_size: size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+ struct nfp_resource_entry_mutex {
+ u32 owner;
+ u32 key;
+ } mutex;
+ struct nfp_resource_entry_region {
+ u8 name[NFP_RESOURCE_ENTRY_NAME_SZ];
+ u8 reserved[5];
+ u8 cpp_action;
+ u8 cpp_token;
+ u8 cpp_target;
+ u32 page_offset;
+ u32 page_size;
+ } region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE 4096
+#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \
+ sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+ char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+ u32 cpp_id;
+ u64 addr;
+ u64 size;
+ struct nfp_cpp_mutex *mutex;
+};
+
+static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+ char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
+ struct nfp_resource_entry entry;
+ u32 cpp_id, key;
+ int ret, i;
+
+ cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
+
+ strncpy(name_pad, res->name, sizeof(name_pad));
+
+ /* Search for a matching entry */
+ key = NFP_RESOURCE_TBL_KEY;
+ if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
+ key = crc32_posix(name_pad, sizeof(name_pad));
+
+ for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ u64 addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+ if (ret != sizeof(entry))
+ return -EIO;
+
+ if (entry.mutex.key != key)
+ continue;
+
+ /* Found key! */
+ res->mutex =
+ nfp_cpp_mutex_alloc(cpp,
+ NFP_RESOURCE_TBL_TARGET, addr, key);
+ res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+ entry.region.cpp_action,
+ entry.region.cpp_token);
+ res->addr = (u64)entry.region.page_offset << 8;
+ res->size = (u64)entry.region.page_size << 8;
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+ struct nfp_cpp_mutex *dev_mutex)
+{
+ int err;
+
+ if (nfp_cpp_mutex_lock(dev_mutex))
+ return -EINVAL;
+
+ err = nfp_cpp_resource_find(cpp, res);
+ if (err)
+ goto err_unlock_dev;
+
+ err = nfp_cpp_mutex_trylock(res->mutex);
+ if (err)
+ goto err_res_mutex_free;
+
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return 0;
+
+err_res_mutex_free:
+ nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return err;
+}
+
+/**
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp: NFP CPP handle
+ * @name: Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+ unsigned long warn_at = jiffies + 15 * HZ;
+ struct nfp_cpp_mutex *dev_mutex;
+ struct nfp_resource *res;
+ int err;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex) {
+ kfree(res);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (;;) {
+ err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+ if (!err)
+ break;
+ if (err != -EBUSY)
+ goto err_free;
+
+ err = msleep_interruptible(1);
+ if (err != 0) {
+ err = -ERESTARTSYS;
+ goto err_free;
+ }
+
+ if (time_is_before_eq_jiffies(warn_at)) {
+ warn_at = jiffies + 60 * HZ;
+ nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
+ name);
+ }
+ }
+
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return res;
+
+err_free:
+ nfp_cpp_mutex_free(dev_mutex);
+ kfree(res);
+ return ERR_PTR(err);
+}
+
+/**
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res: NFP Resource handle
+ *
+ * NOTE: This function implicitly unlocks the resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res)
+{
+ nfp_cpp_mutex_unlock(res->mutex);
+ nfp_cpp_mutex_free(res->mutex);
+ kfree(res);
+}
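+
+/* Editor's illustrative usage sketch (not part of this patch), showing the
+ * acquire/use/release pattern (see also hwinfo_try_fetch()):
+ *
+ *	struct nfp_resource *res;
+ *
+ *	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ *	... use nfp_resource_cpp_id(res) / nfp_resource_address(res) ...
+ *	nfp_resource_release(res);
+ */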
+
+/**
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+u32 nfp_resource_cpp_id(struct nfp_resource *res)
+{
+ return res->cpp_id;
+}
+
+/**
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char *nfp_resource_name(struct nfp_resource *res)
+{
+ return res->name;
+}
+
+/**
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+u64 nfp_resource_address(struct nfp_resource *res)
+{
+ return res->addr;
+}
+
+/**
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+u64 nfp_resource_size(struct nfp_resource *res)
+{
+ return res->size;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
new file mode 100644
index 000000000000..0e3870ecfb8c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Espen Skoglund <espen.skoglund@netronome.com>
+ * Francois H. Theron <francois.theron@netronome.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM 0
+#define SYM_TGT_EMU_CACHE 0x17
+
+struct nfp_rtsym_entry {
+ u8 type;
+ u8 target;
+ u8 island;
+ u8 addr_hi;
+ __le32 addr_lo;
+ __le16 name;
+ u8 menum;
+ u8 size_hi;
+ __le32 size_lo;
+};
+
+struct nfp_rtsym_cache {
+ int num;
+ char *strtab;
+ struct nfp_rtsym symtab[];
+};
+
+static int nfp_meid(u8 island_id, u8 menum)
+{
+ return (island_id & 0x3F) == island_id && menum < 12 ?
+ (island_id << 4) | (menum + 4) : -1;
+}
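+
+/* Editor's worked example: island_id 32, menum 0 gives (32 << 4) | (0 + 4) =
+ * 0x204; menum values of 12 or more fall outside the ME range and yield -1.
+ */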
+
+static void
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
+ struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
+{
+ sw->type = fw->type;
+ sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size;
+ sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo);
+ sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo);
+
+ switch (fw->target) {
+ case SYM_TGT_LMEM:
+ sw->target = NFP_RTSYM_TARGET_LMEM;
+ break;
+ case SYM_TGT_EMU_CACHE:
+ sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
+ break;
+ default:
+ sw->target = fw->target;
+ break;
+ }
+
+ if (fw->menum != 0xff)
+ sw->domain = nfp_meid(fw->island, fw->menum);
+ else if (fw->island != 0xff)
+ sw->domain = fw->island;
+ else
+ sw->domain = -1;
+}
+
+static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
+{
+ const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
+ NFP_ISL_EMEM0;
+ u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
+ struct nfp_rtsym_entry *rtsymtab;
+ struct nfp_rtsym_cache *cache;
+ const struct nfp_mip *mip;
+ int err, n, size;
+
+ mip = nfp_mip_open(cpp);
+ if (!mip)
+ return -EIO;
+
+ nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
+ nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
+ nfp_mip_close(mip);
+
+ if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
+ return -ENXIO;
+
+ /* Align to 64 bits */
+ symtab_size = round_up(symtab_size, 8);
+ strtab_size = round_up(strtab_size, 8);
+
+ rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
+ if (!rtsymtab)
+ return -ENOMEM;
+
+ size = sizeof(*cache);
+ size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
+ size += strtab_size + 1;
+ cache = kmalloc(size, GFP_KERNEL);
+ if (!cache) {
+ err = -ENOMEM;
+ goto err_free_rtsym_raw;
+ }
+
+ cache->num = symtab_size / sizeof(*rtsymtab);
+ cache->strtab = (void *)&cache->symtab[cache->num];
+
+ err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
+ if (err != symtab_size)
+ goto err_free_cache;
+
+ err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
+ if (err != strtab_size)
+ goto err_free_cache;
+ cache->strtab[strtab_size] = '\0';
+
+ for (n = 0; n < cache->num; n++)
+ nfp_rtsym_sw_entry_init(cache, strtab_size,
+ &cache->symtab[n], &rtsymtab[n]);
+
+ kfree(rtsymtab);
+ nfp_rtsym_cache_set(cpp, cache);
+ return 0;
+
+err_free_cache:
+ kfree(cache);
+err_free_rtsym_raw:
+ kfree(rtsymtab);
+ return err;
+}
+
+static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_cache *cache;
+ int err;
+
+ cache = nfp_rtsym_cache(cpp);
+ if (cache)
+ return cache;
+
+ err = nfp_rtsymtab_probe(cpp);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return nfp_rtsym_cache(cpp);
+}
+
+/**
+ * nfp_rtsym_count() - Get the number of RTSYM descriptors
+ * @cpp: NFP CPP handle
+ *
+ * Return: Number of RTSYM descriptors, or -ERRNO
+ */
+int nfp_rtsym_count(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_cache *cache;
+
+ cache = nfp_rtsym(cpp);
+ if (IS_ERR(cache))
+ return PTR_ERR(cache);
+
+ return cache->num;
+}
+
+/**
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @cpp: NFP CPP handle
+ * @idx: Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
+{
+ struct nfp_rtsym_cache *cache;
+
+ cache = nfp_rtsym(cpp);
+ if (IS_ERR(cache))
+ return NULL;
+
+ if (idx >= cache->num)
+ return NULL;
+
+ return &cache->symtab[idx];
+}
+
+/**
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @cpp: NFP CPP handle
+ * @name: Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
+{
+ struct nfp_rtsym_cache *cache;
+ int n;
+
+ cache = nfp_rtsym(cpp);
+ if (IS_ERR(cache))
+ return NULL;
+
+ for (n = 0; n < cache->num; n++) {
+ if (strcmp(name, cache->symtab[n].name) == 0)
+ return &cache->symtab[n];
+ }
+
+ return NULL;
+}
+
+/**
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @cpp: NFP CPP handle
+ * @name: Symbol name
+ * @error:	Pointer to error code (optional)
+ *
+ * Look up a symbol, map it, read it and return its value. The value of the symbol
+ * will be interpreted as a simple little-endian unsigned value. Symbol can
+ * be 4 or 8 bytes in size.
+ *
+ * Return: value read, on error sets the error and returns ~0ULL.
+ */
+u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
+{
+ const struct nfp_rtsym *sym;
+ u32 val32, id;
+ u64 val;
+ int err;
+
+ sym = nfp_rtsym_lookup(cpp, name);
+ if (!sym) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+ switch (sym->size) {
+ case 4:
+ err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
+ val = val32;
+ break;
+ case 8:
+ err = nfp_cpp_readq(cpp, id, sym->addr, &val);
+ break;
+ default:
+ nfp_err(cpp,
+ "rtsym '%s' unsupported or non-scalar size: %lld\n",
+ name, sym->size);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err == sym->size)
+ err = 0;
+ else if (err >= 0)
+ err = -EIO;
+exit:
+ if (error)
+ *error = err;
+
+ if (err)
+ return ~0ULL;
+ return val;
+}
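+
+/* Editor's illustrative usage sketch (not part of this patch); the symbol
+ * name "_hypothetical_counter" is a placeholder only:
+ *
+ *	int err;
+ *	u64 val = nfp_rtsym_read_le(cpp, "_hypothetical_counter", &err);
+ *
+ *	if (err)
+ *		return err;
+ */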
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
new file mode 100644
index 000000000000..4ea1e585d945
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_target.c
+ * CPP Access Width Decoder
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ * Jason McMullan <jason.mcmullan@netronome.com>
+ * Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#include <linux/bitops.h>
+
+#include "nfp_cpp.h"
+
+#include "nfp6000/nfp6000.h"
+
+#define P32 1
+#define P64 2
+
+/* This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+
+#define AT(_action, _token, _pull, _push) \
+ case NFP_CPP_ID(0, (_action), (_token)): \
+ return PUSHPULL((_pull), (_push))
+
+static int target_rw(u32 cpp_id, int pp, int start, int len)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 0, 0, pp);
+ AT(1, 0, pp, 0);
+ AT(NFP_CPP_ACTION_RW, 0, pp, pp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_nbi_dma(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 0, 0, P64); /* ReadNbiDma */
+ AT(1, 0, P64, 0); /* WriteNbiDma */
+ AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_nbi_stats(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 0, 0, P32); /* ReadNbiStats */
+ AT(1, 0, P32, 0); /* WriteNbiStats */
+ AT(NFP_CPP_ACTION_RW, 0, P32, P32);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_nbi_tm(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 0, 0, P64); /* ReadNbiTM */
+ AT(1, 0, P64, 0); /* WriteNbiTM */
+ AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_nbi_ppc(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 0, 0, P64); /* ReadNbiPreclassifier */
+ AT(1, 0, P64, 0); /* WriteNbiPreclassifier */
+ AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_nbi(u32 cpp_id, u64 address)
+{
+	u64 rel_addr = address & 0x3fffff;
+
+ if (rel_addr < (1 << 20))
+ return nfp6000_nbi_dma(cpp_id);
+ if (rel_addr < (2 << 20))
+ return nfp6000_nbi_stats(cpp_id);
+ if (rel_addr < (3 << 20))
+ return nfp6000_nbi_tm(cpp_id);
+ return nfp6000_nbi_ppc(cpp_id);
+}
+
+/* This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+static int nfp6000_mu_common(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(NFP_CPP_ACTION_RW, 0, P64, P64); /* read_be/write_be */
+ AT(NFP_CPP_ACTION_RW, 1, P64, P64); /* read_le/write_le */
+ AT(NFP_CPP_ACTION_RW, 2, P64, P64); /* read_swap_be/write_swap_be */
+ AT(NFP_CPP_ACTION_RW, 3, P64, P64); /* read_swap_le/write_swap_le */
+ AT(0, 0, 0, P64); /* read_be */
+ AT(0, 1, 0, P64); /* read_le */
+ AT(0, 2, 0, P64); /* read_swap_be */
+ AT(0, 3, 0, P64); /* read_swap_le */
+ AT(1, 0, P64, 0); /* write_be */
+ AT(1, 1, P64, 0); /* write_le */
+ AT(1, 2, P64, 0); /* write_swap_be */
+ AT(1, 3, P64, 0); /* write_swap_le */
+ AT(3, 0, 0, P32); /* atomic_read */
+ AT(3, 2, P32, 0); /* mask_compare_write */
+ AT(4, 0, P32, 0); /* atomic_write */
+ AT(4, 2, 0, 0); /* atomic_write_imm */
+ AT(4, 3, 0, P32); /* swap_imm */
+ AT(5, 0, P32, 0); /* set */
+ AT(5, 3, 0, P32); /* test_set_imm */
+ AT(6, 0, P32, 0); /* clr */
+ AT(6, 3, 0, P32); /* test_clr_imm */
+ AT(7, 0, P32, 0); /* add */
+ AT(7, 3, 0, P32); /* test_add_imm */
+ AT(8, 0, P32, 0); /* addsat */
+	AT(8, 3, 0, P32);	/* test_addsat_imm */
+ AT(9, 0, P32, 0); /* sub */
+ AT(9, 3, 0, P32); /* test_sub_imm */
+ AT(10, 0, P32, 0); /* subsat */
+ AT(10, 3, 0, P32); /* test_subsat_imm */
+ AT(13, 0, 0, P32); /* microq128_get */
+ AT(13, 1, 0, P32); /* microq128_pop */
+ AT(13, 2, P32, 0); /* microq128_put */
+ AT(15, 0, P32, 0); /* xor */
+ AT(15, 3, 0, P32); /* test_xor_imm */
+ AT(28, 0, 0, P32); /* read32_be */
+ AT(28, 1, 0, P32); /* read32_le */
+ AT(28, 2, 0, P32); /* read32_swap_be */
+ AT(28, 3, 0, P32); /* read32_swap_le */
+ AT(31, 0, P32, 0); /* write32_be */
+ AT(31, 1, P32, 0); /* write32_le */
+ AT(31, 2, P32, 0); /* write32_swap_be */
+ AT(31, 3, P32, 0); /* write32_swap_le */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp6000_mu_ctm(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(16, 1, 0, P32); /* packet_read_packet_status */
+ AT(17, 1, 0, P32); /* packet_credit_get */
+ AT(17, 3, 0, P64); /* packet_add_thread */
+ AT(18, 2, 0, P64); /* packet_free_and_return_pointer */
+ AT(18, 3, 0, P64); /* packet_return_pointer */
+ AT(21, 0, 0, P64); /* pe_dma_to_memory_indirect */
+ AT(21, 1, 0, P64); /* pe_dma_to_memory_indirect_swap */
+ AT(21, 2, 0, P64); /* pe_dma_to_memory_indirect_free */
+ AT(21, 3, 0, P64); /* pe_dma_to_memory_indirect_free_swap */
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static int nfp6000_mu_emu(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(18, 0, 0, P32); /* read_queue */
+ AT(18, 1, 0, P32); /* read_queue_ring */
+ AT(18, 2, P32, 0); /* write_queue */
+ AT(18, 3, P32, 0); /* write_queue_ring */
+ AT(20, 2, P32, 0); /* journal */
+ AT(21, 0, 0, P32); /* get */
+ AT(21, 1, 0, P32); /* get_eop */
+ AT(21, 2, 0, P32); /* get_freely */
+ AT(22, 0, 0, P32); /* pop */
+ AT(22, 1, 0, P32); /* pop_eop */
+ AT(22, 2, 0, P32); /* pop_freely */
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static int nfp6000_mu_imu(u32 cpp_id)
+{
+ return nfp6000_mu_common(cpp_id);
+}
+
+static int nfp6000_mu(u32 cpp_id, u64 address)
+{
+ int pp;
+
+ if (address < 0x2000000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x8000000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0x9800000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x9C00000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0xA000000000ULL)
+ pp = nfp6000_mu_imu(cpp_id);
+ else
+ pp = nfp6000_mu_ctm(cpp_id);
+
+ return pp;
+}
+
+static int nfp6000_ila(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 1, 0, P32); /* read_check_error */
+ AT(2, 0, 0, P32); /* read_int */
+ AT(3, 0, P32, 0); /* write_int */
+ default:
+ return target_rw(cpp_id, P32, 48, 4);
+ }
+}
+
+static int nfp6000_pci(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(2, 0, 0, P32);
+ AT(3, 0, P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 4, 4);
+ }
+}
+
+static int nfp6000_crypto(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(2, 0, P64, 0);
+ default:
+ return target_rw(cpp_id, P64, 12, 4);
+ }
+}
+
+static int nfp6000_cap_xpb(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 1, 0, P32); /* RingGet */
+ AT(0, 2, P32, 0); /* Interthread Signal */
+ AT(1, 1, P32, 0); /* RingPut */
+ AT(1, 2, P32, 0); /* CTNNWr */
+ AT(2, 0, 0, P32); /* ReflectRd, signal none */
+ AT(2, 1, 0, P32); /* ReflectRd, signal self */
+ AT(2, 2, 0, P32); /* ReflectRd, signal remote */
+ AT(2, 3, 0, P32); /* ReflectRd, signal both */
+ AT(3, 0, P32, 0); /* ReflectWr, signal none */
+ AT(3, 1, P32, 0); /* ReflectWr, signal self */
+ AT(3, 2, P32, 0); /* ReflectWr, signal remote */
+ AT(3, 3, P32, 0); /* ReflectWr, signal both */
+ AT(NFP_CPP_ACTION_RW, 1, P32, P32);
+ default:
+ return target_rw(cpp_id, P32, 1, 63);
+ }
+}
+
+static int nfp6000_cls(u32 cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ AT(0, 3, P32, 0); /* xor */
+ AT(2, 0, P32, 0); /* set */
+ AT(2, 1, P32, 0); /* clr */
+ AT(4, 0, P32, 0); /* add */
+ AT(4, 1, P32, 0); /* add64 */
+ AT(6, 0, P32, 0); /* sub */
+ AT(6, 1, P32, 0); /* sub64 */
+ AT(6, 2, P32, 0); /* subsat */
+ AT(8, 2, P32, 0); /* hash_mask */
+ AT(8, 3, P32, 0); /* hash_clear */
+ AT(9, 0, 0, P32); /* ring_get */
+ AT(9, 1, 0, P32); /* ring_pop */
+ AT(9, 2, 0, P32); /* ring_get_freely */
+ AT(9, 3, 0, P32); /* ring_pop_freely */
+ AT(10, 0, P32, 0); /* ring_put */
+ AT(10, 2, P32, 0); /* ring_journal */
+ AT(14, 0, P32, 0); /* reflect_write_sig_local */
+ AT(15, 1, 0, P32); /* reflect_read_sig_local */
+	AT(17, 2, P32, 0);	/* statistic */
+ AT(24, 0, 0, P32); /* ring_read */
+ AT(24, 1, P32, 0); /* ring_write */
+ AT(25, 0, 0, P32); /* ring_workq_add_thread */
+ AT(25, 1, P32, 0); /* ring_workq_add_work */
+ default:
+ return target_rw(cpp_id, P32, 0, 64);
+ }
+}
+
+int nfp_target_pushpull(u32 cpp_id, u64 address)
+{
+ switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
+ case NFP_CPP_TARGET_NBI:
+ return nfp6000_nbi(cpp_id, address);
+ case NFP_CPP_TARGET_QDR:
+ return target_rw(cpp_id, P32, 24, 4);
+ case NFP_CPP_TARGET_ILA:
+ return nfp6000_ila(cpp_id);
+ case NFP_CPP_TARGET_MU:
+ return nfp6000_mu(cpp_id, address);
+ case NFP_CPP_TARGET_PCIE:
+ return nfp6000_pci(cpp_id);
+ case NFP_CPP_TARGET_ARM:
+ if (address < 0x10000)
+ return target_rw(cpp_id, P64, 1, 1);
+ else
+ return target_rw(cpp_id, P32, 1, 1);
+ case NFP_CPP_TARGET_CRYPTO:
+ return nfp6000_crypto(cpp_id);
+ case NFP_CPP_TARGET_CT_XPB:
+ return nfp6000_cap_xpb(cpp_id);
+ case NFP_CPP_TARGET_CLS:
+ return nfp6000_cls(cpp_id);
+ case 0:
+ return target_rw(cpp_id, P32, 4, 4);
+ default:
+ return -EINVAL;
+ }
+}
+
+#undef AT
+#undef P32
+#undef P64
+
+/* All magic NFP-6xxx IMB 'mode' numbers here are from:
+ * Databook (1 August 2013)
+ * - System Overview and Connectivity
+ * -- Internal Connectivity
+ * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
+ * ---- CPP addressing
+ * ----- Table 3.6. CPP Address Translation Mode Commands
+ */
+
+#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2
+
+static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt,
+ int mode, bool addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb;
+
+	/* This function doesn't handle MU or CT_XPB */
+ if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
+ return -EINVAL;
+
+ switch (mode) {
+ case 0:
+ /* For VQDR, in this mode for 32-bit addressing
+ * it would be islands 0, 16, 32 and 48 depending on channel
+ * and upper address bits.
+ * Since those are not all valid islands, most decode
+ * cases would result in bad island IDs, but we do them
+ * anyway since this is decoding an address that is already
+ * assumed to be used as-is to get to sram.
+ */
+ iid_lsb = addr40 ? 34 : 26;
+ *dest_island = (addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ /* For VQDR 32-bit, this would decode as:
+ * Channel 0: island#0
+ * Channel 1: island#0
+ * Channel 2: island#1
+ * Channel 3: island#1
+ * That would be valid as long as both islands
+ * have VQDR. Let's allow this.
+ */
+ idx_lsb = addr40 ? 39 : 31;
+ if (addr & BIT_ULL(idx_lsb))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ /* For VQDR 32-bit:
+ * Channel 0: (island#0 | 0)
+ * Channel 1: (island#0 | 1)
+ * Channel 2: (island#1 | 0)
+ * Channel 3: (island#1 | 1)
+ *
+ * Make sure we compare against isldN values
+ * by clearing the LSB.
+ * This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = addr40 ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & BIT_ULL(idx_lsb))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ /* In this mode the data address starts to affect the island ID
+		 * so we'd rather not allow it. In some really specific case
+ * one could use this to send the upper half of the
+ * VQDR channel to another MU, but this is getting very
+ * specific.
+ * However, as above for mode 0, this is the decoder
+ * and the caller should validate the resulting IID.
+ * This blindly does what the silicon would do.
+ */
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = addr40 ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & BIT_ULL(idx_lsb))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp_encode_basic_qdr(u64 addr, int cpp_tgt, int dest_island,
+				int mode, bool addr40, int isld1, int isld0)
+{
+ int v, ret;
+
+ /* Full Island ID and channel bits overlap? */
+ ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0);
+ if (ret)
+ return ret;
+
+ /* The current address won't go where expected? */
+ if (dest_island != -1 && dest_island != v)
+ return -EINVAL;
+
+ /* If dest_island was -1, we don't care where it goes. */
+ return 0;
+}
+
+/* Try each option, take first one that fits.
+ * Not sure if we would want to do some smarter
+ * searching and prefer 0 or non-0 island IDs.
+ */
+static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld,
+ int iid_lsb, int idx_lsb, int v_max)
+{
+ int i, v;
+
+ for (i = 0; i < 2; i++)
+ for (v = 0; v < v_max; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+
+ *addr &= ~GENMASK_ULL(idx_lsb, iid_lsb);
+ *addr |= ((u64)i << idx_lsb);
+ *addr |= ((u64)v << iid_lsb);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/* For VQDR, we may not modify the Channel bits, which might overlap
+ * with the Index bit. When it does, we need to ensure that isld0 == isld1.
+ */
+static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
+ int mode, bool addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb;
+ int isld[2];
+ u64 v64;
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+
+	/* This function doesn't handle MU or CT_XPB */
+ if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
+ return -EINVAL;
+
+ switch (mode) {
+ case 0:
+ if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+ /* In this specific mode we'd rather not modify
+ * the address but we can verify if the existing
+ * contents will point to a valid island.
+ */
+ return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ mode, addr40, isld1, isld0);
+
+ iid_lsb = addr40 ? 34 : 26;
+ /* <39:34> or <31:26> */
+ v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+ *addr &= ~v64;
+ *addr |= ((u64)dest_island << iid_lsb) & v64;
+ return 0;
+ case 1:
+ if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+ return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ mode, addr40, isld1, isld0);
+
+ idx_lsb = addr40 ? 39 : 31;
+ if (dest_island == isld0) {
+ /* Only need to clear the Index bit */
+ *addr &= ~BIT_ULL(idx_lsb);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ /* Only need to set the Index bit */
+ *addr |= BIT_ULL(idx_lsb);
+ return 0;
+ }
+
+ return -ENODEV;
+ case 2:
+ /* iid<0> = addr<30> = channel<0>
+ * channel<1> = addr<31> = Index
+ */
+ if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+ /* Special case where we allow channel bits to
+			 * be set beforehand and with them select an island.
+ * So we need to confirm that it's at least plausible.
+ */
+ return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ mode, addr40, isld1, isld0);
+
+ /* Make sure we compare against isldN values
+ * by clearing the LSB.
+ * This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = addr40 ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ return nfp_encode_basic_search(addr, dest_island, isld,
+ iid_lsb, idx_lsb, 2);
+ case 3:
+ if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+ /* iid<0> = addr<29> = data
+ * iid<1> = addr<30> = channel<0>
+ * channel<1> = addr<31> = Index
+ */
+ return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ mode, addr40, isld1, isld0);
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = addr40 ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ return nfp_encode_basic_search(addr, dest_island, isld,
+ iid_lsb, idx_lsb, 4);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp_encode_mu(u64 *addr, int dest_island, int mode,
+ bool addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb, locality_lsb;
+ int isld[2];
+ u64 v64;
+ int da;
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+ locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = addr40 ? 32 : 24;
+ v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+ *addr &= ~v64;
+ *addr |= (((u64)dest_island) << iid_lsb) & v64;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = addr40 ? 32 : 24;
+ v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+ *addr &= ~v64;
+ *addr |= (((u64)dest_island) << iid_lsb) & v64;
+ return 0;
+ }
+
+ idx_lsb = addr40 ? 37 : 29;
+ if (dest_island == isld0) {
+ *addr &= ~BIT_ULL(idx_lsb);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ *addr |= BIT_ULL(idx_lsb);
+ return 0;
+ }
+
+ return -ENODEV;
+ case 2:
+ if (da) {
+ iid_lsb = addr40 ? 32 : 24;
+ v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+ *addr &= ~v64;
+ *addr |= (((u64)dest_island) << iid_lsb) & v64;
+ return 0;
+ }
+
+ /* Make sure we compare against isldN values
+ * by clearing the LSB.
+ * This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = addr40 ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ return nfp_encode_basic_search(addr, dest_island, isld,
+ iid_lsb, idx_lsb, 2);
+ case 3:
+ /* Only the EMU will use 40 bit addressing. Silently
+ * set the direct locality bit for everyone else.
+ * The SDK toolchain uses dest_island <= 0 to test
+ * for atypical address encodings to support access
+		 * to local-island CTM with a 32-bit address (high-locality
+		 * is effectively ignored and just used for
+ * routing to island #0).
+ */
+ if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) {
+ *addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT)
+ << locality_lsb;
+ da = 1;
+ }
+
+ if (da) {
+ iid_lsb = addr40 ? 32 : 24;
+ v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+ *addr &= ~v64;
+ *addr |= (((u64)dest_island) << iid_lsb) & v64;
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = addr40 ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ return nfp_encode_basic_search(addr, dest_island, isld,
+ iid_lsb, idx_lsb, 4);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt,
+ int mode, bool addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP_CPP_TARGET_NBI:
+ case NFP_CPP_TARGET_QDR:
+ case NFP_CPP_TARGET_ILA:
+ case NFP_CPP_TARGET_PCIE:
+ case NFP_CPP_TARGET_ARM:
+ case NFP_CPP_TARGET_CRYPTO:
+ case NFP_CPP_TARGET_CLS:
+ return nfp_encode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP_CPP_TARGET_MU:
+ return nfp_encode_mu(addr, dest_island, mode,
+ addr40, isld1, isld0);
+
+ case NFP_CPP_TARGET_CT_XPB:
+ if (mode != 1 || addr40)
+ return -EINVAL;
+ *addr &= ~GENMASK_ULL(29, 24);
+ *addr |= ((u64)dest_island << 24) & GENMASK_ULL(29, 24);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
+ u32 *cpp_target_id, u64 *cpp_target_address,
+ const u32 *imb_table)
+{
+ const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+ const int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+ u32 imb;
+ int err;
+
+ if (target < 0 || target >= 16)
+ return -EINVAL;
+
+ if (island == 0) {
+ /* Already translated */
+ *cpp_target_id = cpp_island_id;
+ *cpp_target_address = cpp_island_address;
+ return 0;
+ }
+
+ /* CPP + Island only allowed on systems with IMB tables */
+ if (!imb_table)
+ return -EINVAL;
+
+ imb = imb_table[target];
+
+ *cpp_target_address = cpp_island_address;
+ err = nfp_cppat_addr_encode(cpp_target_address, island, target,
+ ((imb >> 13) & 7), ((imb >> 12) & 1),
+ ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f));
+ if (err)
+ return err;
+
+ *cpp_target_id = NFP_CPP_ID(target,
+ NFP_CPP_ID_ACTION_of(cpp_island_id),
+ NFP_CPP_ID_TOKEN_of(cpp_island_id));
+
+ return 0;
+}
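
nfp_target_cpp() above unpacks each 32-bit IMB table entry as mode = (imb >> 13) & 7, addr40 = bit 12, isld1 = bits 11:6 and isld0 = bits 5:0, then lets nfp_cppat_addr_encode() fold the requested island into the address so the resulting CPP ID can use island 0. A short usage sketch follows; the IMB table contents are made-up placeholders, not values from any real device.

/* Illustrative translation of an island-encoded MU access into an
 * absolute CPP ID + address. The imb_table contents are placeholders.
 */
static int example_translate(const u32 *imb_table)
{
	u32 cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW,
				       0, 24);	/* MU on island 24 */
	u64 addr = 0x4000;
	u64 abs_addr;
	u32 abs_id;
	int err;

	err = nfp_target_cpp(cpp_id, addr, &abs_id, &abs_addr, imb_table);
	if (err)
		return err;

	/* abs_id now targets island 0; the island selection has been
	 * encoded into abs_addr according to the IMB mode/addr40/isld
	 * fields unpacked from imb_table[NFP_CPP_TARGET_MU].
	 */
	return 0;
}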
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 119f6dca71f0..9709c8ca0774 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -874,16 +874,18 @@ static void w90p910_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int w90p910_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
- return mii_ethtool_gset(&ether->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ether->mii, cmd);
}
-static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int w90p910_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
- return mii_ethtool_sset(&ether->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ether->mii, cmd);
}
static int w90p910_nway_reset(struct net_device *dev)
@@ -899,11 +901,11 @@ static u32 w90p910_get_link(struct net_device *dev)
}
static const struct ethtool_ops w90p910_ether_ethtool_ops = {
- .get_settings = w90p910_get_settings,
- .set_settings = w90p910_set_settings,
.get_drvinfo = w90p910_get_drvinfo,
.nway_reset = w90p910_nway_reset,
.get_link = w90p910_get_link,
+ .get_link_ksettings = w90p910_get_link_ksettings,
+ .set_link_ksettings = w90p910_set_link_ksettings,
};
static const struct net_device_ops w90p910_ether_netdev_ops = {
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 3913f07279d2..92367a06491a 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev)
* Called with read_lock(&dev_base_lock) held for read -
* only synchronized against unregister_netdevice.
*/
-static struct rtnl_link_stats64*
+static void
nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
@@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
spin_unlock_bh(&np->hwstats_lock);
}
-
- return storage;
}
/*
@@ -3751,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
if (rx_work < budget) {
/* re-enable interrupts
(msix not enabled in napi) */
- napi_complete(napi);
+ napi_complete_done(napi, rx_work);
writel(np->irqmask, base + NvRegIrqMask);
}
@@ -4239,14 +4237,15 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
return 0;
}
-static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int nv_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct fe_priv *np = netdev_priv(dev);
- u32 speed;
+ u32 speed, supported, advertising;
int adv;
spin_lock_irq(&np->lock);
- ecmd->port = PORT_MII;
+ cmd->base.port = PORT_MII;
if (!netif_running(dev)) {
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
@@ -4274,64 +4273,71 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
speed = -1;
break;
}
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
if (np->duplex)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = np->autoneg;
+ cmd->base.speed = speed;
+ cmd->base.autoneg = np->autoneg;
- ecmd->advertising = ADVERTISED_MII;
+ advertising = ADVERTISED_MII;
if (np->autoneg) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
if (adv & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (adv & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (adv & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (adv & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
if (adv & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
}
- ecmd->supported = (SUPPORTED_Autoneg |
+ supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_MII);
if (np->gigabit == PHY_GIGABIT)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
+
+ cmd->base.phy_address = np->phyaddr;
- ecmd->phy_address = np->phyaddr;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
/* ignore maxtxpkt, maxrxpkt for now */
spin_unlock_irq(&np->lock);
return 0;
}
-static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int nv_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct fe_priv *np = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
- if (ecmd->port != PORT_MII)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_EXTERNAL)
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.port != PORT_MII)
return -EINVAL;
- if (ecmd->phy_address != np->phyaddr) {
+ if (cmd->base.phy_address != np->phyaddr) {
/* TODO: support switching between multiple phys. Should be
* trivial, but not enabled due to lack of test hardware. */
return -EINVAL;
}
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 mask;
mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
@@ -4339,16 +4345,17 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->gigabit == PHY_GIGABIT)
mask |= ADVERTISED_1000baseT_Full;
- if ((ecmd->advertising & mask) == 0)
+ if ((advertising & mask) == 0)
return -EINVAL;
- } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ } else if (cmd->base.autoneg == AUTONEG_DISABLE) {
/* Note: autonegotiation disable, speed 1000 intentionally
* forbidden - no one should need that. */
if (speed != SPEED_10 && speed != SPEED_100)
return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ if (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
return -EINVAL;
@@ -4378,7 +4385,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
netif_tx_unlock_bh(dev);
}
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
int adv, bmcr;
np->autoneg = 1;
@@ -4386,13 +4393,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
@@ -4403,7 +4410,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
@@ -4430,13 +4437,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
- if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
adv |= ADVERTISE_10FULL;
- if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
adv |= ADVERTISE_100HALF;
- if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
@@ -5243,8 +5250,6 @@ static const struct ethtool_ops ops = {
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
.set_wol = nv_set_wol,
- .get_settings = nv_get_settings,
- .set_settings = nv_set_settings,
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
@@ -5257,6 +5262,8 @@ static const struct ethtool_ops ops = {
.get_sset_count = nv_get_sset_count,
.self_test = nv_self_test,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = nv_get_link_ksettings,
+ .set_link_ksettings = nv_set_link_ksettings,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
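
The forcedeth conversion above shows the common pattern for moving a driver from get_settings/set_settings to the ksettings API: speed, duplex, autoneg, port and phy_address move into cmd->base, legacy SUPPORTED_*/ADVERTISED_* masks are still built as u32 and then copied into the new link-mode bitmaps with ethtool_convert_legacy_u32_to_link_mode(), while the set path reads them back with ethtool_convert_link_mode_to_legacy_u32(). A minimal sketch of the get side follows; the reported values are placeholders, not taken from any of the drivers in this patch.

/* Minimal get_link_ksettings() following the conversion pattern above.
 * All reported link parameters here are illustrative constants.
 */
static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_Autoneg |
			SUPPORTED_MII;
	u32 advertising = ADVERTISED_100baseT_Full | ADVERTISED_Autoneg;

	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port = PORT_MII;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}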
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index dd6b0d0f7fa5..9c7ffd649e9a 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget)
rx_done = __lpc_handle_recv(ndev, budget);
if (rx_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
lpc_eth_enable_int(pldat->net_base);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index b19be7c6c1f4..21093276d2b7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -73,62 +73,80 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4)
#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
/**
- * pch_gbe_get_settings - Get device-specific settings
+ * pch_gbe_get_link_ksettings - Get device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
* Returns:
* 0: Successful.
* Negative value: Failed.
*/
-static int pch_gbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int pch_gbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ u32 supported, advertising;
int ret;
- ret = mii_ethtool_gset(&adapter->mii, ecmd);
- ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
- ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+ ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ ecmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ ecmd->link_modes.advertising);
+
+ supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
+ advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
if (!netif_carrier_ok(adapter->netdev))
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->base.speed = SPEED_UNKNOWN;
return ret;
}
/**
- * pch_gbe_set_settings - Set device-specific settings
+ * pch_gbe_set_link_ksettings - Set device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
* Returns:
* 0: Successful.
* Negative value: Failed.
*/
-static int pch_gbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int pch_gbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
- u32 speed = ethtool_cmd_speed(ecmd);
+ struct ethtool_link_ksettings copy_ecmd;
+ u32 speed = ecmd->base.speed;
+ u32 advertising;
int ret;
pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+ memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
+
/* when set_settings() is called with a ethtool_cmd previously
* filled by get_settings() on a down link, speed is -1: */
if (speed == UINT_MAX) {
speed = SPEED_1000;
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = DUPLEX_FULL;
+ copy_ecmd.base.speed = speed;
+ copy_ecmd.base.duplex = DUPLEX_FULL;
}
- ret = mii_ethtool_sset(&adapter->mii, ecmd);
+ ret = mii_ethtool_set_link_ksettings(&adapter->mii, &copy_ecmd);
if (ret) {
- netdev_err(netdev, "Error: mii_ethtool_sset\n");
+ netdev_err(netdev, "Error: mii_ethtool_set_link_ksettings\n");
return ret;
}
hw->mac.link_speed = speed;
- hw->mac.link_duplex = ecmd->duplex;
- hw->phy.autoneg_advertised = ecmd->advertising;
- hw->mac.autoneg = ecmd->autoneg;
+ hw->mac.link_duplex = copy_ecmd.base.duplex;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, copy_ecmd.link_modes.advertising);
+ hw->phy.autoneg_advertised = advertising;
+ hw->mac.autoneg = copy_ecmd.base.autoneg;
/* reset the link */
if (netif_running(adapter->netdev)) {
@@ -487,8 +505,6 @@ static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
}
static const struct ethtool_ops pch_gbe_ethtool_ops = {
- .get_settings = pch_gbe_get_settings,
- .set_settings = pch_gbe_set_settings,
.get_drvinfo = pch_gbe_get_drvinfo,
.get_regs_len = pch_gbe_get_regs_len,
.get_regs = pch_gbe_get_regs,
@@ -503,6 +519,8 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
.get_strings = pch_gbe_get_strings,
.get_ethtool_stats = pch_gbe_get_ethtool_stats,
.get_sset_count = pch_gbe_get_sset_count,
+ .get_link_ksettings = pch_gbe_get_link_ksettings,
+ .set_link_ksettings = pch_gbe_set_link_ksettings,
};
void pch_gbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index d461f419948e..5ae9681a2da7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2149,17 +2149,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
/**
- * pch_gbe_get_stats - Get System Network Statistics
- * @netdev: Network interface device structure
- * Returns: The current stats
- */
-static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
-{
- /* only return the current stats */
- return &netdev->stats;
-}
-
-/**
* pch_gbe_set_multi - Multicast and Promiscuous mode set
* @netdev: Network interface device structure
*/
@@ -2385,7 +2374,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
poll_end_flag = true;
if (poll_end_flag) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
pch_gbe_irq_enable(adapter);
}
@@ -2420,7 +2409,6 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
.ndo_open = pch_gbe_open,
.ndo_stop = pch_gbe_stop,
.ndo_start_xmit = pch_gbe_xmit_frame,
- .ndo_get_stats = pch_gbe_get_stats,
.ndo_set_mac_address = pch_gbe_set_mac,
.ndo_tx_timeout = pch_gbe_tx_timeout,
.ndo_change_mtu = pch_gbe_change_mtu,
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index baff744b560e..8b026dbf0d8d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1811,21 +1811,23 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int hamachi_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct hamachi_private *np = netdev_priv(dev);
spin_lock_irq(&np->lock);
- mii_ethtool_gset(&np->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return 0;
}
-static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int hamachi_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct hamachi_private *np = netdev_priv(dev);
int res;
spin_lock_irq(&np->lock);
- res = mii_ethtool_sset(&np->mii_if, ecmd);
+ res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
return res;
}
@@ -1845,10 +1847,10 @@ static u32 hamachi_get_link(struct net_device *dev)
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
- .get_settings = hamachi_get_settings,
- .set_settings = hamachi_set_settings,
.nway_reset = hamachi_nway_reset,
.get_link = hamachi_get_link,
+ .get_link_ksettings = hamachi_get_link_ksettings,
+ .set_link_ksettings = hamachi_set_link_ksettings,
};
static const struct ethtool_ops ethtool_ops_no_mii = {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index badfa1d562a4..49591d9c2e1b 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
if (pkts < budget) {
/* all done, no more packets present */
- napi_complete(napi);
+ napi_complete_done(napi, pkts);
pasemi_mac_restart_rx_intr(mac);
pasemi_mac_restart_tx_intr(mac);
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 3cfd10503446..c2e24afbaeb2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -104,6 +104,7 @@ config QED_SRIOV
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
+ imply PTP_1588_CLOCK
---help---
This enables the support for ...
@@ -113,4 +114,7 @@ config QED_RDMA
config QED_ISCSI
bool
+config QED_FCOE
+ bool
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index f9034467736c..3157f97dd782 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -96,69 +96,70 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
}
static int
-netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+netxen_nic_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int check_sfp_module = 0;
+ u32 supported, advertising;
/* read which mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
- ecmd->advertising = (ADVERTISED_100baseT_Half |
+ advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->duplex = adapter->link_duplex;
- ecmd->autoneg = adapter->link_autoneg;
+ cmd->base.speed = adapter->link_speed;
+ cmd->base.duplex = adapter->link_duplex;
+ cmd->base.autoneg = adapter->link_autoneg;
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
u32 val;
val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
if (val == NETXEN_PORT_MODE_802_3_AP) {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
if (netif_running(dev) && adapter->has_link_events) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->autoneg = adapter->link_autoneg;
- ecmd->duplex = adapter->link_duplex;
+ cmd->base.speed = adapter->link_speed;
+ cmd->base.autoneg = adapter->link_autoneg;
+ cmd->base.duplex = adapter->link_duplex;
goto skip;
}
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
u16 pcifn = adapter->ahw.pci_func;
val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
- ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
- P3_LINK_SPEED_VAL(pcifn, val));
+ cmd->base.speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
} else
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
- ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
} else
return -EIO;
skip:
- ecmd->phy_address = adapter->physical_port;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.phy_address = adapter->physical_port;
switch (adapter->ahw.board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
@@ -167,16 +168,16 @@ skip:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
case NETXEN_BRDTYPE_P3_10000_BASE_T:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- ecmd->autoneg = (adapter->ahw.board_type ==
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = (adapter->ahw.board_type ==
NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
(AUTONEG_DISABLE) : (adapter->link_autoneg);
break;
@@ -185,39 +186,39 @@ skip:
case NETXEN_BRDTYPE_P3_IMEZ:
case NETXEN_BRDTYPE_P3_XG_LOM:
case NETXEN_BRDTYPE_P3_HMEZ:
- ecmd->supported |= SUPPORTED_MII;
- ecmd->advertising |= ADVERTISED_MII;
- ecmd->port = PORT_MII;
- ecmd->autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_MII;
+ advertising |= ADVERTISED_MII;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
case NETXEN_BRDTYPE_P3_10G_SFP_CT:
case NETXEN_BRDTYPE_P3_10G_SFP_QT:
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P3_10G_TP:
if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
- ecmd->advertising |=
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
} else {
- ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- ecmd->advertising |=
+ supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
}
break;
default:
@@ -232,31 +233,37 @@ skip:
case LINKEVENT_MODULE_OPTICAL_SRLR:
case LINKEVENT_MODULE_OPTICAL_LRM:
case LINKEVENT_MODULE_OPTICAL_SFP_1G:
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
case LINKEVENT_MODULE_TWINAX:
- ecmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
default:
- ecmd->port = -1;
+ cmd->base.port = -1;
}
}
if (!netif_running(dev) || !adapter->ahw.linkup) {
- ecmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
static int
-netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+netxen_nic_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
int ret;
if (adapter->ahw.port_type != NETXEN_NIC_GBE)
@@ -265,16 +272,16 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
return -EOPNOTSUPP;
- ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
- ecmd->autoneg);
+ ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex,
+ cmd->base.autoneg);
if (ret == NX_RCODE_NOT_SUPPORTED)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
adapter->link_speed = speed;
- adapter->link_duplex = ecmd->duplex;
- adapter->link_autoneg = ecmd->autoneg;
+ adapter->link_duplex = cmd->base.duplex;
+ adapter->link_autoneg = cmd->base.autoneg;
if (!netif_running(dev))
return 0;
@@ -931,8 +938,6 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
}
const struct ethtool_ops netxen_nic_ethtool_ops = {
- .get_settings = netxen_nic_get_settings,
- .set_settings = netxen_nic_set_settings,
.get_drvinfo = netxen_nic_get_drvinfo,
.get_regs_len = netxen_nic_get_regs_len,
.get_regs = netxen_nic_get_regs,
@@ -954,4 +959,6 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.get_dump_flag = netxen_get_dump_flag,
.get_dump_data = netxen_get_dump_data,
.set_dump = netxen_set_dump,
+ .get_link_ksettings = netxen_nic_get_link_ksettings,
+ .set_link_ksettings = netxen_nic_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 561fb94c7267..3b5d7cfa2321 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data);
static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static void netxen_nic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);
/* PCI Device ID Table */
@@ -2302,8 +2302,8 @@ request_reset:
clear_bit(__NX_RESETTING, &adapter->state);
}
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void netxen_nic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -2313,8 +2313,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
stats->tx_bytes = adapter->stats.txbytes;
stats->rx_dropped = adapter->stats.rxdropped;
stats->tx_dropped = adapter->stats.txdropped;
-
- return stats;
}
static irqreturn_t netxen_intr(int irq, void *data)
@@ -2398,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
}
@@ -3266,7 +3264,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
if (cur == NULL)
goto out;
- if (dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
@@ -3376,7 +3374,7 @@ static void netxen_config_master(struct net_device *dev, unsigned long event)
!netif_is_bond_slave(dev)) {
netxen_config_indev_addr(adapter, master, event);
for_each_netdev_rcu(&init_net, slave)
- if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ if (is_vlan_dev(slave) &&
vlan_dev_real_dev(slave) == master)
netxen_config_indev_addr(adapter, slave, event);
}
@@ -3402,7 +3400,7 @@ recheck:
if (dev == NULL)
goto done;
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
@@ -3447,7 +3445,7 @@ recheck:
if (dev == NULL)
goto done;
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 729e43768e99..974929dcc74e 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,8 +2,9 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o qed_debug.o
+ qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
+qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 44c184ebe3b0..61a9cd5be497 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_H
@@ -27,7 +51,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.9.20"
+#define DRV_MODULE_VERSION "8.10.10.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -36,6 +60,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
#define ISCSI_BDQ_ID(_port_id) (_port_id)
+#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@@ -143,6 +168,7 @@ struct qed_tunn_update_params {
*/
enum qed_pci_personality {
QED_PCI_ETH,
+ QED_PCI_FCOE,
QED_PCI_ISCSI,
QED_PCI_ETH_ROCE,
QED_PCI_DEFAULT /* default in shmem */
@@ -180,6 +206,7 @@ enum QED_FEATURE {
QED_VF,
QED_RDMA_CNQ,
QED_VF_L2_QUE,
+ QED_FCOE_CQ,
QED_MAX_FEATURES,
};
@@ -197,6 +224,7 @@ enum QED_PORT_MODE {
enum qed_dev_cap {
QED_DEV_CAP_ETH,
+ QED_DEV_CAP_FCOE,
QED_DEV_CAP_ISCSI,
QED_DEV_CAP_ROCE,
};
@@ -231,6 +259,10 @@ struct qed_hw_info {
u32 part_num[4];
unsigned char hw_mac_addr[ETH_ALEN];
+ u64 node_wwn;
+ u64 port_wwn;
+
+ u16 num_fcoe_conns;
struct qed_igu_info *p_igu_info;
@@ -386,6 +418,7 @@ struct qed_hwfn {
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
+ struct qed_fcoe_info *p_fcoe_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -432,6 +465,8 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+ /* p_ptp_ptt is valid for leading HWFN only */
+ struct qed_ptt *p_ptp_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -594,11 +629,13 @@ struct qed_dev {
u8 protocol;
#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE)
/* Callbacks to protocol driver */
union {
struct qed_common_cb_ops *common;
struct qed_eth_cb_ops *eth;
+ struct qed_fcoe_cb_ops *fcoe;
struct qed_iscsi_cb_ops *iscsi;
} protocol_ops;
void *ops_cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0c42c240b5cf..d42d03df751a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -66,12 +90,14 @@ union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
struct roce_conn_context roce_ctx;
};
-/* TYPE-0 task context - iSCSI */
+/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
@@ -216,15 +242,22 @@ struct qed_cxt_mngr {
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE;
}
static bool tm_cid_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE;
}
+static bool tm_tid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_FCOE;
+}
+
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
u32 pf_cids;
@@ -283,6 +316,22 @@ static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
iids->pf_cids += p_cfg->cid_count;
iids->per_vf_cids += p_cfg->cids_per_vf;
}
+
+ if (tm_tid_proto(i)) {
+ struct qed_tid_seg *segs = p_cfg->tid_seg;
+
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->pf_tids[j] += segs[j].count;
+
+			 * The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
}
iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
@@ -1670,9 +1719,42 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
/* @@@TBD how to enable the scan for the VFs */
}
+static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
+{
+ if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
+ p_hwfn->pf_params.fcoe_pf_params.is_target)
+ STORE_RT_REG(p_hwfn,
+ PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
+}
+
+static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_fcoe;
+ struct qed_tid_seg *p_tid;
+
+ p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
+ /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
+ if (!p_fcoe->cid_count)
+ return;
+
+ p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
+ if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
+ p_tid->count);
+ } else {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ p_tid->count);
+ }
+}
+
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
qed_cdu_init_common(p_hwfn);
+ qed_prs_init_common(p_hwfn);
}
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
@@ -1684,6 +1766,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
qed_ilt_init_pf(p_hwfn);
qed_src_init_pf(p_hwfn);
qed_tm_init_pf(p_hwfn);
+ qed_prs_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -1861,6 +1944,27 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
p_params->num_cons, 1);
break;
}
+ case QED_PCI_FCOE:
+ {
+ struct qed_fcoe_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_FCOE,
+ p_params->num_cons,
+ 0);
+
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
+ QED_CXT_FCOE_TID_SEG, 0,
+ p_params->num_tasks, true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Fcoe personality used without setting params!\n");
+ }
+ break;
+ }
case QED_PCI_ISCSI:
{
struct qed_iscsi_pf_params *p_params;
@@ -1903,6 +2007,10 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
/* Verify the personality */
switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = QED_CXT_FCOE_TID_SEG;
+ break;
case QED_PCI_ISCSI:
proto = PROTOCOLID_ISCSI;
seg = QED_CXT_ISCSI_TID_SEG;
@@ -2191,15 +2299,19 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
- struct qed_ilt_cli_blk *p_seg;
struct qed_tid_seg *p_seg_info;
- u32 proto, seg;
- u32 total_lines;
- u32 tid_size, ilt_idx;
+ struct qed_ilt_cli_blk *p_seg;
u32 num_tids_per_block;
+ u32 tid_size, ilt_idx;
+ u32 total_lines;
+ u32 proto, seg;
/* Verify the personality */
switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = QED_CXT_FCOE_TID_SEG;
+ break;
case QED_PCI_ISCSI:
proto = PROTOCOLID_ISCSI;
seg = QED_CXT_ISCSI_TID_SEG;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 2b8bdaa77800..8b010324268a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_CXT_H
@@ -67,6 +91,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
+#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
enum qed_cxt_elem_type {
QED_ELEM_CXT,
QED_ELEM_SRQ,
@@ -180,4 +205,6 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **task_ctx);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index a4789a93b692..5bd36a4a8fcd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -408,7 +432,6 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
return rc;
}
-#ifdef CONFIG_DCB
static void
qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
struct qed_dcbx_app_prio *p_prio,
@@ -725,7 +748,6 @@ qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return 0;
}
-#endif
static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -840,6 +862,15 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
return rc;
}
+void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+{
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ if (cookie && op->dcbx_aen)
+ op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type);
+}
+
/* Read updated MIB.
* Reconfigure QM and invoke PF update ramrod command if operational MIB
* change is detected.
@@ -866,6 +897,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
qed_sp_pf_update(p_hwfn);
}
}
+ qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+ qed_dcbx_aen(p_hwfn, type);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 9ba681643d05..0fabe97f998d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_DCBX_H
@@ -33,7 +57,6 @@ struct qed_dcbx_app_data {
u8 tc; /* Traffic Class */
};
-#ifdef CONFIG_DCB
#define QED_DCBX_VERSION_DISABLED 0
#define QED_DCBX_VERSION_IEEE 1
#define QED_DCBX_VERSION_CEE 2
@@ -49,7 +72,6 @@ struct qed_dcbx_set {
struct qed_dcbx_admin_params config;
u32 ver_num;
};
-#endif
struct qed_dcbx_results {
bool dcbx_enabled;
@@ -73,9 +95,8 @@ struct qed_dcbx_info {
struct qed_dcbx_results results;
struct dcbx_mib operational;
struct dcbx_mib remote;
-#ifdef CONFIG_DCB
struct qed_dcbx_set set;
-#endif
+ struct qed_dcbx_get get;
u8 dcbx_cap;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 3b2250021c5f..d6c5a8165b5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -25,6 +49,7 @@
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
+#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
@@ -148,6 +173,9 @@ void qed_resc_free(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
@@ -409,6 +437,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
+ struct qed_fcoe_info *p_fcoe_info;
struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
@@ -515,6 +544,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
p_hwfn->p_ll2_info = p_ll2_info;
}
#endif
+
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+ p_fcoe_info = qed_fcoe_alloc(p_hwfn);
+ if (!p_fcoe_info)
+ goto alloc_no_mem;
+ p_hwfn->p_fcoe_info = p_fcoe_info;
+ }
+
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
p_iscsi_info = qed_iscsi_alloc(p_hwfn);
if (!p_iscsi_info)
@@ -578,6 +615,9 @@ void qed_resc_setup(struct qed_dev *cdev)
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
@@ -873,7 +913,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
- n_cpus = num_active_cpus();
+ n_cpus = num_present_cpus();
rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
@@ -970,7 +1010,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
- STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+ (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
@@ -1002,8 +1043,16 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ return rc;
+ }
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
}
return rc;
}
@@ -1763,8 +1812,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
struct qed_mcp_link_params *link;
/* Read global nvm_cfg address */
@@ -1910,6 +1959,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
__set_bit(QED_DEV_CAP_ETH,
&p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+ __set_bit(QED_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
__set_bit(QED_DEV_CAP_ISCSI,
&p_hwfn->hw_info.device_capabilities);
@@ -2647,6 +2699,177 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}
+int
+qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port, enum qed_llh_port_filter_type_t type)
+{
+ u32 high = 0, low = 0, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return 0;
+
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Non valid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 1 << type);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an empty LLH filter to utilize\n");
+ return -EINVAL;
+ }
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "ETH type %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "TCP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "UDP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "TCP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "UDP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "TCP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "UDP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
+ }
+ return 0;
+}
+
+void
+qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum qed_llh_port_filter_type_t type)
+{
+ u32 high = 0, low = 0;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Non valid LLH protocol filter type %d\n", type);
+ return;
+ }
+
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
+ continue;
+ if (!qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
+ continue;
+ if (!(qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32)) & BIT(type)))
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index b6711c106597..6812003411cd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_DEV_API_H
@@ -329,6 +353,48 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *p_filter);
+enum qed_llh_port_filter_type_t {
+ QED_LLH_FILTER_ETHERTYPE,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+ QED_LLH_FILTER_UDP_SRC_PORT,
+ QED_LLH_FILTER_UDP_DEST_PORT,
+ QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
+
+/**
+ * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - type of filters and comparing
+ */
+int
+qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum qed_llh_port_filter_type_t type);
+
+/**
+ * @brief qed_llh_remove_protocol_filter - removes a protocol filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to remove
+ * @param dest_port - destination port to remove
+ * @param type - type of filters and comparing
+ */
+void
+qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum qed_llh_port_filter_type_t type);
+
/**
* @brief Cleanup of previous driver remains prior to load
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
new file mode 100644
index 000000000000..cbc81412174f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -0,0 +1,1014 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#define __PREVENT_DUMP_MEM_ARR__
+#define __PREVENT_PXP_GLOBAL_WIN__
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_fcoe.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include <linux/qed/qed_fcoe_if.h>
+
+struct qed_fcoe_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t sq_curr_page_addr;
+ dma_addr_t sq_next_page_addr;
+ dma_addr_t xferq_pbl_addr;
+ void *xferq_pbl_addr_virt_addr;
+ dma_addr_t xferq_addr[4];
+ void *xferq_addr_virt_addr[4];
+ dma_addr_t confq_pbl_addr;
+ void *confq_pbl_addr_virt_addr;
+ dma_addr_t confq_addr[2];
+ void *confq_addr_virt_addr[2];
+
+ dma_addr_t terminate_params;
+
+ u16 dst_mac_addr_lo;
+ u16 dst_mac_addr_mid;
+ u16 dst_mac_addr_hi;
+ u16 src_mac_addr_lo;
+ u16 src_mac_addr_mid;
+ u16 src_mac_addr_hi;
+
+ u16 tx_max_fc_pay_len;
+ u16 e_d_tov_timer_val;
+ u16 rec_tov_timer_val;
+ u16 rx_max_fc_pay_len;
+ u16 vlan_tag;
+ u16 physical_q0;
+
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+ u8 def_q_idx;
+};
+
+static int
+qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
+ struct fcoe_init_ramrod_params *p_ramrod = NULL;
+ struct fcoe_init_func_ramrod_data *p_data;
+ struct fcoe_conn_context *p_cxt = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_cxt_info cxt_info;
+ u32 dummy_cid;
+ int rc = 0;
+ u16 tmp;
+ u8 i;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_init;
+ p_data = &p_ramrod->init_ramrod_data;
+ fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+ p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
+ tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
+ p_data->sq_num_pages_in_pbl = tmp;
+
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
+ if (rc)
+ return rc;
+
+ cxt_info.iid = dummy_cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
+ dummy_cid);
+ return rc;
+ }
+ p_cxt = cxt_info.p_cxt;
+ SET_FIELD(p_cxt->tstorm_ag_context.flags3,
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+
+ fcoe_pf_params->dummy_icid = (u16)dummy_cid;
+
+ tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
+ p_data->func_params.num_tasks = tmp;
+ p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
+ p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
+
+ DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
+ fcoe_pf_params->glbl_q_params_addr);
+
+ tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
+ p_data->q_params.cq_num_entries = tmp;
+
+ tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
+ p_data->q_params.cmdq_num_entries = tmp;
+
+ tmp = fcoe_pf_params->num_cqs;
+ p_data->q_params.num_queues = (u8)tmp;
+
+ tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+ p_data->q_params.queue_relative_offset = (u8)tmp;
+
+ for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
+ tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
+ p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
+ }
+
+ p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
+ p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
+
+ p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
+
+ DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
+ fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+ p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+ tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
+ p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
+ tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
+ p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
+
+ DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+ fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+ p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+ fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+ tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
+ p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
+ tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
+ p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
+ tmp = fcoe_pf_params->rq_buffer_size;
+ p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
+
+ if (fcoe_pf_params->is_target) {
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+ } else {
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+}
+
+static int
+qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
+ struct fcoe_conn_offload_ramrod_data *p_data;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 pq_id = 0, tmp;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
+ p_data = &p_ramrod->offload_ramrod_data;
+
+ /* Transmission PQ is the first of the PF */
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
+ p_conn->physical_q0 = cpu_to_le16(pq_id);
+ p_data->physical_q0 = cpu_to_le16(pq_id);
+
+ p_data->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
+ DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
+ DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
+ DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
+
+ DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
+ DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
+
+ p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
+ p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
+ p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
+ p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
+ p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
+ p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
+
+ tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
+ p_data->tx_max_fc_pay_len = tmp;
+ tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
+ p_data->e_d_tov_timer_val = tmp;
+ tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
+ p_data->rec_rr_tov_timer_val = tmp;
+ tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
+ p_data->rx_max_fc_pay_len = tmp;
+
+ p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
+ p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
+ p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
+ p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
+ p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
+ p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
+ p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
+ p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
+ p_data->flags = p_conn->flags;
+ p_data->def_q_idx = p_conn->def_q_idx;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
+ DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
+ p_conn->terminate_params);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u32 active_segs = 0;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
+ active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn **p_out_conn)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+ void *p_addr;
+ u32 i;
+
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
+ p_conn =
+ list_first_entry(&p_hwfn->p_fcoe_info->free_list,
+ struct qed_fcoe_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ *p_out_conn = p_conn;
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->xferq_pbl_addr, GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_pbl_xferq;
+ p_conn->xferq_pbl_addr_virt_addr = p_addr;
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->xferq_addr[i], GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_xferq;
+ p_conn->xferq_addr_virt_addr[i] = p_addr;
+
+ p_addr = p_conn->xferq_pbl_addr_virt_addr;
+ ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
+ }
+
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->confq_pbl_addr, GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_xferq;
+ p_conn->confq_pbl_addr_virt_addr = p_addr;
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->confq_addr[i], GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_confq;
+ p_conn->confq_addr_virt_addr[i] = p_addr;
+
+ p_addr = p_conn->confq_pbl_addr_virt_addr;
+ ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
+ }
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+ return 0;
+
+nomem_confq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_pbl_addr_virt_addr,
+ p_conn->confq_pbl_addr);
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
+ if (p_conn->confq_addr_virt_addr[i])
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_addr_virt_addr[i],
+ p_conn->confq_addr[i]);
+nomem_xferq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_pbl_addr_virt_addr,
+ p_conn->xferq_pbl_addr);
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
+ if (p_conn->xferq_addr_virt_addr[i])
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_addr_virt_addr[i],
+ p_conn->xferq_addr[i]);
+nomem_pbl_xferq:
+ kfree(p_conn);
+ return -ENOMEM;
+}
+
+static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn)
+{
+ u32 i;
+
+ if (!p_conn)
+ return;
+
+ if (p_conn->confq_pbl_addr_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_pbl_addr_virt_addr,
+ p_conn->confq_pbl_addr);
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+ if (!p_conn->confq_addr_virt_addr[i])
+ continue;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_addr_virt_addr[i],
+ p_conn->confq_addr[i]);
+ }
+
+ if (p_conn->xferq_pbl_addr_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_pbl_addr_virt_addr,
+ p_conn->xferq_pbl_addr);
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+ if (!p_conn->xferq_addr_virt_addr[i])
+ continue;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_addr_virt_addr[i],
+ p_conn->xferq_addr[i]);
+ }
+ kfree(p_conn);
+}
+
+static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
+
+ return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+}
+
+static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
+
+ return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+}
+
+struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_fcoe_info *p_fcoe_info;
+
+ /* Allocate the FCoE info struct */
+ p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
+ if (!p_fcoe_info) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&p_fcoe_info->free_list);
+ return p_fcoe_info;
+}
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+{
+ struct fcoe_task_context *p_task_ctx = NULL;
+ int rc;
+ u32 i;
+
+ spin_lock_init(&p_fcoe_info->lock);
+ for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
+ rc = qed_cxt_get_task_ctx(p_hwfn, i,
+ QED_CTX_WORKING_MEM,
+ (void **)&p_task_ctx);
+ if (rc)
+ continue;
+
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+ SET_FIELD(p_task_ctx->timer_context.logical_client_0,
+ TIMERS_CONTEXT_VALIDLC0, 1);
+ SET_FIELD(p_task_ctx->timer_context.logical_client_1,
+ TIMERS_CONTEXT_VALIDLC1, 1);
+ SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ }
+}
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+
+ if (!p_fcoe_info)
+ return;
+
+ while (!list_empty(&p_fcoe_info->free_list)) {
+ p_conn = list_first_entry(&p_fcoe_info->free_list,
+ struct qed_fcoe_conn, list_entry);
+ if (!p_conn)
+ break;
+ list_del(&p_conn->list_entry);
+ qed_fcoe_free_connection(p_hwfn, p_conn);
+ }
+
+ kfree(p_fcoe_info);
+}
+
+static int
+qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_in_conn,
+ struct qed_fcoe_conn **p_out_conn)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ if (rc)
+ return rc;
+
+ /* Use input connection [if provided] or allocate a new one */
+ if (p_in_conn) {
+ p_conn = p_in_conn;
+ } else {
+ rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ return rc;
+ }
+ }
+
+ p_conn->icid = icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+}
+
+static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_fcoe_stats *p_stats)
+{
+ struct fcoe_rx_stat tstats;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
+ p_stats->fcoe_rx_data_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
+ p_stats->fcoe_rx_xfer_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
+ p_stats->fcoe_rx_other_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);
+
+ p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
+ p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
+ p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
+ p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
+ p_stats->fcoe_silent_drop_total_pkt_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
+}
+
+static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_fcoe_stats *p_stats)
+{
+ struct fcoe_tx_stat pstats;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
+ p_stats->fcoe_tx_data_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
+ p_stats->fcoe_tx_xfer_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
+ p_stats->fcoe_tx_other_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
+}
+
+static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_stats *p_stats)
+{
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
+ _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+struct qed_hash_fcoe_con {
+ struct hlist_node node;
+ struct qed_fcoe_conn *con;
+};
+
+static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
+ struct qed_dev_fcoe_info *info)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+
+ info->primary_dbq_rq_addr =
+ qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->secondary_bdq_rq_addr =
+ qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+ return rc;
+}
+
+static void qed_register_fcoe_ops(struct qed_dev *cdev,
+ struct qed_fcoe_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.fcoe = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_fcoe_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || (hash_con->con->icid != handle))
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_fcoe_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "fcoe already stopped\n");
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop fcoe - not all connections were returned\n");
+ return -EINVAL;
+ }
+
+ /* Stop the fcoe */
+ rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
+{
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "fcoe already started;\n");
+ return 0;
+ }
+
+ rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start fcoe\n");
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (tasks) {
+ struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
+ GFP_ATOMIC);
+
+ if (!tid_info) {
+ DP_NOTICE(cdev,
+ "Failed to allocate tasks information\n");
+ qed_fcoe_stop(cdev);
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_fcoe_stop(cdev);
+ kfree(tid_info);
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_FCOE * sizeof(u8 *));
+
+ kfree(tid_info);
+ }
+
+ return 0;
+}
+
+static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
+ return -ENOMEM;
+ }
+
+ /* Acquire the connection */
+ rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+
+ if (p_doorbell)
+ *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_fcoe_con *hash_con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_fcoe_offload_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_fcoe_params_offload *conn_info)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ struct qed_fcoe_conn *con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
+ con->sq_next_page_addr = conn_info->sq_next_page_addr;
+ con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
+ con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
+ con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
+ con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
+ con->vlan_tag = conn_info->vlan_tag;
+ con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
+ con->flags = conn_info->flags;
+ con->def_q_idx = conn_info->def_q_idx;
+
+ con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
+ conn_info->src_mac[4];
+ con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
+ conn_info->src_mac[2];
+ con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
+ conn_info->src_mac[0];
+ con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
+ conn_info->dst_mac[4];
+ con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
+ conn_info->dst_mac[2];
+ con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
+ conn_info->dst_mac[0];
+
+ con->s_id.addr_hi = conn_info->s_id.addr_hi;
+ con->s_id.addr_mid = conn_info->s_id.addr_mid;
+ con->s_id.addr_lo = conn_info->s_id.addr_lo;
+ con->d_id.addr_hi = conn_info->d_id.addr_hi;
+ con->d_id.addr_mid = conn_info->d_id.addr_mid;
+ con->d_id.addr_lo = conn_info->d_id.addr_lo;
+
+ return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
+ u32 handle, dma_addr_t terminate_params)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ struct qed_fcoe_conn *con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ con->terminate_params = terminate_params;
+
+ return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
+{
+ return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
+}
+
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats)
+{
+ struct qed_fcoe_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_fcoe_stats(cdev, &proto_stats)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect FCoE statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into struct */
+ stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
+ proto_stats.fcoe_rx_xfer_pkt_cnt +
+ proto_stats.fcoe_rx_other_pkt_cnt;
+ stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
+ proto_stats.fcoe_tx_xfer_pkt_cnt +
+ proto_stats.fcoe_tx_other_pkt_cnt;
+ stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;
+
+ /* Request protocol driver to fill-in the rest */
+ if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
+ struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
+ void *cookie = cdev->ops_cookie;
+
+ if (ops->get_login_failures)
+ stats->login_failure = ops->get_login_failures(cookie);
+ }
+}
+
+static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_fcoe_dev_info,
+ .start = &qed_fcoe_start,
+ .stop = &qed_fcoe_stop,
+ .register_ops = &qed_register_fcoe_ops,
+ .acquire_conn = &qed_fcoe_acquire_conn,
+ .release_conn = &qed_fcoe_release_conn,
+ .offload_conn = &qed_fcoe_offload_conn,
+ .destroy_conn = &qed_fcoe_destroy_conn,
+ .get_stats = &qed_fcoe_stats,
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
+{
+ return &qed_fcoe_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_fcoe_ops);
+
+void qed_put_fcoe_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_fcoe_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
new file mode 100644
index 000000000000..472af34a171d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
@@ -0,0 +1,87 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_FCOE_H
+#define _QED_FCOE_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_fcoe_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+};
+
+#if IS_ENABLED(CONFIG_QED_FCOE)
+struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats);
+#else /* CONFIG_QED_FCOE */
+static inline struct qed_fcoe_info *
+qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+ return NULL;
+}
+
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_info *p_fcoe_info)
+{
+}
+
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_info *p_fcoe_info)
+{
+}
+
+static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats)
+{
+}
+#endif /* CONFIG_QED_FCOE */
+
+#ifdef CONFIG_QED_LL2
+extern const struct qed_common_ops qed_common_ops_pass;
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+#endif
+
+#endif /* _QED_FCOE_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 785ab03683eb..37c2bfb663bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_HSI_H
@@ -19,10 +43,12 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/tcp_common.h>
+#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
+#include <linux/qed/qed_fcoe_if.h>
struct qed_hwfn;
struct qed_ptt;
@@ -913,7 +939,7 @@ struct mstorm_vf_zone {
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_ISCSI,
- PERSONALITY_RESERVED2,
+ PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RESERVED3,
PERSONALITY_CORE,
@@ -3449,6 +3475,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[44].base + ((pf_id) * IRO[44].m1))
static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
@@ -7383,6 +7413,769 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
+struct ystorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 cos;
+ u8 conf_version;
+ u8 eth_hdr_size;
+ __le16 stat_ram_addr;
+ __le16 mtu;
+ __le16 max_fc_payload_len;
+ __le16 tx_max_fc_pay_len;
+ u8 fcp_cmd_size;
+ u8 fcp_rsp_size;
+ __le16 mss;
+ struct regpair reserved;
+ u8 protection_info_flags;
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
+ u8 dst_protection_per_mss;
+ u8 src_protection_per_mss;
+ u8 ptu_log_page_size;
+ u8 flags;
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
+ u8 fcp_xfer_size;
+ u8 reserved3[2];
+};
+
+struct fcoe_vlan_fields {
+ __le16 fields;
+#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+union fcoe_vlan_field_union {
+ struct fcoe_vlan_fields fields;
+ __le16 val;
+};
+
+union fcoe_vlan_vif_field_union {
+ union fcoe_vlan_field_union vlan;
+ __le16 vif;
+};
+
+struct pstorm_fcoe_eth_context_section {
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+ u8 local_addr_1;
+ u8 local_addr_0;
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+ u8 local_addr_5;
+ u8 local_addr_4;
+ u8 local_addr_3;
+ u8 local_addr_2;
+ union fcoe_vlan_vif_field_union vif_outer_vlan;
+ __le16 vif_outer_eth_type;
+ union fcoe_vlan_vif_field_union inner_vlan;
+ __le16 inner_eth_type;
+};
+
+struct pstorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 cos;
+ u8 conf_version;
+ u8 rsrv;
+ __le16 stat_ram_addr;
+ __le16 mss;
+ struct regpair abts_cleanup_addr;
+ struct pstorm_fcoe_eth_context_section eth;
+ u8 sid_2;
+ u8 sid_1;
+ u8 sid_0;
+ u8 flags;
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4
+ u8 did_2;
+ u8 did_1;
+ u8 did_0;
+ u8 src_mac_index;
+ __le16 rec_rr_tov_val;
+ u8 q_relative_offset;
+ u8 reserved1;
+};
+
+struct xstorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 src_mac_index;
+ u8 conf_version;
+ u8 cached_wqes_avail;
+ __le16 stat_ram_addr;
+ u8 flags;
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
+ u8 cached_wqes_offset;
+ u8 reserved2;
+ u8 eth_hdr_size;
+ u8 seq_id;
+ u8 max_conc_seqs;
+ __le16 num_pages_in_pbl;
+ __le16 reserved;
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 mtu;
+ __le16 tx_max_fc_pay_len;
+ __le16 max_fc_payload_len;
+ __le16 min_frame_size;
+ __le16 sq_pbl_next_index;
+ __le16 respq_pbl_next_index;
+ u8 fcp_cmd_byte_credit;
+ u8 fcp_rsp_byte_credit;
+ __le16 protection_info;
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+ __le16 xferq_pbl_next_index;
+ __le16 page_size;
+ u8 mid_seq;
+ u8 fcp_xfer_byte_credit;
+ u8 reserved1[2];
+ struct fcoe_wqe cached_wqes[16];
+};
+
+struct xstorm_fcoe_conn_ag_ctx {
+ u8 reserved0;
+ u8 fcoe_state;
+ u8 flags0;
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 xferq_prod;
+ __le16 xferq_cons;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 remain_io;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 respq_prod;
+ __le16 respq_cons;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+};
+
+struct ustorm_fcoe_conn_st_ctx {
+ struct regpair respq_pbl_addr;
+ __le16 num_pages_in_pbl;
+ u8 ptu_log_page_size;
+ u8 log_page_size;
+ __le16 respq_prod;
+ u8 reserved[2];
+};
+
+struct tstorm_fcoe_conn_ag_ctx {
+ u8 reserved0;
+ u8 fcoe_state;
+ u8 flags0;
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct tstorm_fcoe_conn_st_ctx {
+ __le16 stat_ram_addr;
+ __le16 rx_max_fc_payload_len;
+ __le16 e_d_tov_val;
+ u8 flags;
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
+ u8 timers_cleanup_invocation_cnt;
+ __le32 reserved1[2];
+ __le32 dst_mac_address_bytes0to3;
+ __le16 dst_mac_address_bytes4to5;
+ __le16 ramrod_echo;
+ u8 flags1;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+ u8 q_relative_offset;
+ u8 bdq_resource_id;
+ u8 reserved0[5];
+};
+
+struct mstorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
+ __le16 xfer_prod;
+ __le16 reserved1;
+ u8 protection_info;
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2
+ u8 q_relative_offset;
+ u8 reserved2[2];
+};
+
+struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
+ __le16 conn_id;
+ __le16 stat_ram_addr;
+ __le16 num_pages_in_pbl;
+ u8 ptu_log_page_size;
+ u8 log_page_size;
+ __le16 unsolicited_cq_count;
+ __le16 cmdq_count;
+ u8 bdq_resource_id;
+ u8 reserved0[3];
+ struct regpair xferq_pbl_addr;
+ struct regpair reserved1;
+ struct regpair reserved2[3];
+};
+
+struct mstorm_fcoe_conn_st_ctx {
+ struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
+ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
+};
+
+struct fcoe_conn_context {
+ struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
+ struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct regpair xstorm_ag_padding[6];
+ struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
+};
+
+struct fcoe_conn_offload_ramrod_params {
+ struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
+};
+
+struct fcoe_conn_terminate_ramrod_params {
+ struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
+};
+
+enum fcoe_event_type {
+ FCOE_EVENT_INIT_FUNC,
+ FCOE_EVENT_DESTROY_FUNC,
+ FCOE_EVENT_STAT_FUNC,
+ FCOE_EVENT_OFFLOAD_CONN,
+ FCOE_EVENT_TERMINATE_CONN,
+ FCOE_EVENT_ERROR,
+ MAX_FCOE_EVENT_TYPE
+};
+
+struct fcoe_init_ramrod_params {
+ struct fcoe_init_func_ramrod_data init_ramrod_data;
+};
+
+enum fcoe_ramrod_cmd_id {
+ FCOE_RAMROD_CMD_ID_INIT_FUNC,
+ FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+ FCOE_RAMROD_CMD_ID_STAT_FUNC,
+ FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+ FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+ MAX_FCOE_RAMROD_CMD_ID
+};
+
+struct fcoe_stat_ramrod_params {
+ struct fcoe_stat_ramrod_data stat_ramrod_data;
+};
+
+struct ystorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
struct ystorm_iscsi_conn_st_ctx {
__le32 reserved[4];
};
@@ -8411,6 +9204,7 @@ struct public_func {
#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
@@ -8505,6 +9299,13 @@ struct lan_stats_stc {
u32 rserved;
};
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
struct ocbb_data_stc {
u32 ocbb_host_addr;
u32 ocsd_host_addr;
@@ -8578,6 +9379,7 @@ union drv_union_data {
struct drv_version_stc drv_version;
struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
@@ -8881,6 +9683,7 @@ struct nvm_cfg1_glob {
u32 misc_sig;
u32 device_capabilities;
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
u32 power_dissipated;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 6e4fae9b1430..899cad7f97ea 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -817,6 +841,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
if (pq_id > p_hwfn->qm_info.num_pf_rls)
pq_id = p_hwfn->qm_info.offload_pq;
break;
+ case PROTOCOLID_FCOE:
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
default:
pq_id = 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index d01557092868..9277264d2e65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_HW_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 23e455f22adc..d891a6852695 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d567ba94c8d1..243b64e0d4dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index 1e832049983d..555dd086796d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_INIT_OPS_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index c68dbf7092b1..84310b60849b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 0948be64dc78..0ae0bb4593ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_INT_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 17a70122df05..3a44d6b395fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index 67c25f3db4d5..20c187f4ed0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_ISCSI_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 6a3727c4c0c6..df932be5a4e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -74,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->cid = cid;
p_cid->vf_qid = vf_qid;
p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
@@ -189,6 +214,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->vport_id = abs_vport_id;
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
+ p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
p_ramrod->untagged = p_params->only_untagged;
@@ -248,76 +274,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
- struct qed_rss_params *p_params)
+ struct qed_rss_params *p_rss)
{
- struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
- u16 abs_l2_queue = 0, capabilities = 0;
- int rc = 0, i;
+ struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
+ int i, table_size;
+ int rc = 0;
- if (!p_params) {
+ if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
return rc;
}
+ p_config = &p_ramrod->rss_config;
- BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
- ETH_RSS_IND_TABLE_ENTRIES_NUM);
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
- rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
if (rc)
return rc;
- p_ramrod->common.update_rss_flg = p_params->update_rss_config;
- rss->update_rss_capabilities = p_params->update_rss_capabilities;
- rss->update_rss_ind_table = p_params->update_rss_ind_table;
- rss->update_rss_key = p_params->update_rss_key;
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
- rss->rss_mode = p_params->rss_enable ?
- ETH_VPORT_RSS_MODE_REGULAR :
- ETH_VPORT_RSS_MODE_DISABLED;
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4));
+ !!(p_rss->rss_caps & QED_RSS_IPV4));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6));
+ !!(p_rss->rss_caps & QED_RSS_IPV6));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
- rss->tbl_size = p_params->rss_table_size_log;
+ !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
- rss->capabilities = cpu_to_le16(capabilities);
+ p_config->capabilities = cpu_to_le16(capabilities);
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
p_ramrod->common.update_rss_flg,
- rss->rss_mode, rss->update_rss_capabilities,
- capabilities, rss->update_rss_ind_table,
- rss->update_rss_key);
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
- rc = qed_fw_l2_queue(p_hwfn,
- (u8)p_params->rss_ind_table[i],
- &abs_l2_queue);
- if (rc)
- return rc;
+ table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return -EINVAL;
- rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
- i, rss->indirection_table[i]);
+ p_config->indirection_table[i] =
+ cpu_to_le16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ le16_to_cpu(p_config->indirection_table[i]),
+ le16_to_cpu(p_config->indirection_table[i + 1]),
+ le16_to_cpu(p_config->indirection_table[i + 2]),
+ le16_to_cpu(p_config->indirection_table[i + 3]),
+ le16_to_cpu(p_config->indirection_table[i + 4]),
+ le16_to_cpu(p_config->indirection_table[i + 5]),
+ le16_to_cpu(p_config->indirection_table[i + 6]),
+ le16_to_cpu(p_config->indirection_table[i + 7]),
+ le16_to_cpu(p_config->indirection_table[i + 8]),
+ le16_to_cpu(p_config->indirection_table[i + 9]),
+ le16_to_cpu(p_config->indirection_table[i + 10]),
+ le16_to_cpu(p_config->indirection_table[i + 11]),
+ le16_to_cpu(p_config->indirection_table[i + 12]),
+ le16_to_cpu(p_config->indirection_table[i + 13]),
+ le16_to_cpu(p_config->indirection_table[i + 14]),
+ le16_to_cpu(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
- rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+ p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
return rc;
}
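A rough userspace sketch of the indirection-table fill above (constants and type names here are stand-ins, not the driver's): the ramrod only receives min(QED_RSS_IND_TABLE_SIZE, 2^tbl_size) entries, each taken from the absolute queue id of the corresponding queue-cid handle, and a missing handle aborts the update.

#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE 128			/* stand-in for QED_RSS_IND_TABLE_SIZE */

struct queue_cid { uint16_t abs_queue_id; };	/* mirrors p_queue->abs.queue_id */

/* Returns the number of entries written, or -1 if a handle is missing. */
static int fill_ind_table(uint16_t *out, struct queue_cid **handles, uint8_t tbl_size_log)
{
	int limit = 1 << tbl_size_log;
	int table_size = IND_TABLE_SIZE < limit ? IND_TABLE_SIZE : limit;
	int i;

	for (i = 0; i < table_size; i++) {
		if (!handles[i])
			return -1;		/* matches the -EINVAL bail-out above */
		out[i] = handles[i]->abs_queue_id;
	}
	return table_size;
}

int main(void)
{
	struct queue_cid q[IND_TABLE_SIZE], *h[IND_TABLE_SIZE];
	uint16_t out[IND_TABLE_SIZE];
	int i, n;

	for (i = 0; i < IND_TABLE_SIZE; i++) {
		q[i].abs_queue_id = (uint16_t)(i % 16);	/* spread over 16 rx queues */
		h[i] = &q[i];
	}
	n = fill_ind_table(out, h, 7);			/* tbl_size_log = 7 -> 128 entries */
	printf("filled %d entries, out[127] = %u\n", n, out[127]);
	return 0;
}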
@@ -1729,13 +1782,31 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
int max_vf_mac_filters = 0;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- for_each_hwfn(cdev, i)
- info->num_queues +=
- FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
- if (cdev->int_params.fp_msix_cnt)
- info->num_queues =
- min_t(u8, info->num_queues,
- cdev->int_params.fp_msix_cnt);
+ u16 num_queues = 0;
+
+ /* Since the feature controls only queue-zones,
+ * make sure we have the contexts [rx, tx, xdp] to
+ * match.
+ */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ u16 l2_queues = (u16)FEAT_NUM(hwfn,
+ QED_PF_L2_QUE);
+ u16 cids;
+
+ cids = hwfn->pf_params.eth_pf_params.num_cons;
+ num_queues += min_t(u16, l2_queues, cids / 3);
+ }
+
+ /* queues might theoretically be >256, but interrupts'
+		 * upper-limit guarantees that it would fit in a u8.
+ */
+ if (cdev->int_params.fp_msix_cnt) {
+ u8 irqs = cdev->int_params.fp_msix_cnt;
+
+ info->num_queues = (u8)min_t(u16,
+ num_queues, irqs);
+ }
} else {
info->num_queues = cdev->num_hwfns;
}
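A worked example of the queue count computed above, under assumed numbers (2 hwfns, 64 L2 queue-zones each, 96 ethernet connections per hwfn, 48 fastpath MSI-X vectors): each hwfn contributes min(64, 96 / 3) = 32 queues, and the sum of 64 is then clamped to the 48 available interrupts.

#include <stdio.h>

int main(void)
{
	int num_hwfns = 2, l2_queues = 64, cids = 96, fp_msix = 48;
	int num_queues = 0, i;

	/* one rx/tx/xdp context triplet is needed per queue, hence cids / 3 */
	for (i = 0; i < num_hwfns; i++)
		num_queues += l2_queues < cids / 3 ? l2_queues : cids / 3;

	/* the MSI-X budget is the final upper bound */
	if (fp_msix && num_queues > fp_msix)
		num_queues = fp_msix;

	printf("num_queues = %d\n", num_queues);	/* 48 with these numbers */
	return 0;
}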
@@ -1776,7 +1847,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_fill_dev_info(cdev, &info->common);
if (IS_VF(cdev))
- memset(info->common.hw_mac, 0, ETH_ALEN);
+ eth_zero_addr(info->common.hw_mac);
return 0;
}
@@ -1816,6 +1887,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.handle_ptp_pkts = params->handle_ptp_pkts;
start.vport_id = params->vport_id;
start.max_buffers_per_cqe = 16;
start.mtu = params->mtu;
@@ -1857,18 +1929,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
return 0;
}
+static int qed_update_vport_rss(struct qed_dev *cdev,
+ struct qed_update_vport_rss_params *input,
+ struct qed_rss_params *rss)
+{
+ int i, fn;
+
+ /* Update configuration with what's correct regardless of CMT */
+ rss->update_rss_config = 1;
+ rss->rss_enable = 1;
+ rss->update_rss_capabilities = 1;
+ rss->update_rss_ind_table = 1;
+ rss->update_rss_key = 1;
+ rss->rss_caps = input->rss_caps;
+ memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+	/* In the regular scenario, we'd simply need to take the input handlers.
+	 * But in CMT, we'd have to split the handlers according to the
+	 * engine they were configured on. We'd then have to understand
+	 * whether RSS is really required, since 2 queues on CMT don't
+	 * require RSS.
+ */
+ if (cdev->num_hwfns == 1) {
+ memcpy(rss->rss_ind_table,
+ input->rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+ rss->rss_table_size_log = 7;
+ return 0;
+ }
+
+	/* Start by copying the non-specific information to the 2nd copy */
+ memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+ /* CMT should be round-robin */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ struct qed_queue_cid *cid = input->rss_ind_table[i];
+ struct qed_rss_params *t_rss;
+
+ if (cid->p_owner == QED_LEADING_HWFN(cdev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+ }
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(cdev, fn) {
+ for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+ if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+ DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ return -EINVAL;
+ }
+ rss[fn].rss_table_size_log = 6;
+ }
+
+ return 0;
+}
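A minimal sketch of the CMT split above, assuming two engines and handles that alternate between them: each handle lands in the table of its owning engine at slot i / num_hwfns, so with a 128-entry input both per-engine tables end up with 64 used slots (matching rss_table_size_log = 6).

#include <stdio.h>

#define IND_TABLE_SIZE 128	/* stand-in for QED_RSS_IND_TABLE_SIZE */
#define NUM_HWFNS 2

int main(void)
{
	int split[NUM_HWFNS][IND_TABLE_SIZE] = { { 0 } };
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++) {
		int owner = i % NUM_HWFNS;	/* assumed: even queues on engine 0, odd on engine 1 */

		split[owner][i / NUM_HWFNS] = i;	/* mirrors rss_ind_table[i / num_hwfns] = cid */
	}

	printf("engine0: slot0=%d slot63=%d, engine1: slot0=%d slot63=%d\n",
	       split[0][0], split[0][63], split[1][0], split[1][63]);
	return 0;
}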
+
static int qed_update_vport(struct qed_dev *cdev,
struct qed_update_vport_params *params)
{
struct qed_sp_vport_update_params sp_params;
- struct qed_rss_params sp_rss_params;
- int rc, i;
+ struct qed_rss_params *rss;
+ int rc = 0, i;
if (!cdev)
return -ENODEV;
+ rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
+ if (!rss)
+ return -ENOMEM;
+
memset(&sp_params, 0, sizeof(sp_params));
- memset(&sp_rss_params, 0, sizeof(sp_rss_params));
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
@@ -1882,66 +2020,24 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
- /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
- * We need to re-fix the rss values per engine for CMT.
- */
- if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss = &params->rss_params;
- int k, max = 0;
-
- /* Find largest entry, since it's possible RSS needs to
- * be disabled [in case only 1 queue per-hwfn]
- */
- for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
- max = (max > rss->rss_ind_table[k]) ?
- max : rss->rss_ind_table[k];
-
- /* Either fix RSS values or disable RSS */
- if (cdev->num_hwfns < max + 1) {
- int divisor = (max + cdev->num_hwfns - 1) /
- cdev->num_hwfns;
-
- DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "CMT - fixing RSS values (modulo %02x)\n",
- divisor);
-
- for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
- rss->rss_ind_table[k] =
- rss->rss_ind_table[k] % divisor;
- } else {
- DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ /* Prepare the RSS configuration */
+ if (params->update_rss_flg)
+ if (qed_update_vport_rss(cdev, &params->rss_params, rss))
params->update_rss_flg = 0;
- }
- }
-
- /* Now, update the RSS configuration for actual configuration */
- if (params->update_rss_flg) {
- sp_rss_params.update_rss_config = 1;
- sp_rss_params.rss_enable = 1;
- sp_rss_params.update_rss_capabilities = 1;
- sp_rss_params.update_rss_ind_table = 1;
- sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = params->rss_params.rss_caps;
- sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
- memcpy(sp_rss_params.rss_ind_table,
- params->rss_params.rss_ind_table,
- QED_RSS_IND_TABLE_SIZE * sizeof(u16));
- memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
- QED_RSS_KEY_SIZE * sizeof(u32));
- sp_params.rss_params = &sp_rss_params;
- }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (params->update_rss_flg)
+ sp_params.rss_params = &rss[i];
+
sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = qed_sp_vport_update(p_hwfn, &sp_params,
QED_SPQ_MODE_EBLOCK,
NULL);
if (rc) {
DP_ERR(cdev, "Failed to update VPORT\n");
- return rc;
+ goto out;
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
@@ -1950,7 +2046,9 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg);
}
- return 0;
+out:
+ vfree(rss);
+ return rc;
}
static int qed_start_rxq(struct qed_dev *cdev,
@@ -2114,11 +2212,14 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
QED_ACCEPT_MCAST_MATCHED |
QED_ACCEPT_BCAST;
- if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
QED_ACCEPT_MCAST_UNMATCHED;
- else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+ accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ }
return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
QED_SPQ_MODE_CB, NULL);
@@ -2229,6 +2330,8 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif
+extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
@@ -2237,6 +2340,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
#ifdef CONFIG_DCB
.dcb = &qed_dcbnl_ops_pass,
#endif
+ .ptp = &qed_ptp_ops_pass,
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 48c9bfc28140..e763abd334f6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_L2_H
#define _QED_L2_H
@@ -15,6 +39,20 @@
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+
+	/* Indirection table consists of rx queue handles */
+ void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
struct qed_sge_tpa_params {
u8 max_buffers_per_cqe;
@@ -118,6 +156,7 @@ struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool tx_switching;
+ bool handle_ptp_pkts;
bool only_untagged;
bool drop_ttl0;
u8 max_buffers_per_cqe;
@@ -132,18 +171,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
@@ -263,6 +290,8 @@ struct qed_queue_cid {
/* Legacy VFs might have Rx producer located elsewhere */
bool b_legacy_vf;
+
+ struct qed_hwfn *p_owner;
};
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8e5cb7605b0f..9a0b9af10a57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -297,7 +320,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +332,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->
my_id,
@@ -378,7 +401,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
spin_unlock_irqrestore(&p_tx->lock, flags);
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_complete_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
@@ -550,7 +573,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +761,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
l4_hdr_offset_w,
- p_ll2_conn->tx_dest, 0,
+ p_ll2_conn->conn.tx_dest, 0,
first_frag,
p_buffer->packet_length,
p_buffer, true);
@@ -858,7 +881,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
u16 buf_idx;
int rc = 0;
- if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
@@ -901,7 +924,7 @@ static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +936,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
{
struct qed_ooo_buffer *p_buffer;
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +968,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_info *ll2_info;
+ struct qed_ll2_conn ll2_info;
int rc;
- ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
- if (!ll2_info)
- return -ENOMEM;
- ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
- ll2_info->mtu = params->mtu;
- ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info->tx_tc = OOO_LB_TC;
- ll2_info->tx_dest = CORE_TX_DEST_LB;
-
- rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+ ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = OOO_LB_TC;
+ ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+ rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
- kfree(ll2_info);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
@@ -1006,7 +1025,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1051,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
DMA_REGPAIR_LE(p_ramrod->bd_base,
p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1059,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
- p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1;
@@ -1056,14 +1075,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
}
p_ramrod->action_on_error.error_type = action_on_error;
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1094,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1115,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -1106,11 +1125,14 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_params.core.tc = p_ll2_conn->conn.tx_tc;
pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
switch (conn_type) {
+ case QED_LL2_TYPE_FCOE:
+ p_ramrod->conn_type = PROTOCOLID_FCOE;
+ break;
case QED_LL2_TYPE_ISCSI:
case QED_LL2_TYPE_ISCSI_OOO:
p_ramrod->conn_type = PROTOCOLID_ISCSI;
@@ -1123,7 +1145,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1224,7 +1246,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, rx_num_desc);
+ p_ll2_info->conn.conn_type, rx_num_desc);
out:
return rc;
@@ -1262,7 +1284,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, tx_num_desc);
+ p_ll2_info->conn.conn_type, tx_num_desc);
out:
if (rc)
@@ -1273,7 +1295,7 @@ out:
}
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle)
@@ -1302,15 +1324,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (!p_ll2_info)
return -EBUSY;
- p_ll2_info->conn_type = p_params->conn_type;
- p_ll2_info->mtu = p_params->mtu;
- p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
- p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
- p_ll2_info->tx_tc = p_params->tx_tc;
- p_ll2_info->tx_dest = p_params->tx_dest;
- p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
- p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
- p_ll2_info->gsi_enable = p_params->gsi_enable;
+ p_ll2_info->conn = *p_params;
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
if (rc)
@@ -1371,9 +1385,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
SET_FIELD(action_on_error,
CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
- p_ll2_conn->ai_err_packet_too_big);
+ p_ll2_conn->conn.ai_err_packet_too_big);
SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
@@ -1447,6 +1461,15 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ 0x8906, 0,
+ QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ 0x8914, 0,
+ QED_LLH_FILTER_ETHERTYPE);
+ }
+
return rc;
}
@@ -1600,7 +1623,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id,
p_ll2->cid,
- p_ll2->conn_type,
+ p_ll2->conn.conn_type,
prod_idx,
first_frag_len,
num_of_bds,
@@ -1676,7 +1699,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
- p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+ p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,9 +1840,18 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ 0x8906, 0,
+ QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ 0x8914, 0,
+ QED_LLH_FILTER_ETHERTYPE);
+ }
+
return rc;
}
@@ -1993,7 +2025,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
- struct qed_ll2_info ll2_info;
+ struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
@@ -2028,6 +2060,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ case QED_PCI_FCOE:
+ conn_type = QED_LL2_TYPE_FCOE;
+ gsi_enable = 0;
+ break;
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
gsi_enable = 0;
@@ -2041,6 +2077,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
/* Prepare the temporary ll2 information */
memset(&ll2_info, 0, sizeof(ll2_info));
+
ll2_info.conn_type = conn_type;
ll2_info.mtu = params->mtu;
ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2157,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
return 0;
release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 6625a3ae5a33..31a409033c41 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -1,10 +1,33 @@
/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_LL2_H
@@ -31,7 +54,7 @@ enum qed_ll2_roce_flavor_type {
};
enum qed_ll2_conn_type {
- QED_LL2_TYPE_RESERVED,
+ QED_LL2_TYPE_FCOE,
QED_LL2_TYPE_ISCSI,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_ISCSI_OOO,
@@ -112,15 +135,8 @@ struct qed_ll2_tx_queue {
bool b_completing_packet;
};
-struct qed_ll2_info {
- /* Lock protecting the state of LL2 */
- struct mutex mutex;
+struct qed_ll2_conn {
enum qed_ll2_conn_type conn_type;
- u32 cid;
- u8 my_id;
- u8 queue_id;
- u8 tx_stats_id;
- bool b_active;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
@@ -128,10 +144,21 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
+ u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ struct qed_ll2_conn conn;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- u8 gsi_enable;
};
/**
@@ -149,7 +176,7 @@ struct qed_ll2_info {
* @return 0 on success, failure otherwise
*/
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index aeb98d8c5626..eef30a598b40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/stddef.h>
@@ -29,9 +53,11 @@
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
+#include "qed_fcoe.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#include "qed_debug.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@ -853,6 +879,17 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
+ /* In case we might support RDMA, don't allow qede to be greedy
+ * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+ */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
+ QED_PCI_ETH_ROCE) {
+ u16 *num_cons;
+
+ num_cons = &params->eth_pf_params.num_cons;
+ *num_cons = min_t(u16, *num_cons, 192);
+ }
+
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -867,6 +904,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+ struct qed_ptt *p_ptt;
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -881,6 +919,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
QED_FW_FILE_NAME);
goto err;
}
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
+ goto err;
+ }
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -968,6 +1014,10 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
+ if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+
qed_iov_wq_stop(cdev, false);
return rc;
@@ -981,6 +1031,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_ptp_ptt);
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1020,6 +1072,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
int hwfn_index;
u16 rel_sb_id;
u8 n_hwfns;
@@ -1041,8 +1094,18 @@ static u32 qed_sb_init(struct qed_dev *cdev,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
hwfn_index, rel_sb_id, sb_id);
- rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
- sb_virt_addr, sb_phy_addr, rel_sb_id);
+ if (IS_PF(p_hwfn->cdev)) {
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ qed_ptt_release(p_hwfn, p_ptt);
+ } else {
+ rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ }
return rc;
}
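The PF branch above follows the usual acquire/use/release bracket for a PTT window, while the VF path passes NULL; a schematic userspace sketch of that bracket (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct ptt { int id; };				/* stand-in for struct qed_ptt */

static struct ptt *ptt_acquire(void) { return calloc(1, sizeof(struct ptt)); }
static void ptt_release(struct ptt *p) { free(p); }
static int sb_init(struct ptt *p) { (void)p; return 0; }	/* NULL is acceptable on the VF path */

static int init_status_block(int is_pf)
{
	int rc;

	if (is_pf) {
		struct ptt *p = ptt_acquire();

		if (!p)
			return -1;		/* mirrors the -EBUSY bail-out */
		rc = sb_init(p);
		ptt_release(p);			/* released whether init succeeded or not */
	} else {
		rc = sb_init(NULL);
	}
	return rc;
}

int main(void) { return init_status_block(1); }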
@@ -1083,12 +1146,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
if (!cdev)
return -ENODEV;
- if (IS_VF(cdev))
- return 0;
-
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
+ /* When VF wants to set link, force it to read the bulletin instead.
+	 * This mimics the PF behavior, where a notification [both immediate
+	 * and possibly later] would be generated when changing properties.
+ */
+ if (IS_VF(cdev)) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
+ return 0;
+ }
+
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EBUSY;
@@ -1553,6 +1622,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .dbg_grc = &qed_dbg_grc,
+ .dbg_grc_size = &qed_dbg_grc_size,
.can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
@@ -1586,6 +1657,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
+ case QED_MCP_FCOE_STATS:
+ qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
+ break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 6dd3ce443484..314022df3469 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -168,6 +192,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->lock);
+ spin_lock_init(&p_info->link_lock);
return 0;
@@ -586,6 +611,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
+ /* Prevent SW/attentions from doing this at the same time */
+ spin_lock_bh(&p_hwfn->mcp_info->link_lock);
+
p_link = &p_hwfn->mcp_info->link_output;
memset(p_link, 0, sizeof(*p_link));
if (!b_reset) {
@@ -600,7 +628,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
- return;
+ goto out;
}
if (p_hwfn->b_drv_link_init)
@@ -707,6 +735,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
qed_link_update(p_hwfn);
+out:
+ spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
@@ -756,9 +786,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return rc;
}
- /* Reset the link status if needed */
- if (!b_up)
- qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
return 0;
}
@@ -1098,12 +1132,17 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ *p_proto = QED_PCI_ETH;
+ else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
+ case FUNC_MF_CFG_PROTOCOL_FCOE:
+ *p_proto = QED_PCI_FCOE;
+ break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
/* Fallthrough */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 407a2c1830fb..368e88de146c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_MCP_H
@@ -13,6 +37,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
struct qed_mcp_link_speed_params {
@@ -460,7 +485,13 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
struct qed_mcp_info {
+ /* Spinlock used for protecting the access to the MFW mailbox */
spinlock_t lock;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ spinlock_t link_lock;
bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 155abcb507fd..7d731c6cb892 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 7a0670a9a074..4f138fb5f533 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_OOO_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
new file mode 100644
index 000000000000..d27aa85da23c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -0,0 +1,323 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hw.h"
+#include "qed_l2.h"
+#include "qed_ptp.h"
+#include "qed_reg_addr.h"
+
+/* 16-nanosecond time quanta to wait before making a Drift adjustment */
+#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
+/* Nanoseconds to add/subtract when making a Drift adjustment */
+#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
+/* Add/subtract the Adjustment_Value when making a Drift adjustment */
+#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
+#define QED_TIMESTAMP_MASK BIT(16)
+
+/* Read Rx timestamp */
+static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 val;
+
+ *timestamp = 0;
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
+ if (!(val & QED_TIMESTAMP_MASK)) {
+ DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
+ return -EINVAL;
+ }
+
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
+ *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
+ *timestamp <<= 32;
+ *timestamp |= val;
+
+ /* Reset timestamp register to allow new timestamp */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+
+ return 0;
+}
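The timestamp above is assembled from two 32-bit register reads; a standalone illustration with made-up MSB/LSB values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lsb = 0x89abcdef, msb = 0x01234567;	/* made-up register contents */
	uint64_t ts = ((uint64_t)msb << 32) | lsb;

	printf("ts = 0x%016llx\n", (unsigned long long)ts);	/* 0x0123456789abcdef */
	return 0;
}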
+
+/* Read Tx timestamp */
+static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 val;
+
+ *timestamp = 0;
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
+ if (!(val & QED_TIMESTAMP_MASK)) {
+ DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
+ return -EINVAL;
+ }
+
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
+ *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
+ *timestamp <<= 32;
+ *timestamp |= val;
+
+ /* Reset timestamp register to allow new timestamp */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+/* Read Phy Hardware Clock */
+static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 temp = 0;
+
+ temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
+ *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
+ *phc_cycles <<= 32;
+ *phc_cycles |= temp;
+
+ return 0;
+}
+
+/* Filter PTP protocol packets that need to be timestamped */
+static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
+ enum qed_ptp_filter_type type)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 rule_mask, parm_mask;
+
+ switch (type) {
+ case QED_PTP_FILTER_L2_IPV4_IPV6:
+ parm_mask = 0x6AA;
+ rule_mask = 0x3EEE;
+ break;
+ case QED_PTP_FILTER_L2:
+ parm_mask = 0x6BF;
+ rule_mask = 0x3EFF;
+ break;
+ case QED_PTP_FILTER_IPV4_IPV6:
+ parm_mask = 0x7EA;
+ rule_mask = 0x3FFE;
+ break;
+ case QED_PTP_FILTER_IPV4:
+ parm_mask = 0x7EE;
+ rule_mask = 0x3FFE;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+
+ /* Reset possibly old timestamps */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
+ * FW/HW accepts the adjustment value in terms of 3 parameters:
+ * Drift period - adjustment happens once in certain number of nano seconds.
+ * Drift value - time is adjusted by a certain value, for example by 5 ns.
+ * Drift direction - add or subtract the adjustment value.
+ * The routine translates ppb into the adjustment triplet in an optimal manner.
+ */
+static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
+{
+ s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 drift_ctr_cfg = 0, drift_state;
+ int drift_dir = 1;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ drift_dir = 0;
+ }
+
+ if (ppb > 1) {
+ s64 best_dif = ppb, best_approx_dev = 1;
+
+ /* Adjustment value is up to +/-7ns, find an optimal value in
+ * this range.
+ */
+ for (val = 7; val > 0; val--) {
+ period = div_s64(val * 1000000000, ppb);
+ period -= 8;
+ period >>= 4;
+ if (period < 1)
+ period = 1;
+ if (period > 0xFFFFFFE)
+ period = 0xFFFFFFE;
+
+ /* Check both rounding ends for approximate error */
+ approx_dev = period * 16 + 8;
+ dif = ppb * approx_dev - val * 1000000000;
+ dif2 = dif + 16 * ppb;
+
+ if (dif < 0)
+ dif = -dif;
+ if (dif2 < 0)
+ dif2 = -dif2;
+
+ /* Determine which end gives better approximation */
+ if (dif * (approx_dev + 16) > dif2 * approx_dev) {
+ period++;
+ approx_dev += 16;
+ dif = dif2;
+ }
+
+ /* Track best approximation found so far */
+ if (best_dif * approx_dev > dif * best_approx_dev) {
+ best_dif = dif;
+ best_val = val;
+ best_period = period;
+ best_approx_dev = approx_dev;
+ }
+ }
+ } else if (ppb == 1) {
+		/* This is a special case, as it's the only value that wouldn't
+		 * fit in an s64 variable. In order to prevent casts, simply
+		 * handle it separately.
+ */
+ best_val = 4;
+ best_period = 0xee6b27f;
+ } else {
+ best_val = 0;
+ best_period = 0xFFFFFFF;
+ }
+
+ drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
+ (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
+ (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
+
+ drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
+ if (drift_state & 1) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
+ drift_ctr_cfg);
+ } else {
+ DP_INFO(p_hwfn, "Drift counter is not reset\n");
+ return -EINVAL;
+ }
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+ return 0;
+}
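One way to read the translation above: the hardware adds (or subtracts) best_val nanoseconds once every roughly 16 * best_period + 8 nanoseconds, so the realized rate is about best_val * 10^9 / (16 * best_period + 8) ppb. A quick userspace check for one candidate (val = 7) against an assumed request of 100 ppb:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ppb = 100, val = 7;	/* assumed request and one candidate adjustment value */
	int64_t period = (val * 1000000000LL / ppb - 8) >> 4;
	double realized = (double)val * 1e9 / (double)(16 * period + 8);

	printf("period = %lld, realized = %.6f ppb\n",
	       (long long)period, realized);	/* period = 4374999, ~100.000011 ppb */
	return 0;
}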
+
+static int qed_ptp_hw_enable(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+ /* Reset PTP event detection rules - will be configured in the IOCTL */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
+
+ /* Pause free running counter */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
+ /* Resume free running counter */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+
+ /* Disable drift register */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+ /* Reset possibly old timestamps */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
+
+ return 0;
+}
+
+static int qed_ptp_hw_disable(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+ /* Reset PTP event detection rules */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable the PTP feature */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+
+ return 0;
+}
+
+const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
+ .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
+ .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+ .read_rx_ts = qed_ptp_hw_read_rx_ts,
+ .read_tx_ts = qed_ptp_hw_read_tx_ts,
+ .read_cc = qed_ptp_hw_read_cc,
+ .adjfreq = qed_ptp_hw_adjfreq,
+ .disable = qed_ptp_hw_disable,
+ .enable = qed_ptp_hw_enable,
+};
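A hypothetical consumer-side sketch (struct and function names are illustrative, not taken from this patch) of how an upper-layer ethernet driver holding the ops table registered above might drive the PTP callbacks:

#include <stdio.h>

struct ptp_ops {			/* mock of the callback surface exported above */
	int (*enable)(void *cdev);
	int (*adjfreq)(void *cdev, int ppb);
	int (*disable)(void *cdev);
};

static int mock_enable(void *cdev) { (void)cdev; return 0; }
static int mock_adjfreq(void *cdev, int ppb) { (void)cdev; printf("adjust %d ppb\n", ppb); return 0; }
static int mock_disable(void *cdev) { (void)cdev; return 0; }

static const struct ptp_ops ops = {
	.enable = mock_enable,
	.adjfreq = mock_adjfreq,
	.disable = mock_disable,
};

int main(void)
{
	void *cdev = NULL;		/* would be the qed device handle */

	if (!ops.enable(cdev))
		ops.adjfreq(cdev, -50);	/* e.g. slow the hardware clock by 50 ppb */
	ops.disable(cdev);
	return 0;
}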
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
new file mode 100644
index 000000000000..63c666d0b739
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
@@ -0,0 +1,47 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_PTP_H
+#define _QED_PTP_H
+#include <linux/types.h>
+
+int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_ptp_filter_type type);
+int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u64 *cycles);
+int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
+int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 97544205a8c1..d59d9df60cd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef REG_ADDR_H
@@ -86,6 +110,8 @@
0x1e80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \
+ 0x1f0164UL
#define PRS_REG_SEARCH_TCP \
0x1f0400UL
#define PRS_REG_SEARCH_UDP \
@@ -96,6 +122,12 @@
0x1f040cUL
#define PRS_REG_SEARCH_OPENFLOW \
0x1f0434UL
+#define PRS_REG_SEARCH_TAG1 \
+ 0x1f0444UL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
+ 0x1f0a0cUL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
+ 0x1f0410UL
#define TM_REG_PF_ENABLE_CONN \
0x2c043cUL
#define TM_REG_PF_ENABLE_TASK \
@@ -1457,4 +1489,35 @@
#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define NIG_REG_RX_PTP_EN 0x501900UL
+#define NIG_REG_TX_PTP_EN 0x501904UL
+#define NIG_REG_LLH_PTP_TO_HOST 0x501908UL
+#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL
+#define NIG_REG_PTP_SW_TXTSEN 0x501910UL
+#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL
+#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL
+#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL
+#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL
+#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL
+#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB 0x501938UL
+#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL
+#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL
+#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL
+#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL
+#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL
+#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL
+#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL
+#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL
+#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL
+#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2a16547c8966..d9ff6b28591c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -1,5 +1,5 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -948,7 +948,9 @@ static int qed_rdma_create_cq(void *rdma_cxt,
err:
/* release allocated icid */
+ spin_lock_bh(&p_info->lock);
qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
return rc;
@@ -1766,13 +1768,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_resp;
- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
- p_resp_ramrod_res, resp_ramrod_res_phys);
-
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
if (!(qp->req_offloaded)) {
/* Don't send query qp for the requester */
out_params->sq_psn = qp->sq_psn;
@@ -1813,9 +1815,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_req;
- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
- p_req_ramrod_res, req_ramrod_res_phys);
-
out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
@@ -1823,6 +1822,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
out_params->draining = false;
if (rq_err_state)
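The two hunks above reorder qed_roce_query_qp() so the responder and requester ramrod results are read before dma_free_coherent() releases the buffers, closing a use-after-free. The same rule, reduced to a plain user-space analogue with made-up struct and field names:

#include <stdlib.h>
#include <stdint.h>

/* Made-up stand-in for the DMA-coherent ramrod response buffer */
struct query_resp {
	uint32_t psn;
	uint32_t flags;
};

/* Copy every needed field out of the buffer, then release it - never the
 * other way around.
 */
static int consume_resp(struct query_resp *buf, uint32_t *psn, uint32_t *flags)
{
	if (!buf)
		return -1;

	*psn = buf->psn;      /* read while the buffer is still valid */
	*flags = buf->flags;

	free(buf);            /* last access happened above */
	return 0;
}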
@@ -1847,6 +1849,7 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
u32 start_cid;
@@ -1861,35 +1864,39 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return -EINVAL;
}
- rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
- if (rc)
- return rc;
+ if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &num_invalidated_mw);
+ if (rc)
+ return rc;
- /* Send destroy requester ramrod */
- rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
- if (rc)
- return rc;
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+ &num_bound_mw);
+ if (rc)
+ return rc;
- if (num_invalidated_mw != num_bound_mw) {
- DP_NOTICE(p_hwfn,
- "number of invalidate memory windows is different from bounded ones\n");
- return -EINVAL;
- }
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ spin_lock_bh(&p_rdma_info->lock);
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_hwfn->p_rdma_info->proto);
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
- /* Release responder's icid */
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
- qp->icid - start_cid);
+ /* Release responder's icid */
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
+ qp->icid - start_cid);
- /* Release requester's icid */
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
- qp->icid + 1 - start_cid);
+ /* Release requester's icid */
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
+ qp->icid + 1 - start_cid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ spin_unlock_bh(&p_rdma_info->lock);
+ }
return 0;
}
@@ -2632,7 +2639,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2;
- struct qed_ll2_info ll2_params;
+ struct qed_ll2_conn ll2_params;
int rc;
if (!params) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 279f342af8db..36cf4b2ab7fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -1,5 +1,5 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index 48bfaecaf6dc..1bafc05db2b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -1,3 +1,35 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
#include <linux/crc32.h>
#include "qed.h"
#include "qed_dev_api.h"
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 9c897bc68d05..30393ffaa8e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_SP_H
@@ -85,6 +109,10 @@ union ramrod_data {
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
struct roce_init_func_ramrod_data roce_init_func;
+ struct fcoe_init_ramrod_params fcoe_init;
+ struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
+ struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
+ struct fcoe_stat_ramrod_params fcoe_stat;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a39ef2e7a9a6..6fb80f9ef446 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
@@ -362,6 +386,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
case QED_PCI_ETH:
p_ramrod->personality = PERSONALITY_ETH;
break;
+ case QED_PCI_FCOE:
+ p_ramrod->personality = PERSONALITY_FCOE;
+ break;
case QED_PCI_ISCSI:
p_ramrod->personality = PERSONALITY_ISCSI;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f022469bdcf8..645328a9f0cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 85b09dd1787a..29ed785f1dc2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1,13 +1,38 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/crc32.h>
+#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -806,10 +831,52 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
vf->num_sbs = 0;
}
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iov_vf_init_params *p_params)
{
+ struct qed_mcp_link_capabilities link_caps;
+ struct qed_mcp_link_params link_params;
+ struct qed_mcp_link_state link_state;
u8 num_of_vf_avaiable_chains = 0;
struct qed_vf_info *vf = NULL;
u16 qid, num_irqs;
@@ -898,6 +965,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
p_queue->fw_tx_qid, p_queue->fw_cid);
}
+ /* Update the link configuration in bulletin */
+ memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (!rc) {
vf->b_init = true;
@@ -909,45 +985,6 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
- u16 vfid,
- struct qed_mcp_link_params *params,
- struct qed_mcp_link_state *link,
- struct qed_mcp_link_capabilities *p_caps)
-{
- struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
- vfid,
- false);
- struct qed_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
-
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
-
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
- p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 rel_vf_id)
{
@@ -1199,7 +1236,10 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
return;
/* Clear the VF mac */
- memset(vf_info->mac, 0, ETH_ALEN);
+ eth_zero_addr(vf_info->mac);
+
+ vf_info->rx_accept_mode = 0;
+ vf_info->tx_accept_mode = 0;
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
@@ -2294,12 +2334,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
struct qed_sp_vport_update_params *p_data,
struct qed_rss_params *p_rss,
- struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct qed_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2323,34 +2365,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
p_rss->rss_eng_id = vf->relative_vf_id + 1;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn,
- "rss_ind_table[%d] = %d, rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].p_rx_cid)
- DP_NOTICE(p_hwfn,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ if (!vf->vf_queues[q_idx].p_rx_cid) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}
static void
@@ -2401,16 +2448,49 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}
+static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
+ u8 vfid,
+ struct qed_sp_vport_update_params *params,
+ u16 *tlvs)
+{
+ u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+ struct qed_filter_accept_flags *flags = &params->accept_flags;
+ struct qed_public_vf_info *vf_info;
+
+ /* Untrusted VFs can't even be trusted to know that fact.
+ * Simply indicate everything is configured fine, and trace
+ * configuration 'behind their back'.
+ */
+ if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+ return 0;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+ if (flags->update_rx_mode_config) {
+ vf_info->rx_accept_mode = flags->rx_accept_filter;
+ if (!vf_info->is_trusted_configured)
+ flags->rx_accept_filter &= ~mask;
+ }
+
+ if (flags->update_tx_mode_config) {
+ vf_info->tx_accept_mode = flags->tx_accept_filter;
+ if (!vf_info->is_trusted_configured)
+ flags->tx_accept_filter &= ~mask;
+ }
+
+ return 0;
+}
+
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
+ struct qed_rss_params *p_rss_params = NULL;
struct qed_sp_vport_update_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct qed_sge_tpa_params sge_tpa_params;
- struct qed_rss_params rss_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
u8 status = PFVF_STATUS_SUCCESS;
- u16 tlvs_mask = 0;
u16 length;
int rc;
@@ -2423,6 +2503,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
goto out;
}
+ p_rss_params = vzalloc(sizeof(*p_rss_params));
+ if (p_rss_params == NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
@@ -2437,20 +2522,33 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
- /* Just log a message if there is no single extended tlv in buffer.
- * When all features of vport update ramrod would be requested by VF
- * as extended TLVs in buffer then an error can be returned in response
- * if there is no extended TLV present in buffer.
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; In that case,
+ * they can update the mask without updating the accepted [so that
+ * PF could communicate to VF it has rejected request].
*/
- if (!tlvs_mask) {
- DP_NOTICE(p_hwfn,
- "No feature tlvs found for vport update\n");
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
+ if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted)) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Upper-layer prevents VF vport configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
@@ -2461,8 +2559,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
out:
+ vfree(p_rss_params);
length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
- tlvs_mask, tlvs_mask);
+ tlvs_mask, tlvs_accepted);
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
@@ -2539,8 +2638,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(p_vf->shadow_config.macs[i],
p_params->mac)) {
- memset(p_vf->shadow_config.macs[i], 0,
- ETH_ALEN);
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
break;
}
}
@@ -2553,7 +2651,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
} else if (p_params->opcode == QED_FILTER_REPLACE ||
p_params->opcode == QED_FILTER_FLUSH) {
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
- memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
}
/* List the new MAC address */
@@ -3892,6 +3990,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev,
return 0;
}
+static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
+ DP_NOTICE(hwfn,
+ "SR-IOV sanity check failed, can't set trust\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+ if (vf->is_trusted_request == trust)
+ return 0;
+ vf->is_trusted_request = trust;
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
+ }
+
+ return 0;
+}
+
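qed_set_vf_trust() only records the admin's request (typically issued as "ip link set <dev> vf <N> trust on", which reaches the driver through the ndo_set_vf_trust path) and defers the actual reconfiguration to the IOV workqueue via QED_IOV_WQ_TRUST_FLAG. A standalone sketch of that request/configured two-flag handshake, with invented names, showing why the worker only acts when the two flags disagree:

#include <stdbool.h>

/* Invented per-VF state, for illustration only */
struct vf_trust_state {
	bool request;      /* what the admin last asked for */
	bool configured;   /* what is currently programmed */
};

/* Fast path: record the request and (in the real driver) kick the worker */
static void set_trust(struct vf_trust_state *vf, bool trust)
{
	if (vf->request == trust)
		return;                 /* nothing changed, nothing to schedule */
	vf->request = trust;
}

/* Deferred worker: touch the configuration only when the flags disagree */
static void trust_worker(struct vf_trust_state *vf)
{
	if (vf->configured == vf->request)
		return;
	vf->configured = vf->request;
	/* ...reprogram accept flags for this VF here... */
}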
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
u64 events[QED_VF_ARRAY_LENGTH];
@@ -3996,6 +4120,61 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
+static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_filter_accept_flags *flags;
+ struct qed_public_vf_info *vf_info;
+ struct qed_vf_info *vf;
+ u8 mask;
+ int i;
+
+ mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+ flags = &params.accept_flags;
+
+ qed_for_each_vf(hwfn, i) {
+ /* Need to make sure current requested configuration didn't
+ * flip so that we'll end up configuring something that's not
+ * needed.
+ */
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (vf_info->is_trusted_configured ==
+ vf_info->is_trusted_request)
+ continue;
+ vf_info->is_trusted_configured = vf_info->is_trusted_request;
+
+ /* Validate that the VF has a configured vport */
+ vf = qed_iov_get_vf_info(hwfn, i, true);
+ if (!vf->vport_instance)
+ continue;
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+
+ if (vf_info->rx_accept_mode & mask) {
+ flags->update_rx_mode_config = 1;
+ flags->rx_accept_filter = vf_info->rx_accept_mode;
+ }
+
+ if (vf_info->tx_accept_mode & mask) {
+ flags->update_tx_mode_config = 1;
+ flags->tx_accept_filter = vf_info->tx_accept_mode;
+ }
+
+ /* Remove if needed; Otherwise this would set the mask */
+ if (!vf_info->is_trusted_configured) {
+ flags->rx_accept_filter &= ~mask;
+ flags->tx_accept_filter &= ~mask;
+ }
+
+ if (flags->update_rx_mode_config ||
+ flags->update_tx_mode_config)
+ qed_sp_vport_update(hwfn, &params,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ }
+}
+
static void qed_iov_pf_task(struct work_struct *work)
{
@@ -4031,6 +4210,9 @@ static void qed_iov_pf_task(struct work_struct *work)
if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
&hwfn->iov_task_flags))
qed_handle_bulletin_post(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
+ qed_iov_handle_trust_change(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
@@ -4093,4 +4275,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = {
.set_link_state = &qed_set_vf_link_state,
.set_spoof = &qed_spoof_configure,
.set_rate = &qed_set_vf_rate,
+ .set_trust = &qed_set_vf_trust,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 509c02b4772e..fc08cc2da6a7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_SRIOV_H
@@ -56,6 +80,14 @@ struct qed_public_vf_info {
/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
int tx_rate;
+
+ /* Trusted VFs can configure promiscuous mode.
+ * Also store shadow promisc configuration if needed.
+ */
+ bool is_trusted_configured;
+ bool is_trusted_request;
+ u8 rx_accept_mode;
+ u8 tx_accept_mode;
};
struct qed_iov_vf_init_params {
@@ -221,6 +253,8 @@ enum qed_iov_wq_flag {
QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
QED_IOV_WQ_STOP_WQ_FLAG,
QED_IOV_WQ_FLR_FLAG,
+ QED_IOV_WQ_TRUST_FLAG,
+ QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
#ifdef CONFIG_QED_SRIOV
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 60b31a8ede73..15d2855ec563 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#include <linux/crc32.h>
@@ -814,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
if (p_params->rss_params) {
struct qed_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = qed_add_tlv(p_hwfn,
@@ -836,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
@@ -1253,6 +1285,9 @@ void qed_iov_vf_task(struct work_struct *work)
/* Handle bulletin board changes */
qed_vf_read_bulletin(hwfn, &change);
+ if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+ &hwfn->iov_task_flags))
+ change = 1;
if (change)
qed_handle_bulletin_change(hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 11eb3854e6f2..7da0b165d8bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -1,9 +1,33 @@
/* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#ifndef _QED_VF_H
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 048a230c3ce0..bc5f7c3b277d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDE) := qede.o
-qede-y := qede_main.o qede_ethtool.o
+qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index c79dc78746fc..f2aaef2cfb86 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -1,11 +1,34 @@
/* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
@@ -26,7 +49,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 9
+#define QEDE_REVISION_VERSION 10
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
@@ -114,6 +137,8 @@ struct qede_rdma_dev {
struct workqueue_struct *roce_wq;
};
+struct qede_ptp;
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -125,8 +150,10 @@ struct qede_dev {
u32 flags;
#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+#define QEDE_TX_TIMESTAMPING_EN BIT(1)
const struct qed_eth_ops *ops;
+ struct qede_ptp *ptp;
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
@@ -141,6 +168,7 @@ struct qede_dev {
u16 num_queues;
#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_RX_QUEUE_IDX(edev, i) (i)
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
@@ -171,7 +199,10 @@ struct qede_dev {
#define QEDE_RSS_KEY_INITED BIT(1)
#define QEDE_RSS_CAPS_INITED BIT(2)
u32 rss_params_inited; /* bit-field to track initialized rss params */
- struct qed_update_vport_rss_params rss_params;
+ u16 rss_ind_table[128];
+ u32 rss_key[10];
+ u8 rss_caps;
+
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -257,7 +288,7 @@ struct qede_rx_queue {
u16 sw_rx_cons;
u16 sw_rx_prod;
- u16 num_rx_buffers; /* Slowpath */
+ u16 filled_buffers;
u8 data_direction;
u8 rxq_id;
@@ -270,6 +301,9 @@ struct qede_rx_queue {
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
+ /* Used once per each NAPI run */
+ u16 num_rx_buffers;
+
/* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
@@ -385,9 +419,42 @@ struct qede_reload_args {
} u;
};
+/* Datapath functions definition */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
+int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq, int *len);
+int qede_poll(struct napi_struct *napi, int budget);
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
+
+/* Filtering function definitions */
+void qede_force_mac(void *dev, u8 *mac, bool forced);
+int qede_set_mac_addr(struct net_device *ndev, void *p);
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
+int qede_configure_vlan_filters(struct qede_dev *edev);
+
+int qede_set_features(struct net_device *dev, netdev_features_t features);
+void qede_set_rx_mode(struct net_device *ndev);
+void qede_config_rx_mode(struct net_device *ndev);
+void qede_fill_rss_params(struct qede_dev *edev,
+ struct qed_update_vport_rss_params *rss, u8 *update);
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
+
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif
+
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1c48f445c93b..897953133245 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1,11 +1,34 @@
/* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
@@ -14,7 +37,9 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
+#include <linux/vmalloc.h>
#include "qede.h"
+#include "qede_ptp.h"
#define QEDE_RQSTAT_OFFSET(stat_name) \
(offsetof(struct qede_rx_queue, stat_name))
@@ -908,8 +933,7 @@ static int qede_set_channels(struct net_device *dev,
/* Reset the indirection table if rx queue count is updated */
if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
- memset(&edev->rss_params.rss_ind_table, 0,
- sizeof(edev->rss_params.rss_ind_table));
+ memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
}
qede_reload(edev, NULL, false);
@@ -917,6 +941,14 @@ static int qede_set_channels(struct net_device *dev,
return 0;
}
+static int qede_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return qede_ptp_get_ts_info(edev, info);
+}
+
static int qede_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -955,11 +987,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ if (edev->rss_caps & QED_RSS_IPV4_UDP)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ if (edev->rss_caps & QED_RSS_IPV6_UDP)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case IPV4_FLOW:
@@ -992,8 +1024,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
- struct qed_update_vport_params vport_update_params;
+ struct qed_update_vport_params *vport_update_params;
u8 set_caps = 0, clr_caps = 0;
+ int rc = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Set rss flags command parameters: flow type = %d, data = %llu\n",
@@ -1068,27 +1101,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
}
/* No action is needed if there is no change in the rss capability */
- if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
- ~clr_caps) | set_caps))
+ if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
return 0;
/* Update internal configuration */
- edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
- set_caps;
+ edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
/* Re-configure if possible */
- if (netif_running(edev->ndev)) {
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.update_rss_flg = 1;
- vport_update_params.vport_id = 0;
- memcpy(&vport_update_params.rss_params, &edev->rss_params,
- sizeof(vport_update_params.rss_params));
- return edev->ops->vport_update(edev->cdev,
- &vport_update_params);
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params) {
+ __qede_unlock(edev);
+ return -ENOMEM;
+ }
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
+ rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+ vfree(vport_update_params);
}
+ __qede_unlock(edev);
- return 0;
+ return rc;
}
static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
@@ -1113,7 +1148,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
- return sizeof(edev->rss_params.rss_key);
+ return sizeof(edev->rss_key);
}
static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
@@ -1128,11 +1163,10 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
return 0;
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- indir[i] = edev->rss_params.rss_ind_table[i];
+ indir[i] = edev->rss_ind_table[i];
if (key)
- memcpy(key, edev->rss_params.rss_key,
- qede_get_rxfh_key_size(dev));
+ memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
return 0;
}
@@ -1140,9 +1174,9 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct qed_update_vport_params vport_update_params;
+ struct qed_update_vport_params *vport_update_params;
struct qede_dev *edev = netdev_priv(dev);
- int i;
+ int i, rc = 0;
if (edev->dev_info.common.num_hwfns > 1) {
DP_INFO(edev,
@@ -1158,27 +1192,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
if (indir) {
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_ind_table[i] = indir[i];
edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
}
if (key) {
- memcpy(&edev->rss_params.rss_key, key,
- qede_get_rxfh_key_size(dev));
+ memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
}
- if (netif_running(edev->ndev)) {
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.update_rss_flg = 1;
- vport_update_params.vport_id = 0;
- memcpy(&vport_update_params.rss_params, &edev->rss_params,
- sizeof(vport_update_params.rss_params));
- return edev->ops->vport_update(edev->cdev,
- &vport_update_params);
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params) {
+ __qede_unlock(edev);
+ return -ENOMEM;
+ }
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
+ rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+ vfree(vport_update_params);
}
+ __qede_unlock(edev);
- return 0;
+ return rc;
}
/* This function enables the interrupt generation and the NAPI on the device */
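Both qede_set_rss_flags() and qede_set_rxfh() now vzalloc() the vport-update parameters instead of keeping the large struct on the stack, and gate the reconfiguration on QEDE_STATE_OPEN under the qede lock. A stripped-down fragment of that allocate/fill/free shape (the struct and callback are generic stand-ins, and the fragment assumes a kernel build environment; only vzalloc()/vfree() from <linux/vmalloc.h> are real API here):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>

/* Generic stand-in for a parameters struct too large for the stack */
struct big_params {
	u8 blob[4096];
};

static int do_locked_update(int (*apply)(struct big_params *p))
{
	struct big_params *p;
	int rc;

	p = vzalloc(sizeof(*p));        /* zeroed, vmalloc-backed allocation */
	if (!p)
		return -ENOMEM;

	rc = apply(p);                  /* fill and submit under the caller's lock */

	vfree(p);                       /* release on both success and failure */
	return rc;
}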
@@ -1296,7 +1333,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
- int i, rc = 0;
+ int i, iter, rc = 0;
u8 *data_ptr;
for_each_queue(i) {
@@ -1315,7 +1352,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* enabled. This is because the queue 0 is configured as the default
* queue and that the loopback traffic is not IP.
*/
- for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
if (!qede_has_rx_work(rxq)) {
usleep_range(100, 200);
continue;
@@ -1362,7 +1399,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (i == QEDE_SELFTEST_POLL_COUNT) {
+ if (iter == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
@@ -1558,6 +1595,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_ts_info = qede_get_ts_info,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
new file mode 100644
index 000000000000..107c3fda4792
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -0,0 +1,759 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/udp_tunnel.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+
+void qede_force_mac(void *dev, u8 *mac, bool forced)
+{
+ struct qede_dev *edev = dev;
+
+ /* MAC hints take effect only if we haven't set one already */
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ return;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
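+/* Build the qed RSS configuration from the driver's cached ethtool settings;
+ * *update is cleared when RSS isn't applicable (i.e., a single Rx queue).
+ */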
+void qede_fill_rss_params(struct qede_dev *edev,
+ struct qed_update_vport_rss_params *rss, u8 *update)
+{
+ bool need_reset = false;
+ int i;
+
+ if (QEDE_RSS_COUNT(edev) <= 1) {
+ memset(rss, 0, sizeof(*rss));
+ *update = 0;
+ return;
+ }
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
+ need_reset = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val, val;
+
+ val = QEDE_RSS_COUNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ /* Now that we have the queue-indirection, prepare the handles */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
+
+ rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+ memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+ rss->rss_caps = edev->rss_caps;
+
+ *update = 1;
+}
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ u16 vid)
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.vlan_valid = 1;
+ filter_cmd.filter.ucast.vlan = vid;
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+ struct qed_update_vport_params *params;
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (edev->accept_any_vlan == action)
+ return 0;
+
+ params = vzalloc(sizeof(*params));
+ if (!params)
+ return -ENOMEM;
+
+ params->vport_id = 0;
+ params->accept_any_vlan = action;
+ params->update_accept_any_vlan_flg = 1;
+
+ rc = edev->ops->vport_update(edev->cdev, params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ edev->accept_any_vlan = action;
+ }
+
+ vfree(params);
+ return 0;
+}
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan, *tmp;
+ int rc = 0;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ DP_INFO(edev, "Failed to allocate struct for vlan\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&vlan->list);
+ vlan->vid = vid;
+ vlan->configured = false;
+
+ /* Verify vlan isn't already configured */
+ list_for_each_entry(tmp, &edev->vlan_list, list) {
+ if (tmp->vid == vlan->vid) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "vlan already configured\n");
+ kfree(vlan);
+ return -EEXIST;
+ }
+ }
+
+ /* If interface is down, cache this VLAN ID and return */
+ __qede_lock(edev);
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, VLAN %d will be configured when interface is up\n",
+ vid);
+ if (vid != 0)
+ edev->non_configured_vlans++;
+ list_add(&vlan->list, &edev->vlan_list);
+ goto out;
+ }
+
+ /* Check for the filter limit.
+ * Note - vlan0 has a reserved filter and can be added without
+ * worrying about quota
+ */
+ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+ (vlan->vid == 0)) {
+ rc = qede_set_ucast_rx_vlan(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %d\n",
+ vlan->vid);
+ kfree(vlan);
+ goto out;
+ }
+ vlan->configured = true;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0)
+ edev->configured_vlans++;
+ } else {
+ /* Out of quota; Activate accept-any-VLAN mode */
+ if (!edev->non_configured_vlans) {
+ rc = qede_config_accept_any_vlan(edev, true);
+ if (rc) {
+ kfree(vlan);
+ goto out;
+ }
+ }
+
+ edev->non_configured_vlans++;
+ }
+
+ list_add(&vlan->list, &edev->vlan_list);
+
+out:
+ __qede_unlock(edev);
+ return rc;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+ struct qede_vlan *vlan)
+{
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ if (vlan->configured)
+ edev->configured_vlans--;
+ else
+ edev->non_configured_vlans--;
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+ int rc = 0, real_rc = 0, accept_any_vlan = 0;
+ struct qed_dev_eth_info *dev_info;
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return 0;
+
+ dev_info = &edev->dev_info;
+
+ /* Configure non-configured vlans */
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (vlan->configured)
+ continue;
+
+ /* We have used all our credits, now enable accept_any_vlan */
+ if ((vlan->vid != 0) &&
+ (edev->configured_vlans == dev_info->num_vlan_filters)) {
+ accept_any_vlan = 1;
+ continue;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %u\n",
+ vlan->vid);
+ real_rc = rc;
+ continue;
+ }
+
+ vlan->configured = true;
+ /* vlan0 filter doesn't consume our VLAN filter's quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans--;
+ edev->configured_vlans++;
+ }
+ }
+
+ /* enable accept_any_vlan mode if we have more VLANs than credits,
+ * or remove accept_any_vlan mode if we've actually removed
+ * a non-configured vlan, and all remaining vlans are truly configured.
+ */
+
+ if (accept_any_vlan)
+ rc = qede_config_accept_any_vlan(edev, true);
+ else if (!edev->non_configured_vlans)
+ rc = qede_config_accept_any_vlan(edev, false);
+
+ if (rc && !real_rc)
+ real_rc = rc;
+
+ return real_rc;
+}
+
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan = NULL;
+ int rc = 0;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+ /* Find whether entry exists */
+ __qede_lock(edev);
+ list_for_each_entry(vlan, &edev->vlan_list, list)
+ if (vlan->vid == vid)
+ break;
+
+ if (!vlan || (vlan->vid != vid)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Vlan isn't configured\n");
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ /* As interface is already down, we don't have a VPORT
+ * instance to remove vlan filter. So just update vlan list
+ */
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, removing VLAN from list only\n");
+ qede_del_vlan_from_list(edev, vlan);
+ goto out;
+ }
+
+ /* Remove vlan */
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ goto out;
+ }
+ }
+
+ qede_del_vlan_from_list(edev, vlan);
+
+ /* We have removed a VLAN - try to see if we can
+ * configure non-configured VLAN from the list.
+ */
+ rc = qede_configure_vlan_filters(edev);
+
+out:
+ __qede_unlock(edev);
+ return rc;
+}
+
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return;
+
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (!vlan->configured)
+ continue;
+
+ vlan->configured = false;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans++;
+ edev->configured_vlans--;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "marked vlan %d as non-configured\n", vlan->vid);
+ }
+
+ edev->accept_any_vlan = false;
+}
+
+static void qede_set_features_reload(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload) {
+ struct qede_reload_args args;
+
+ args.u.features = features;
+ args.func = &qede_set_features_reload;
+
+ /* Make sure that we definitely need to reload.
+ * In case of an eBPF attached program, there will be no FW
+ * aggregations, so no need to actually reload.
+ */
+ __qede_lock(edev);
+ if (edev->xdp_prog)
+ args.func(edev, &args);
+ else
+ qede_reload(edev, &args, true);
+ __qede_unlock(edev);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ struct bpf_prog *old;
+
+ old = xchg(&edev->xdp_prog, args->u.new_prog);
+ if (old)
+ bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+ struct qede_reload_args args;
+
+ if (prog && prog->xdp_adjust_head) {
+ DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we're called, there was already a bpf reference increment */
+ args.func = &qede_xdp_reload_func;
+ args.u.new_prog = prog;
+ qede_reload(edev, &args, false);
+
+ return 0;
+}
+
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return qede_xdp_set(edev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!edev->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_params filter_cmd;
+ int i;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_MCAST;
+ filter_cmd.filter.mcast.type = opcode;
+ filter_cmd.filter.mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc;
+
+ ASSERT_RTNL(); /* @@@TBD To be removed */
+
+ DP_INFO(edev, "Set_mac_addr called\n");
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ return -EFAULT;
+ }
+
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+ if (!netif_running(ndev)) {
+ DP_NOTICE(edev, "The device is currently down\n");
+ return 0;
+ }
+
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ edev->primary_mac);
+ if (rc)
+ return rc;
+
+ edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
+ /* Add MAC filter according to the new unicast HW MAC address */
+ ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+ return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+ /* Remove all previously configured MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count < 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+ *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
+
+void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+/* Must be called with qede_lock held */
+void qede_config_rx_mode(struct net_device *ndev)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_filter_params rx_mode;
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ /* Remove all previous unicast secondary macs and multicast macs
+ * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->primary_mac);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if (ndev->flags & IFF_PROMISC)
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ else
+ accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+
+ /* Configure all filters regardless, in case promisc is rejected */
+ if (uc_count < edev->dev_info.num_mac_filters) {
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+ } else {
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+
+ /* take care of VLAN mode */
+ if (ndev->flags & IFF_PROMISC) {
+ qede_config_accept_any_vlan(edev, true);
+ } else if (!edev->non_configured_vlans) {
+ /* It's possible that accept_any_vlan mode is set due to a
+ * previous setting of IFF_PROMISC. If vlan credits are
+ * sufficient, disable accept_any_vlan.
+ */
+ qede_config_accept_any_vlan(edev, false);
+ }
+
+ rx_mode.filter.accept_flags = accept_flags;
+ edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+ kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
new file mode 100644
index 000000000000..1e65038c8fc0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -0,0 +1,1700 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/ip6_checksum.h>
+#include "qede_ptp.h"
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+/*********************************
+ * Content also used by slowpath *
+ *********************************/
+
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ struct page *data;
+
+ /* In case lazy-allocation is allowed, postpone allocation until the
+ * end of the NAPI run. We'd still need to make sure the Rx ring has
+ * sufficient buffers to guarantee an additional Rx interrupt.
+ */
+ if (allow_lazy && likely(rxq->filled_buffers > 12)) {
+ rxq->filled_buffers--;
+ return 0;
+ }
+
+ data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ /* Map the entire page as it would be used
+ * for multiple RX buffer segment size mapping.
+ */
+ mapping = dma_map_page(rxq->dev, data, 0,
+ PAGE_SIZE, rxq->data_direction);
+ if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+ __free_page(data);
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
+ sw_rx_data->data = data;
+ sw_rx_data->mapping = mapping;
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+ rxq->filled_buffers++;
+
+ return 0;
+}
+
+/* Unmap the data and free skb */
+int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
+{
+ u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd, bool data_split)
+{
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(txq->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ /* Return again prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
+}
+
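+/* Classify an skb into XMIT_* flags (L4 checksum, LSO, tunnel encapsulation)
+ * used by the transmit path.
+ */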
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb->encapsulation) {
+ rc |= XMIT_ENC;
+ if (skb_is_gso(skb)) {
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+ if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+ (gso_type & SKB_GSO_GRE_CSUM))
+ rc |= XMIT_ENC_GSO_L4_CSUM;
+
+ rc |= XMIT_LSO;
+ return rc;
+ }
+ }
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits1 = 0, bd2_bits2 = 0;
+
+ bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd)
+ third_bd->data.bitfields |=
+ cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
+
+ second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_tx_queue *txq,
+ skb_frag_t *frag, struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(txq->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(txq->dev, mapping)))
+ return -ENOMEM;
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
+
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
+{
+ int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+ if (xmit_type & XMIT_LSO) {
+ int hlen;
+
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
+
+ /* linear payload would require its own BD */
+ if (skb_headlen(skb) > hlen)
+ allowed_frags--;
+ }
+
+ return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+ struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+ struct qede_tx_queue *txq = fp->xdp_tx;
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct eth_tx_1st_bd *first_bd;
+
+ if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+ txq->stopped_cnt++;
+ return -ENOMEM;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+ first_bd->data.bitfields |=
+ (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.nbds = 1;
+
+ /* We can safely ignore the offset, as it's 0 for XDP */
+ BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+ /* Synchronize the buffer back to device, as program [probably]
+ * has changed it.
+ */
+ dma_sync_single_for_device(&edev->pdev->dev,
+ metadata->mapping + padding,
+ length, PCI_DMA_TODEVICE);
+
+ txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_prod++;
+
+ /* Mark the fastpath for future XDP doorbell */
+ fp->xdp_xmit = 1;
+
+ return 0;
+}
+
+int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct eth_tx_1st_bd *bd;
+ u16 hw_bd_cons;
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+ NUM_TX_BDS_MAX]);
+
+ txq->sw_tx_cons++;
+ txq->xmit_pkts++;
+ }
+}
+
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ txq->xmit_pkts++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the BD ring.
+ */
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
+
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+
+ rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ /* Since we failed to allocate a new buffer,
+ * the current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+ return -ENOMEM;
+ }
+
+ dma_unmap_page(rxq->dev, curr_cons->mapping,
+ PAGE_SIZE, rxq->data_direction);
+ } else {
+ /* Increment refcount of the page as we don't want
+ * network stack to take the ownership of the page
+ * which can be recycled multiple times by the driver.
+ */
+ page_ref_inc(curr_cons->data);
+ qede_reuse_page(rxq, curr_cons);
+ }
+
+ return 0;
+}
+
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ enum rss_hash_type htype;
+ u32 hash = 0;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+ if (htype) {
+ hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ hash = le32_to_cpu(rss_hash);
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ }
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+ rxq->rcv_pkts++;
+}
+
+static void qede_set_gro_params(struct qede_dev *edev,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+ if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+ cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ u8 tpa_agg_index, u16 len_on_bd)
+{
+ struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+ NUM_RX_BDS_MAX];
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+ struct sk_buff *skb = tpa_info->skb;
+
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+ goto out;
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, tpa_info->frag_id++,
+ current_bd->data, current_bd->page_offset,
+ len_on_bd);
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
+ /* Incr page ref count to reuse on allocation failure
+ * so that it doesn't get freed while freeing SKB.
+ */
+ page_ref_inc(current_bd->data);
+ goto out;
+ }
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
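+/* Translate the parsing-and-error flags of a tunnelled packet into the
+ * driver's QEDE_CSUM_* / QEDE_TUNN_CSUM_* result bits.
+ */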
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
+ dma_addr_t mapping = tpa_info->buffer_mapping;
+ struct sw_rx_data *sw_rx_data_cons;
+ struct sw_rx_data *sw_rx_data_prod;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ /* Use the pre-allocated replacement buffer - we can't release the
+ * aggregation-start buffer until the aggregation is over, and we don't
+ * want to risk an allocation failure here, so re-allocate once it is done.
+ */
+ sw_rx_data_prod->mapping = replace_buf->mapping;
+
+ sw_rx_data_prod->data = replace_buf->data;
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+ rxq->sw_rx_prod++;
+
+ /* move partial skb from cons to pool (don't unmap yet)
+ * save the mapping, in case we drop the packet later on.
+ */
+ tpa_info->buffer = *sw_rx_data_cons;
+ mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+ le32_to_cpu(rx_bd_cons->addr.lo));
+
+ tpa_info->buffer_mapping = mapping;
+ rxq->sw_rx_cons++;
+
+ /* set tpa state to start only if we are able to allocate skb
+ * for this aggregation, otherwise mark as error and aggregation will
+ * be dropped
+ */
+ tpa_info->skb = netdev_alloc_skb(edev->ndev,
+ le16_to_cpu(cqe->len_on_first_bd));
+ if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ goto cons_buf;
+ }
+
+ /* Start filling in the aggregation info */
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+ tpa_info->frag_id = 0;
+ tpa_info->state = QEDE_AGG_STATE_START;
+
+ /* Store some information from first CQE */
+ tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+ tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
+ if ((le16_to_cpu(cqe->pars_flags.flags) >>
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ else
+ tpa_info->vlan_tag = 0;
+
+ qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
+ /* This is needed in order to enable forwarding support */
+ qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
+ if (likely(cqe->ext_bd_len_list[0]))
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+ if (unlikely(cqe->ext_bd_len_list[1])) {
+ DP_ERR(edev,
+ "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ /* FW can send a single MTU-sized packet from the GRO flow due to an
+ * aggregation timeout, last segment, etc., which is not expected to be
+ * a GRO packet. If an skb has zero frags, simply push it into the stack
+ * as a non-GSO skb.
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_reset_network_header(skb);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+#endif
+
+send_skb:
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
+ qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA emd with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+ "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ memcpy(skb->data,
+ page_address(tpa_info->buffer.data) +
+ tpa_info->start_cqe_placement_offset +
+ tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ return;
+err:
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct bpf_prog *prog,
+ struct sw_rx_data *bd,
+ struct eth_fast_path_rx_reg_cqe *cqe)
+{
+ u16 len = le16_to_cpu(cqe->len_on_first_bd);
+ struct xdp_buff xdp;
+ enum xdp_action act;
+
+ xdp.data = page_address(bd->data) + cqe->placement_offset;
+ xdp.data_end = xdp.data + len;
+
+ /* Queues currently always get a full reset, so until there is atomic
+ * program replacement, just mark the read side for the map helpers.
+ */
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
+
+ if (act == XDP_PASS)
+ return true;
+
+ /* Count number of packets not to be passed to stack */
+ rxq->xdp_no_pass++;
+
+ switch (act) {
+ case XDP_TX:
+ /* We need the replacement buffer before transmit. */
+ if (qede_alloc_rx_buffer(rxq, true)) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+ trace_xdp_exception(edev->ndev, prog, act);
+ return false;
+ }
+
+ /* Now if there's a transmission problem, we'd still have to
+ * throw current buffer, as replacement was already allocated.
+ */
+ if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(bd->data);
+ trace_xdp_exception(edev->ndev, prog, act);
+ }
+
+ /* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ return false;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(edev->ndev, prog, act);
+ case XDP_DROP:
+ qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+ }
+
+ return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len,
+ u16 pad)
+{
+ unsigned int offset = bd->page_offset;
+ struct skb_frag_struct *frag;
+ struct page *page = bd->data;
+ unsigned int pull_len;
+ struct sk_buff *skb;
+ unsigned char *va;
+
+ /* Allocate a new SKB with a sufficient large header len */
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Copy data into SKB - if it's small, we can simply copy it and
+ * re-use the already allocated & mapped memory.
+ */
+ if (len + pad <= edev->rx_copybreak) {
+ memcpy(skb_put(skb, len),
+ page_address(page) + pad + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, pad + offset, len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ /* Correct the skb & frag sizes offset after the pull */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+ * that it doesn't get freed while freeing SKB [as it's
+ * already mapped there].
+ */
+ page_ref_inc(page);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+ return skb;
+}
+
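+/* Attach the remaining BDs of a multi-BD (jumbo) packet to the skb as page
+ * fragments; returns the number of BDs that could not be mapped.
+ */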
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 first_bd_len)
+{
+ u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+ struct sw_rx_data *bd;
+ u16 bd_cons_idx;
+ u8 num_frags;
+
+ pkt_len -= first_bd_len;
+
+ /* We've already used one BD for the SKB. Now take care of the rest */
+ for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ goto out;
+ }
+
+ /* We need a replacement buffer for each BD */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true)))
+ goto out;
+
+ /* Now that we've allocated the replacement buffer,
+ * we can safely consume the next BD and map it to the SKB.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+ qede_rx_bd_ring_consume(rxq);
+
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ bd->data, 0, cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
+
+ if (unlikely(pkt_len))
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+
+out:
+ return num_frags;
+}
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ union eth_rx_cqe *cqe,
+ enum eth_rx_cqe_type type)
+{
+ switch (type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
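+/* Handle a single CQE from the Rx completion ring - slowpath events, TPA
+ * aggregations and regular packets (including the XDP hook).
+ */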
+static int qede_rx_process_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ u16 len, pad, bd_cons_idx, parse_flag;
+ enum eth_rx_cqe_type cqe_type;
+ union eth_rx_cqe *cqe;
+ struct sw_rx_data *bd;
+ struct sk_buff *skb;
+ __le16 flags;
+ u8 csum_flag;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ /* Process an unlikely slowpath event */
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ struct eth_slow_path_rx_cqe *sp_cqe;
+
+ sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+ edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+ return 0;
+ }
+
+ /* Handle TPA cqes */
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+ return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+ /* Get the data from the SW ring; Consume it only after it's evident
+ * we won't recycle it.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ pad = fp_cqe->placement_offset;
+
+ /* Run eBPF program if one is attached */
+ if (xdp_prog)
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+ return 1;
+
+ /* If this is an error packet then drop it */
+ flags = cqe->fast_path_regular.pars_flags.flags;
+ parse_flag = le16_to_cpu(flags);
+
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+ rxq->rx_ip_frags++;
+ } else {
+ DP_NOTICE(edev,
+ "CQE has error, flags = %x, dropping incoming packet\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
+ }
+ }
+
+ /* Basic validation passed; Need to prepare an SKB. This would also
+ * guarantee to finally consume the first BD upon success.
+ */
+ skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+ if (!skb) {
+ rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
+ }
+
+ /* In case of a jumbo packet, several PAGE_SIZE'd buffers will be
+ * pointed to by a single cqe.
+ */
+ if (fp_cqe->bd_num > 1) {
+ u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+ fp_cqe, len);
+
+ if (unlikely(unmapped_frags > 0)) {
+ qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+
+ /* The SKB contains all the data. Now prepare meta-magic */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+ qede_set_skb_csum(skb, csum_flag);
+ skb_record_rx_queue(skb, rxq->rxq_id);
+ qede_ptp_record_rx_ts(edev, cqe, skb);
+
+ /* SKB is prepared - pass it to stack */
+ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ return 1;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_dev *edev = fp->edev;
+ u16 hw_comp_cons, sw_comp_cons;
+ int work_done = 0;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative reads of the
+ * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE were
+ * read speculatively before FW writes it, and FW then wrote the CQE and
+ * SB before the CPU read hw_comp_cons, the CPU would process a stale CQE.
+ */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+ qede_rx_process_cqe(edev, fp, rxq);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ work_done++;
+ }
+
+ /* Allocate replacement buffers */
+ while (rxq->num_rx_buffers - rxq->filled_buffers)
+ if (qede_alloc_rx_buffer(rxq, false))
+ break;
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return work_done;
+}
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* *_has_*_work() reads the status block, thus we need to ensure that
+ * status block indices have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that we won't write the
+ * "newer" value of the status block to HW (if there was a DMA right
+ * after qede_has_rx_work and if there is no rmb, the memory reading
+ * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
+ * In this case there will never be another interrupt until there is
+ * another update of the status block, while there is still unhandled
+ * work.
+ */
+ rmb();
+
+ if (likely(fp->type & QEDE_FASTPATH_RX))
+ if (qede_has_rx_work(fp->rxq))
+ return true;
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ if (qede_txq_has_work(fp->xdp_tx))
+ return true;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX))
+ if (qede_txq_has_work(fp->txq))
+ return true;
+
+ return false;
+}
+
+/*********************
+ * NDO & API related *
+ *********************/
+int qede_poll(struct napi_struct *napi, int budget)
+{
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+ qede_tx_int(edev, fp->txq);
+
+ if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+ qede_xdp_tx_int(edev, fp->xdp_tx);
+
+ rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ?
+ qede_rx_int(fp, budget) : 0;
+ if (rx_work_done < budget) {
+ if (!qede_poll_is_more_work(fp)) {
+ napi_complete_done(napi, rx_work_done);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ } else {
+ rx_work_done = budget;
+ }
+ }
+
+ if (fp->xdp_xmit) {
+ u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+ fp->xdp_xmit = 0;
+ fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(fp->xdp_tx);
+ }
+
+ return rx_work_done;
+}
+
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* Main transmit function */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+ bool data_split = false;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
+ txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(skb, &ipv6_ext);
+
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+ if (qede_pkt_req_lin(skb, xmit_type)) {
+ if (skb_linearize(skb)) {
+ DP_NOTICE(edev,
+ "SKB linearization failed - silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ qede_ptp_tx_ts(edev, skb);
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(txq->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(txq->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ first_bd->data.bitfields |=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
+
+ /* Legacy FW had flipped behavior with regard to this bit -
+ * i.e., it needed to be set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ first_bd->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* If the packet is IPv6 with an extension header, indicate that
+ * to FW and pass a few params, since the device cracker doesn't
+ * support parsing IPv6 with extension headers.
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+ u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+ first_bd->data.bd_flags.bitfields |= 1 << tmp;
+ }
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
+
+ /* @@@TBD - if this is not removed, it needs to be checked */
+ third_bd->data.bitfields |=
+ cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ } else {
+ first_bd->data.bitfields |=
+ (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ }
+
+ /* Handle fragmented skb */
+ /* Special handling for frags inside the 2nd and 3rd BDs */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(txq,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(txq,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ /* Advance packet producer only before sending the packet since mapping
+ * of pages may fail.
+ */
+ txq->sw_tx_prod++;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
+ netif_tx_stop_queue(netdev_txq);
+ txq->stopped_cnt++;
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
+ (MAX_SKB_FRAGS + 1)) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation) {
+ u8 l4_proto = 0;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ /* Disable offloads for geneve tunnels, as HW can't parse
+ * a geneve header with an option length greater than 32B.
+ */
+ if ((l4_proto == IPPROTO_UDP) &&
+ ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
+
+ return features;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index aecdd1c5c0ea..3a78c3f25157 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1,11 +1,34 @@
/* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -36,8 +59,10 @@
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
+#include <linux/vmalloc.h>
#include <linux/qed/qede_roce.h>
#include "qede.h"
+#include "qede_ptp.h"
static char version[] =
"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
@@ -154,8 +179,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
struct qed_dev_info *qed_info = &edev->dev_info.common;
+ struct qed_update_vport_params *vport_params;
int rc;
+ vport_params = vzalloc(sizeof(*vport_params));
+ if (!vport_params)
+ return -ENOMEM;
DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
@@ -163,15 +192,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
/* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
- struct qed_update_vport_params params;
-
- memset(&params, 0, sizeof(params));
- params.vport_id = 0;
- params.update_tx_switching_flg = 1;
- params.tx_switching_flg = num_vfs_param ? 1 : 0;
- edev->ops->vport_update(edev->cdev, &params);
+ vport_params->vport_id = 0;
+ vport_params->update_tx_switching_flg = 1;
+ vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, vport_params);
}
+ vfree(vport_params);
return rc;
}
#endif
@@ -187,18 +214,6 @@ static struct pci_driver qede_pci_driver = {
#endif
};
-static void qede_force_mac(void *dev, u8 *mac, bool forced)
-{
- struct qede_dev *edev = dev;
-
- /* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
- return;
-
- ether_addr_copy(edev->ndev->dev_addr, mac);
- ether_addr_copy(edev->primary_mac, mac);
-}
-
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
@@ -294,1643 +309,8 @@ static void __exit qede_cleanup(void)
module_init(qede_init);
module_exit(qede_cleanup);
-/* -------------------------------------------------------------------------
- * START OF FAST-PATH
- * -------------------------------------------------------------------------
- */
-
-/* Unmap the data and free skb */
-static int qede_free_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq, int *len)
-{
- u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
- struct eth_tx_1st_bd *first_bd;
- struct eth_tx_bd *tx_data_bd;
- int bds_consumed = 0;
- int nbds;
- bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
- int i, split_bd_len = 0;
-
- if (unlikely(!skb)) {
- DP_ERR(edev,
- "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
- idx, txq->sw_tx_cons, txq->sw_tx_prod);
- return -1;
- }
-
- *len = skb->len;
-
- first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-
- bds_consumed++;
-
- nbds = first_bd->data.nbds;
-
- if (data_split) {
- struct eth_tx_bd *split = (struct eth_tx_bd *)
- qed_chain_consume(&txq->tx_pbl);
- split_bd_len = BD_UNMAP_LEN(split);
- bds_consumed++;
- }
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
- /* Unmap the data of the skb frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
- tx_data_bd = (struct eth_tx_bd *)
- qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
- }
-
- while (bds_consumed++ < nbds)
- qed_chain_consume(&txq->tx_pbl);
-
- /* Free skb */
- dev_kfree_skb_any(skb);
- txq->sw_tx_ring.skbs[idx].skb = NULL;
- txq->sw_tx_ring.skbs[idx].flags = 0;
-
- return 0;
-}
-
-/* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
- struct eth_tx_1st_bd *first_bd,
- int nbd, bool data_split)
-{
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
- struct eth_tx_bd *tx_data_bd;
- int i, split_bd_len = 0;
-
- /* Return prod to its position before this skb was handled */
- qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
-
- first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
- if (data_split) {
- struct eth_tx_bd *split = (struct eth_tx_bd *)
- qed_chain_produce(&txq->tx_pbl);
- split_bd_len = BD_UNMAP_LEN(split);
- nbd--;
- }
-
- dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
- /* Unmap the data of the skb frags */
- for (i = 0; i < nbd; i++) {
- tx_data_bd = (struct eth_tx_bd *)
- qed_chain_produce(&txq->tx_pbl);
- if (tx_data_bd->nbytes)
- dma_unmap_page(txq->dev,
- BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
- }
-
- /* Return again prod to its position before this skb was handled */
- qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
-
- /* Free skb */
- dev_kfree_skb_any(skb);
- txq->sw_tx_ring.skbs[idx].skb = NULL;
- txq->sw_tx_ring.skbs[idx].flags = 0;
-}
-
-static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
-{
- u32 rc = XMIT_L4_CSUM;
- __be16 l3_proto;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return XMIT_PLAIN;
-
- l3_proto = vlan_get_protocol(skb);
- if (l3_proto == htons(ETH_P_IPV6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- *ipv6_ext = 1;
-
- if (skb->encapsulation) {
- rc |= XMIT_ENC;
- if (skb_is_gso(skb)) {
- unsigned short gso_type = skb_shinfo(skb)->gso_type;
-
- if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
- (gso_type & SKB_GSO_GRE_CSUM))
- rc |= XMIT_ENC_GSO_L4_CSUM;
-
- rc |= XMIT_LSO;
- return rc;
- }
- }
-
- if (skb_is_gso(skb))
- rc |= XMIT_LSO;
-
- return rc;
-}
-
-static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
- struct eth_tx_2nd_bd *second_bd,
- struct eth_tx_3rd_bd *third_bd)
-{
- u8 l4_proto;
- u16 bd2_bits1 = 0, bd2_bits2 = 0;
-
- bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
-
- bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
- ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
- << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
-
- bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
- ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
-
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
- l4_proto = ipv6_hdr(skb)->nexthdr;
- else
- l4_proto = ip_hdr(skb)->protocol;
-
- if (l4_proto == IPPROTO_UDP)
- bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
-
- if (third_bd)
- third_bd->data.bitfields |=
- cpu_to_le16(((tcp_hdrlen(skb) / 4) &
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
-
- second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
- second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
-}
-
-static int map_frag_to_bd(struct qede_tx_queue *txq,
- skb_frag_t *frag, struct eth_tx_bd *bd)
-{
- dma_addr_t mapping;
-
- /* Map skb non-linear frag data for DMA */
- mapping = skb_frag_dma_map(txq->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(txq->dev, mapping)))
- return -ENOMEM;
-
- /* Setup the data pointer of the frag data */
- BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
-
- return 0;
-}
-
-static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
-{
- if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
-}
-
-/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
-{
- int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
-
- if (xmit_type & XMIT_LSO) {
- int hlen;
-
- hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
-
- /* linear payload would require its own BD */
- if (skb_headlen(skb) > hlen)
- allowed_frags--;
- }
-
- return (skb_shinfo(skb)->nr_frags > allowed_frags);
-}
-#endif
-
-static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
-{
- /* wmb makes sure that the BDs data is updated before updating the
- * producer, otherwise FW may read old data from the BDs.
- */
- wmb();
- barrier();
- writel(txq->tx_db.raw, txq->doorbell_addr);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
-}
-
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
- struct sw_rx_data *metadata, u16 padding, u16 length)
-{
- struct qede_tx_queue *txq = fp->xdp_tx;
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- struct eth_tx_1st_bd *first_bd;
-
- if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
- txq->stopped_cnt++;
- return -ENOMEM;
- }
-
- first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
- memset(first_bd, 0, sizeof(*first_bd));
- first_bd->data.bd_flags.bitfields =
- BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
- first_bd->data.bitfields |=
- (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- first_bd->data.nbds = 1;
-
- /* We can safely ignore the offset, as it's 0 for XDP */
- BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
-
- /* Synchronize the buffer back to device, as program [probably]
- * has changed it.
- */
- dma_sync_single_for_device(&edev->pdev->dev,
- metadata->mapping + padding,
- length, PCI_DMA_TODEVICE);
-
- txq->sw_tx_ring.pages[idx] = metadata->data;
- txq->sw_tx_prod++;
-
- /* Mark the fastpath for future XDP doorbell */
- fp->xdp_xmit = 1;
-
- return 0;
-}
-
-/* Main transmit function */
-static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct qede_dev *edev = netdev_priv(ndev);
- struct netdev_queue *netdev_txq;
- struct qede_tx_queue *txq;
- struct eth_tx_1st_bd *first_bd;
- struct eth_tx_2nd_bd *second_bd = NULL;
- struct eth_tx_3rd_bd *third_bd = NULL;
- struct eth_tx_bd *tx_data_bd = NULL;
- u16 txq_index;
- u8 nbd = 0;
- dma_addr_t mapping;
- int rc, frag_idx = 0, ipv6_ext = 0;
- u8 xmit_type;
- u16 idx;
- u16 hlen;
- bool data_split = false;
-
- /* Get tx-queue context and netdev index */
- txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
- netdev_txq = netdev_get_tx_queue(ndev, txq_index);
-
- WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
-
- xmit_type = qede_xmit_type(skb, &ipv6_ext);
-
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
- if (qede_pkt_req_lin(skb, xmit_type)) {
- if (skb_linearize(skb)) {
- DP_NOTICE(edev,
- "SKB linearization failed - silently dropping this SKB\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-#endif
-
- /* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- txq->sw_tx_ring.skbs[idx].skb = skb;
- first_bd = (struct eth_tx_1st_bd *)
- qed_chain_produce(&txq->tx_pbl);
- memset(first_bd, 0, sizeof(*first_bd));
- first_bd->data.bd_flags.bitfields =
- 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-
- /* Map skb linear data for DMA and set in the first BD */
- mapping = dma_map_single(txq->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(txq->dev, mapping))) {
- DP_NOTICE(edev, "SKB mapping failed\n");
- qede_free_failed_tx_pkt(txq, first_bd, 0, false);
- qede_update_tx_producer(txq);
- return NETDEV_TX_OK;
- }
- nbd++;
- BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
-
- /* In case there is IPv6 with extension headers or LSO we need 2nd and
- * 3rd BDs.
- */
- if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
- second_bd = (struct eth_tx_2nd_bd *)
- qed_chain_produce(&txq->tx_pbl);
- memset(second_bd, 0, sizeof(*second_bd));
-
- nbd++;
- third_bd = (struct eth_tx_3rd_bd *)
- qed_chain_produce(&txq->tx_pbl);
- memset(third_bd, 0, sizeof(*third_bd));
-
- nbd++;
- /* We need to fill in additional data in second_bd... */
- tx_data_bd = (struct eth_tx_bd *)second_bd;
- }
-
- if (skb_vlan_tag_present(skb)) {
- first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
- }
-
- /* Fill the parsing flags & params according to the requested offload */
- if (xmit_type & XMIT_L4_CSUM) {
- /* We don't re-calculate IP checksum as it is already done by
- * the upper stack
- */
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
- if (xmit_type & XMIT_ENC) {
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- first_bd->data.bitfields |=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
- }
-
- /* Legacy FW had flipped behavior in regard to this bit -
- * I.e., needed to set to prevent FW from touching encapsulated
- * packets when it didn't need to.
- */
- if (unlikely(txq->is_legacy))
- first_bd->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* If the packet is IPv6 with extension header, indicate that
- * to FW and pass few params, since the device cracker doesn't
- * support parsing IPv6 with extension header/s.
- */
- if (unlikely(ipv6_ext))
- qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
- }
-
- if (xmit_type & XMIT_LSO) {
- first_bd->data.bd_flags.bitfields |=
- (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
- third_bd->data.lso_mss =
- cpu_to_le16(skb_shinfo(skb)->gso_size);
-
- if (unlikely(xmit_type & XMIT_ENC)) {
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
-
- if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
- u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
-
- first_bd->data.bd_flags.bitfields |= 1 << tmp;
- }
- hlen = qede_get_skb_hlen(skb, true);
- } else {
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = qede_get_skb_hlen(skb, false);
- }
-
- /* @@@TBD - if will not be removed need to check */
- third_bd->data.bitfields |=
- cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
-
- /* Make life easier for FW guys who can't deal with header and
- * data on same BD. If we need to split, use the second bd...
- */
- if (unlikely(skb_headlen(skb) > hlen)) {
- DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
- "TSO split header size is %d (%x:%x)\n",
- first_bd->nbytes, first_bd->addr.hi,
- first_bd->addr.lo);
-
- mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
- le32_to_cpu(first_bd->addr.lo)) +
- hlen;
-
- BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
- le16_to_cpu(first_bd->nbytes) -
- hlen);
-
- /* this marks the BD as one that has no
- * individual mapping
- */
- txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
-
- first_bd->nbytes = cpu_to_le16(hlen);
-
- tx_data_bd = (struct eth_tx_bd *)third_bd;
- data_split = true;
- }
- } else {
- first_bd->data.bitfields |=
- (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- }
-
- /* Handle fragmented skb */
- /* special handle for frags inside 2nd and 3rd bds.. */
- while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
- rc = map_frag_to_bd(txq,
- &skb_shinfo(skb)->frags[frag_idx],
- tx_data_bd);
- if (rc) {
- qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
- qede_update_tx_producer(txq);
- return NETDEV_TX_OK;
- }
-
- if (tx_data_bd == (struct eth_tx_bd *)second_bd)
- tx_data_bd = (struct eth_tx_bd *)third_bd;
- else
- tx_data_bd = NULL;
-
- frag_idx++;
- }
-
- /* map last frags into 4th, 5th .... */
- for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
- tx_data_bd = (struct eth_tx_bd *)
- qed_chain_produce(&txq->tx_pbl);
-
- memset(tx_data_bd, 0, sizeof(*tx_data_bd));
-
- rc = map_frag_to_bd(txq,
- &skb_shinfo(skb)->frags[frag_idx],
- tx_data_bd);
- if (rc) {
- qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
- qede_update_tx_producer(txq);
- return NETDEV_TX_OK;
- }
- }
-
- /* update the first BD with the actual num BDs */
- first_bd->data.nbds = nbd;
-
- netdev_tx_sent_queue(netdev_txq, skb->len);
-
- skb_tx_timestamp(skb);
-
- /* Advance packet producer only before sending the packet since mapping
- * of pages may fail.
- */
- txq->sw_tx_prod++;
-
- /* 'next page' entries are counted in the producer value */
- txq->tx_db.data.bd_prod =
- cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
-
- if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
- qede_update_tx_producer(txq);
-
- if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
- < (MAX_SKB_FRAGS + 1))) {
- if (skb->xmit_more)
- qede_update_tx_producer(txq);
-
- netif_tx_stop_queue(netdev_txq);
- txq->stopped_cnt++;
- DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
- "Stop queue was called\n");
- /* paired memory barrier is in qede_tx_int(), we have to keep
- * ordering of set_bit() in netif_tx_stop_queue() and read of
- * fp->bd_tx_cons
- */
- smp_mb();
-
- if (qed_chain_get_elem_left(&txq->tx_pbl)
- >= (MAX_SKB_FRAGS + 1) &&
- (edev->state == QEDE_STATE_OPEN)) {
- netif_tx_wake_queue(netdev_txq);
- DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
- "Wake queue was called\n");
- }
- }
-
- return NETDEV_TX_OK;
-}
-
-int qede_txq_has_work(struct qede_tx_queue *txq)
-{
- u16 hw_bd_cons;
-
- /* Tell compiler that consumer and producer can change */
- barrier();
- hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
- if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
- return 0;
-
- return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
-}
-
-static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
-{
- struct eth_tx_1st_bd *bd;
- u16 hw_bd_cons;
-
- hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
- barrier();
-
- while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
- NUM_TX_BDS_MAX]);
-
- txq->sw_tx_cons++;
- txq->xmit_pkts++;
- }
-}
-
-static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
-{
- struct netdev_queue *netdev_txq;
- u16 hw_bd_cons;
- unsigned int pkts_compl = 0, bytes_compl = 0;
- int rc;
-
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
-
- hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
- barrier();
-
- while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- int len = 0;
-
- rc = qede_free_tx_pkt(edev, txq, &len);
- if (rc) {
- DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
- hw_bd_cons,
- qed_chain_get_cons_idx(&txq->tx_pbl));
- break;
- }
-
- bytes_compl += len;
- pkts_compl++;
- txq->sw_tx_cons++;
- txq->xmit_pkts++;
- }
-
- netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
-
- /* Need to make the tx_bd_cons update visible to start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that
- * start_xmit() will miss it and cause the queue to be stopped
- * forever.
- * On the other hand we need an rmb() here to ensure the proper
- * ordering of bit testing in the following
- * netif_tx_queue_stopped(txq) call.
- */
- smp_mb();
-
- if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
- /* Taking tx_lock is needed to prevent reenabling the queue
- * while it's empty. This could have happen if rx_action() gets
- * suspended in qede_tx_int() after the condition before
- * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
- *
- * stops the queue->sees fresh tx_bd_cons->releases the queue->
- * sends some packets consuming the whole queue again->
- * stops the queue
- */
-
- __netif_tx_lock(netdev_txq, smp_processor_id());
-
- if ((netif_tx_queue_stopped(netdev_txq)) &&
- (edev->state == QEDE_STATE_OPEN) &&
- (qed_chain_get_elem_left(&txq->tx_pbl)
- >= (MAX_SKB_FRAGS + 1))) {
- netif_tx_wake_queue(netdev_txq);
- DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
- "Wake queue was called\n");
- }
-
- __netif_tx_unlock(netdev_txq);
- }
-
- return 0;
-}
-
-bool qede_has_rx_work(struct qede_rx_queue *rxq)
-{
- u16 hw_comp_cons, sw_comp_cons;
-
- /* Tell compiler that status block fields can change */
- barrier();
-
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- return hw_comp_cons != sw_comp_cons;
-}
-
-static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
-{
- qed_chain_consume(&rxq->rx_bd_ring);
- rxq->sw_rx_cons++;
-}
-
-/* This function reuses the buffer(from an offset) from
- * consumer index to producer index in the bd ring
- */
-static inline void qede_reuse_page(struct qede_rx_queue *rxq,
- struct sw_rx_data *curr_cons)
-{
- struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *curr_prod;
- dma_addr_t new_mapping;
-
- curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
- *curr_prod = *curr_cons;
-
- new_mapping = curr_prod->mapping + curr_prod->page_offset;
-
- rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
-
- rxq->sw_rx_prod++;
- curr_cons->data = NULL;
-}
-
-/* In case of allocation failures reuse buffers
- * from consumer index to produce buffers for firmware
- */
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
-{
- struct sw_rx_data *curr_cons;
-
- for (; count > 0; count--) {
- curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- qede_reuse_page(rxq, curr_cons);
- qede_rx_bd_ring_consume(rxq);
- }
-}
-
-static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
-{
- struct sw_rx_data *sw_rx_data;
- struct eth_rx_bd *rx_bd;
- dma_addr_t mapping;
- struct page *data;
-
- data = alloc_pages(GFP_ATOMIC, 0);
- if (unlikely(!data))
- return -ENOMEM;
-
- /* Map the entire page as it would be used
- * for multiple RX buffer segment size mapping.
- */
- mapping = dma_map_page(rxq->dev, data, 0,
- PAGE_SIZE, rxq->data_direction);
- if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
- __free_page(data);
- return -ENOMEM;
- }
-
- sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
- sw_rx_data->page_offset = 0;
- sw_rx_data->data = data;
- sw_rx_data->mapping = mapping;
-
- /* Advance PROD and get BD pointer */
- rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
- WARN_ON(!rx_bd);
- rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
- rxq->sw_rx_prod++;
-
- return 0;
-}
-
-static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
- struct sw_rx_data *curr_cons)
-{
- /* Move to the next segment in the page */
- curr_cons->page_offset += rxq->rx_buf_seg_size;
-
- if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(rxq))) {
- /* Since we failed to allocate new buffer
- * current buffer can be used again.
- */
- curr_cons->page_offset -= rxq->rx_buf_seg_size;
-
- return -ENOMEM;
- }
-
- dma_unmap_page(rxq->dev, curr_cons->mapping,
- PAGE_SIZE, rxq->data_direction);
- } else {
- /* Increment refcount of the page as we don't want
- * network stack to take the ownership of the page
- * which can be recycled multiple times by the driver.
- */
- page_ref_inc(curr_cons->data);
- qede_reuse_page(rxq, curr_cons);
- }
-
- return 0;
-}
-
-void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
-{
- u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
- u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
- struct eth_rx_prod_data rx_prods = {0};
-
- /* Update producers */
- rx_prods.bd_prod = cpu_to_le16(bd_prod);
- rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
-
- /* Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- */
- wmb();
-
- internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
- (u32 *)&rx_prods);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the napi lock is released and another qede_poll is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
-}
-
-static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
-{
- enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
- enum rss_hash_type htype;
- u32 hash = 0;
-
- htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
- if (htype) {
- hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
- (htype == RSS_HASH_TYPE_IPV6)) ?
- PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
- hash = le32_to_cpu(rss_hash);
- }
- skb_set_hash(skb, hash, hash_type);
-}
-
-static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
-{
- skb_checksum_none_assert(skb);
-
- if (csum_flag & QEDE_CSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
- skb->csum_level = 1;
-}
-
-static inline void qede_skb_receive(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct qede_rx_queue *rxq,
- struct sk_buff *skb, u16 vlan_tag)
-{
- if (vlan_tag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&fp->napi, skb);
- fp->rxq->rcv_pkts++;
-}
-
-static void qede_set_gro_params(struct qede_dev *edev,
- struct sk_buff *skb,
- struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
- u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
-
- if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
- PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- else
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-
- skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
- cqe->header_len;
-}
-
-static int qede_fill_frag_skb(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- u8 tpa_agg_index, u16 len_on_bd)
-{
- struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
- NUM_RX_BDS_MAX];
- struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
- struct sk_buff *skb = tpa_info->skb;
-
- if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
- goto out;
-
- /* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, tpa_info->frag_id++,
- current_bd->data, current_bd->page_offset,
- len_on_bd);
-
- if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
- /* Incr page ref count to reuse on allocation failure
- * so that it doesn't get freed while freeing SKB.
- */
- page_ref_inc(current_bd->data);
- goto out;
- }
-
- qed_chain_consume(&rxq->rx_bd_ring);
- rxq->sw_rx_cons++;
-
- skb->data_len += len_on_bd;
- skb->truesize += rxq->rx_buf_seg_size;
- skb->len += len_on_bd;
-
- return 0;
-
-out:
- tpa_info->state = QEDE_AGG_STATE_ERROR;
- qede_recycle_rx_bd_ring(rxq, 1);
-
- return -ENOMEM;
-}
-
-static void qede_tpa_start(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
- struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
- struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
- dma_addr_t mapping = tpa_info->buffer_mapping;
- struct sw_rx_data *sw_rx_data_cons;
- struct sw_rx_data *sw_rx_data_prod;
-
- sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-
- /* Use pre-allocated replacement buffer - we can't release the agg.
- * start until its over and we don't want to risk allocation failing
- * here, so re-allocate when aggregation will be over.
- */
- sw_rx_data_prod->mapping = replace_buf->mapping;
-
- sw_rx_data_prod->data = replace_buf->data;
- rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
- sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
- rxq->sw_rx_prod++;
-
- /* move partial skb from cons to pool (don't unmap yet)
- * save mapping, incase we drop the packet later on.
- */
- tpa_info->buffer = *sw_rx_data_cons;
- mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
- le32_to_cpu(rx_bd_cons->addr.lo));
-
- tpa_info->buffer_mapping = mapping;
- rxq->sw_rx_cons++;
-
- /* set tpa state to start only if we are able to allocate skb
- * for this aggregation, otherwise mark as error and aggregation will
- * be dropped
- */
- tpa_info->skb = netdev_alloc_skb(edev->ndev,
- le16_to_cpu(cqe->len_on_first_bd));
- if (unlikely(!tpa_info->skb)) {
- DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
- tpa_info->state = QEDE_AGG_STATE_ERROR;
- goto cons_buf;
- }
-
- /* Start filling in the aggregation info */
- skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
- tpa_info->frag_id = 0;
- tpa_info->state = QEDE_AGG_STATE_START;
-
- /* Store some information from first CQE */
- tpa_info->start_cqe_placement_offset = cqe->placement_offset;
- tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
- if ((le16_to_cpu(cqe->pars_flags.flags) >>
- PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
- PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
- tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
- else
- tpa_info->vlan_tag = 0;
-
- qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
-
- /* This is needed in order to enable forwarding support */
- qede_set_gro_params(edev, tpa_info->skb, cqe);
-
-cons_buf: /* We still need to handle bd_len_list to consume buffers */
- if (likely(cqe->ext_bd_len_list[0]))
- qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->ext_bd_len_list[0]));
-
- if (unlikely(cqe->ext_bd_len_list[1])) {
- DP_ERR(edev,
- "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
- tpa_info->state = QEDE_AGG_STATE_ERROR;
- }
-}
-
-#ifdef CONFIG_INET
-static void qede_gro_ip_csum(struct sk_buff *skb)
-{
- const struct iphdr *iph = ip_hdr(skb);
- struct tcphdr *th;
-
- skb_set_transport_header(skb, sizeof(struct iphdr));
- th = tcp_hdr(skb);
-
- th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
- iph->saddr, iph->daddr, 0);
-
- tcp_gro_complete(skb);
-}
-
-static void qede_gro_ipv6_csum(struct sk_buff *skb)
-{
- struct ipv6hdr *iph = ipv6_hdr(skb);
- struct tcphdr *th;
-
- skb_set_transport_header(skb, sizeof(struct ipv6hdr));
- th = tcp_hdr(skb);
-
- th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
- &iph->saddr, &iph->daddr, 0);
- tcp_gro_complete(skb);
-}
-#endif
-
-static void qede_gro_receive(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct sk_buff *skb,
- u16 vlan_tag)
-{
- /* FW can send a single MTU sized packet from gro flow
- * due to aggregation timeout/last segment etc. which
- * is not expected to be a gro packet. If a skb has zero
- * frags then simply push it in the stack as non gso skb.
- */
- if (unlikely(!skb->data_len)) {
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->gso_size = 0;
- goto send_skb;
- }
-
-#ifdef CONFIG_INET
- if (skb_shinfo(skb)->gso_size) {
- skb_reset_network_header(skb);
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- qede_gro_ip_csum(skb);
- break;
- case htons(ETH_P_IPV6):
- qede_gro_ipv6_csum(skb);
- break;
- default:
- DP_ERR(edev,
- "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
- ntohs(skb->protocol));
- }
- }
-#endif
-
-send_skb:
- skb_record_rx_queue(skb, fp->rxq->rxq_id);
- qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
-}
-
-static inline void qede_tpa_cont(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct eth_fast_path_rx_tpa_cont_cqe *cqe)
-{
- int i;
-
- for (i = 0; cqe->len_list[i]; i++)
- qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->len_list[i]));
-
- if (unlikely(i > 1))
- DP_ERR(edev,
- "Strange - TPA cont with more than a single len_list entry\n");
-}
-
-static void qede_tpa_end(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct eth_fast_path_rx_tpa_end_cqe *cqe)
-{
- struct qede_rx_queue *rxq = fp->rxq;
- struct qede_agg_info *tpa_info;
- struct sk_buff *skb;
- int i;
-
- tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- skb = tpa_info->skb;
-
- for (i = 0; cqe->len_list[i]; i++)
- qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->len_list[i]));
- if (unlikely(i > 1))
- DP_ERR(edev,
- "Strange - TPA emd with more than a single len_list entry\n");
-
- if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
- goto err;
-
- /* Sanity */
- if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
- DP_ERR(edev,
- "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
- cqe->num_of_bds, tpa_info->frag_id);
- if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
- DP_ERR(edev,
- "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
- le16_to_cpu(cqe->total_packet_len), skb->len);
-
- memcpy(skb->data,
- page_address(tpa_info->buffer.data) +
- tpa_info->start_cqe_placement_offset +
- tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
-
- /* Finalize the SKB */
- skb->protocol = eth_type_trans(skb, edev->ndev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
- * to skb_shinfo(skb)->gso_segs
- */
- NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
-
- qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
-
- tpa_info->state = QEDE_AGG_STATE_NONE;
-
- return;
-err:
- tpa_info->state = QEDE_AGG_STATE_NONE;
- dev_kfree_skb_any(tpa_info->skb);
- tpa_info->skb = NULL;
-}
-
-static bool qede_tunn_exist(u16 flag)
-{
- return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
-}
-
-static u8 qede_check_tunn_csum(u16 flag)
-{
- u16 csum_flag = 0;
- u8 tcsum = 0;
-
- if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
-
- if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
- PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
-
- return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-
-static u8 qede_check_notunn_csum(u16 flag)
-{
- u16 csum_flag = 0;
- u8 csum = 0;
-
- if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- csum = QEDE_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
-
- return csum;
-}
-
-static u8 qede_check_csum(u16 flag)
-{
- if (!qede_tunn_exist(flag))
- return qede_check_notunn_csum(flag);
- else
- return qede_check_tunn_csum(flag);
-}
-
-static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
- u16 flag)
-{
- u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
-
- if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
- ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
- (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
- PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
- return true;
-
- return false;
-}
-
-/* Return true iff packet is to be passed to stack */
-static bool qede_rx_xdp(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct qede_rx_queue *rxq,
- struct bpf_prog *prog,
- struct sw_rx_data *bd,
- struct eth_fast_path_rx_reg_cqe *cqe)
-{
- u16 len = le16_to_cpu(cqe->len_on_first_bd);
- struct xdp_buff xdp;
- enum xdp_action act;
-
- xdp.data = page_address(bd->data) + cqe->placement_offset;
- xdp.data_end = xdp.data + len;
-
- /* Queues always have a full reset currently, so for the time
- * being until there's atomic program replace just mark read
- * side for map helpers.
- */
- rcu_read_lock();
- act = bpf_prog_run_xdp(prog, &xdp);
- rcu_read_unlock();
-
- if (act == XDP_PASS)
- return true;
-
- /* Count number of packets not to be passed to stack */
- rxq->xdp_no_pass++;
-
- switch (act) {
- case XDP_TX:
- /* We need the replacement buffer before transmit. */
- if (qede_alloc_rx_buffer(rxq)) {
- qede_recycle_rx_bd_ring(rxq, 1);
- return false;
- }
-
- /* Now if there's a transmission problem, we'd still have to
- * throw current buffer, as replacement was already allocated.
- */
- if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
- dma_unmap_page(rxq->dev, bd->mapping,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(bd->data);
- }
-
- /* Regardless, we've consumed an Rx BD */
- qede_rx_bd_ring_consume(rxq);
- return false;
-
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- case XDP_DROP:
- qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
- }
-
- return false;
-}
-
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct sw_rx_data *bd, u16 len,
- u16 pad)
-{
- unsigned int offset = bd->page_offset;
- struct skb_frag_struct *frag;
- struct page *page = bd->data;
- unsigned int pull_len;
- struct sk_buff *skb;
- unsigned char *va;
-
- /* Allocate a new SKB with a sufficient large header len */
- skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
- if (unlikely(!skb))
- return NULL;
-
- /* Copy data into SKB - if it's small, we can simply copy it and
- * re-use the already allcoated & mapped memory.
- */
- if (len + pad <= edev->rx_copybreak) {
- memcpy(skb_put(skb, len),
- page_address(page) + pad + offset, len);
- qede_reuse_page(rxq, bd);
- goto out;
- }
-
- frag = &skb_shinfo(skb)->frags[0];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, pad + offset, len, rxq->rx_buf_seg_size);
-
- va = skb_frag_address(frag);
- pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
- /* Align the pull_len to optimize memcpy */
- memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
- /* Correct the skb & frag sizes offset after the pull */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
- /* Incr page ref count to reuse on allocation failure so
- * that it doesn't get freed while freeing SKB [as its
- * already mapped there].
- */
- page_ref_inc(page);
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
-out:
- /* We've consumed the first BD and prepared an SKB */
- qede_rx_bd_ring_consume(rxq);
- return skb;
-}
-
-static int qede_rx_build_jumbo(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct sk_buff *skb,
- struct eth_fast_path_rx_reg_cqe *cqe,
- u16 first_bd_len)
-{
- u16 pkt_len = le16_to_cpu(cqe->pkt_len);
- struct sw_rx_data *bd;
- u16 bd_cons_idx;
- u8 num_frags;
-
- pkt_len -= first_bd_len;
-
- /* We've already used one BD for the SKB. Now take care of the rest */
- for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
- u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
- pkt_len;
-
- if (unlikely(!cur_size)) {
- DP_ERR(edev,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
- goto out;
- }
-
- /* We need a replacement buffer for each BD */
- if (unlikely(qede_alloc_rx_buffer(rxq)))
- goto out;
-
- /* Now that we've allocated the replacement buffer,
- * we can safely consume the next BD and map it to the SKB.
- */
- bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- bd = &rxq->sw_rx_ring[bd_cons_idx];
- qede_rx_bd_ring_consume(rxq);
-
- dma_unmap_page(rxq->dev, bd->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
- bd->data, 0, cur_size);
-
- skb->truesize += PAGE_SIZE;
- skb->data_len += cur_size;
- skb->len += cur_size;
- pkt_len -= cur_size;
- }
-
- if (unlikely(pkt_len))
- DP_ERR(edev,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
-
-out:
- return num_frags;
-}
-
-static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct qede_rx_queue *rxq,
- union eth_rx_cqe *cqe,
- enum eth_rx_cqe_type type)
-{
- switch (type) {
- case ETH_RX_CQE_TYPE_TPA_START:
- qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
- return 0;
- case ETH_RX_CQE_TYPE_TPA_CONT:
- qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
- return 0;
- case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
- return 1;
- default:
- return 0;
- }
-}
-
-static int qede_rx_process_cqe(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct qede_rx_queue *rxq)
-{
- struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
- u16 len, pad, bd_cons_idx, parse_flag;
- enum eth_rx_cqe_type cqe_type;
- union eth_rx_cqe *cqe;
- struct sw_rx_data *bd;
- struct sk_buff *skb;
- __le16 flags;
- u8 csum_flag;
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
- cqe_type = cqe->fast_path_regular.type;
-
- /* Process an unlikely slowpath event */
- if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- struct eth_slow_path_rx_cqe *sp_cqe;
-
- sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
- edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
- return 0;
- }
-
- /* Handle TPA cqes */
- if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
- return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
-
- /* Get the data from the SW ring; Consume it only after it's evident
- * we wouldn't recycle it.
- */
- bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- bd = &rxq->sw_rx_ring[bd_cons_idx];
-
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
-
- /* Run eBPF program if one is attached */
- if (xdp_prog)
- if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
- return 1;
-
- /* If this is an error packet then drop it */
- flags = cqe->fast_path_regular.pars_flags.flags;
- parse_flag = le16_to_cpu(flags);
-
- csum_flag = qede_check_csum(parse_flag);
- if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
- rxq->rx_ip_frags++;
- } else {
- DP_NOTICE(edev,
- "CQE has error, flags = %x, dropping incoming packet\n",
- parse_flag);
- rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
- return 0;
- }
- }
-
- /* Basic validation passed; Need to prepare an SKB. This would also
- * guarantee to finally consume the first BD upon success.
- */
- skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
- if (!skb) {
- rxq->rx_alloc_errors++;
- qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
- return 0;
- }
-
- /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
- * by a single cqe.
- */
- if (fp_cqe->bd_num > 1) {
- u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
- fp_cqe, len);
-
- if (unlikely(unmapped_frags > 0)) {
- qede_recycle_rx_bd_ring(rxq, unmapped_frags);
- dev_kfree_skb_any(skb);
- return 0;
- }
- }
-
- /* The SKB contains all the data. Now prepare meta-magic */
- skb->protocol = eth_type_trans(skb, edev->ndev);
- qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
- qede_set_skb_csum(skb, csum_flag);
- skb_record_rx_queue(skb, rxq->rxq_id);
-
- /* SKB is prepared - pass it to stack */
- qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
- return 1;
-}
-
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
-{
- struct qede_rx_queue *rxq = fp->rxq;
- struct qede_dev *edev = fp->edev;
- u16 hw_comp_cons, sw_comp_cons;
- int work_done = 0;
-
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD in the while-loop before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB, and then
- * the CPU reads the hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Loop to complete all indicated BDs */
- while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
- qede_rx_process_cqe(edev, fp, rxq);
- qed_chain_recycle_consumed(&rxq->rx_comp_ring);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
- work_done++;
- }
-
- /* Update producers */
- qede_update_rx_prod(edev, rxq);
-
- return work_done;
-}
-
-static bool qede_poll_is_more_work(struct qede_fastpath *fp)
-{
- qed_sb_update_sb_idx(fp->sb_info);
-
- /* *_has_*_work() reads the status block, thus we need to ensure that
- * status block indices have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that we won't write the
- * "newer" value of the status block to HW (if there was a DMA right
- * after qede_has_rx_work and if there is no rmb, the memory reading
- * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
- * In this case there will never be another interrupt until there is
- * another update of the status block, while there is still unhandled
- * work.
- */
- rmb();
-
- if (likely(fp->type & QEDE_FASTPATH_RX))
- if (qede_has_rx_work(fp->rxq))
- return true;
-
- if (fp->type & QEDE_FASTPATH_XDP)
- if (qede_txq_has_work(fp->xdp_tx))
- return true;
-
- if (likely(fp->type & QEDE_FASTPATH_TX))
- if (qede_txq_has_work(fp->txq))
- return true;
-
- return false;
-}
-
-static int qede_poll(struct napi_struct *napi, int budget)
-{
- struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
- napi);
- struct qede_dev *edev = fp->edev;
- int rx_work_done = 0;
-
- if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
- qede_tx_int(edev, fp->txq);
-
- if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
- qede_xdp_tx_int(edev, fp->xdp_tx);
-
- rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
- qede_has_rx_work(fp->rxq)) ?
- qede_rx_int(fp, budget) : 0;
- if (rx_work_done < budget) {
- if (!qede_poll_is_more_work(fp)) {
- napi_complete(napi);
-
- /* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
- } else {
- rx_work_done = budget;
- }
- }
-
- if (fp->xdp_xmit) {
- u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
-
- fp->xdp_xmit = 0;
- fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
- qede_update_tx_producer(fp->xdp_tx);
- }
-
- return rx_work_done;
-}
-
-static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
-{
- struct qede_fastpath *fp = fp_cookie;
-
- qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
-
- napi_schedule_irqoff(&fp->napi);
- return IRQ_HANDLED;
-}
-
-/* -------------------------------------------------------------------------
- * END OF FAST-PATH
- * -------------------------------------------------------------------------
- */
-
static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
-static int qede_set_mac_addr(struct net_device *ndev, void *p);
-static void qede_set_rx_mode(struct net_device *ndev);
-static void qede_config_rx_mode(struct net_device *ndev);
-
-static int qede_set_ucast_rx_mac(struct qede_dev *edev,
- enum qed_filter_xcast_params_type opcode,
- unsigned char mac[ETH_ALEN])
-{
- struct qed_filter_params filter_cmd;
-
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.mac_valid = 1;
- ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
-
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
-
-static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
- enum qed_filter_xcast_params_type opcode,
- u16 vid)
-{
- struct qed_filter_params filter_cmd;
-
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.vlan_valid = 1;
- filter_cmd.filter.ucast.vlan = vid;
-
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
@@ -2019,9 +399,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
-static
-struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void qede_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -2051,8 +430,6 @@ struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.tx_total_collisions;
stats->rx_crc_errors = edev->stats.rx_crc_errors;
stats->rx_frame_errors = edev->stats.rx_align_errors;
-
- return stats;
}
#ifdef CONFIG_QED_SRIOV
@@ -2096,445 +473,37 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}
-#endif
-static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
-{
- struct qed_update_vport_params params;
- int rc;
-
- /* Proceed only if action actually needs to be performed */
- if (edev->accept_any_vlan == action)
- return;
-
- memset(&params, 0, sizeof(params));
-
- params.vport_id = 0;
- params.accept_any_vlan = action;
- params.update_accept_any_vlan_flg = 1;
-
- rc = edev->ops->vport_update(edev->cdev, &params);
- if (rc) {
- DP_ERR(edev, "Failed to %s accept-any-vlan\n",
- action ? "enable" : "disable");
- } else {
- DP_INFO(edev, "%s accept-any-vlan\n",
- action ? "enabled" : "disabled");
- edev->accept_any_vlan = action;
- }
-}
-
-static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan, *tmp;
- int rc = 0;
-
- DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
-
- vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan) {
- DP_INFO(edev, "Failed to allocate struct for vlan\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&vlan->list);
- vlan->vid = vid;
- vlan->configured = false;
-
- /* Verify vlan isn't already configured */
- list_for_each_entry(tmp, &edev->vlan_list, list) {
- if (tmp->vid == vlan->vid) {
- DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
- "vlan already configured\n");
- kfree(vlan);
- return -EEXIST;
- }
- }
-
- /* If interface is down, cache this VLAN ID and return */
- __qede_lock(edev);
- if (edev->state != QEDE_STATE_OPEN) {
- DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "Interface is down, VLAN %d will be configured when interface is up\n",
- vid);
- if (vid != 0)
- edev->non_configured_vlans++;
- list_add(&vlan->list, &edev->vlan_list);
- goto out;
- }
-
- /* Check for the filter limit.
- * Note - vlan0 has a reserved filter and can be added without
- * worrying about quota
- */
- if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
- (vlan->vid == 0)) {
- rc = qede_set_ucast_rx_vlan(edev,
- QED_FILTER_XCAST_TYPE_ADD,
- vlan->vid);
- if (rc) {
- DP_ERR(edev, "Failed to configure VLAN %d\n",
- vlan->vid);
- kfree(vlan);
- goto out;
- }
- vlan->configured = true;
-
- /* vlan0 filter isn't consuming out of our quota */
- if (vlan->vid != 0)
- edev->configured_vlans++;
- } else {
- /* Out of quota; Activate accept-any-VLAN mode */
- if (!edev->non_configured_vlans)
- qede_config_accept_any_vlan(edev, true);
-
- edev->non_configured_vlans++;
- }
-
- list_add(&vlan->list, &edev->vlan_list);
-
-out:
- __qede_unlock(edev);
- return rc;
-}
-
-static void qede_del_vlan_from_list(struct qede_dev *edev,
- struct qede_vlan *vlan)
-{
- /* vlan0 filter isn't consuming out of our quota */
- if (vlan->vid != 0) {
- if (vlan->configured)
- edev->configured_vlans--;
- else
- edev->non_configured_vlans--;
- }
-
- list_del(&vlan->list);
- kfree(vlan);
-}
-
-static int qede_configure_vlan_filters(struct qede_dev *edev)
-{
- int rc = 0, real_rc = 0, accept_any_vlan = 0;
- struct qed_dev_eth_info *dev_info;
- struct qede_vlan *vlan = NULL;
-
- if (list_empty(&edev->vlan_list))
- return 0;
-
- dev_info = &edev->dev_info;
-
- /* Configure non-configured vlans */
- list_for_each_entry(vlan, &edev->vlan_list, list) {
- if (vlan->configured)
- continue;
-
- /* We have used all our credits, now enable accept_any_vlan */
- if ((vlan->vid != 0) &&
- (edev->configured_vlans == dev_info->num_vlan_filters)) {
- accept_any_vlan = 1;
- continue;
- }
-
- DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
-
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
- vlan->vid);
- if (rc) {
- DP_ERR(edev, "Failed to configure VLAN %u\n",
- vlan->vid);
- real_rc = rc;
- continue;
- }
-
- vlan->configured = true;
- /* vlan0 filter doesn't consume our VLAN filter's quota */
- if (vlan->vid != 0) {
- edev->non_configured_vlans--;
- edev->configured_vlans++;
- }
- }
-
- /* enable accept_any_vlan mode if we have more VLANs than credits,
- * or remove accept_any_vlan mode if we've actually removed
- * a non-configured vlan, and all remaining vlans are truly configured.
- */
-
- if (accept_any_vlan)
- qede_config_accept_any_vlan(edev, true);
- else if (!edev->non_configured_vlans)
- qede_config_accept_any_vlan(edev, false);
-
- return real_rc;
-}
-
-static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan = NULL;
- int rc = 0;
-
- DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
-
- /* Find whether entry exists */
- __qede_lock(edev);
- list_for_each_entry(vlan, &edev->vlan_list, list)
- if (vlan->vid == vid)
- break;
-
- if (!vlan || (vlan->vid != vid)) {
- DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
- "Vlan isn't configured\n");
- goto out;
- }
-
- if (edev->state != QEDE_STATE_OPEN) {
- /* As interface is already down, we don't have a VPORT
- * instance to remove vlan filter. So just update vlan list
- */
- DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "Interface is down, removing VLAN from list only\n");
- qede_del_vlan_from_list(edev, vlan);
- goto out;
- }
-
- /* Remove vlan */
- if (vlan->configured) {
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
- vid);
- if (rc) {
- DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- goto out;
- }
- }
-
- qede_del_vlan_from_list(edev, vlan);
-
- /* We have removed a VLAN - try to see if we can
- * configure non-configured VLAN from the list.
- */
- rc = qede_configure_vlan_filters(edev);
-
-out:
- __qede_unlock(edev);
- return rc;
-}
-
-static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
-{
- struct qede_vlan *vlan = NULL;
-
- if (list_empty(&edev->vlan_list))
- return;
-
- list_for_each_entry(vlan, &edev->vlan_list, list) {
- if (!vlan->configured)
- continue;
-
- vlan->configured = false;
-
- /* vlan0 filter isn't consuming out of our quota */
- if (vlan->vid != 0) {
- edev->non_configured_vlans++;
- edev->configured_vlans--;
- }
-
- DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "marked vlan %d as non-configured\n", vlan->vid);
- }
-
- edev->accept_any_vlan = false;
-}
-static void qede_set_features_reload(struct qede_dev *edev,
- struct qede_reload_args *args)
-{
- edev->ndev->features = args->u.features;
-}
-
-int qede_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct qede_dev *edev = netdev_priv(dev);
- netdev_features_t changes = features ^ dev->features;
- bool need_reload = false;
-
- /* No action needed if hardware GRO is disabled during driver load */
- if (changes & NETIF_F_GRO) {
- if (dev->features & NETIF_F_GRO)
- need_reload = !edev->gro_disable;
- else
- need_reload = edev->gro_disable;
- }
-
- if (need_reload) {
- struct qede_reload_args args;
-
- args.u.features = features;
- args.func = &qede_set_features_reload;
-
- /* Make sure that we definitely need to reload.
- * In case of an eBPF attached program, there will be no FW
- * aggregations, so no need to actually reload.
- */
- __qede_lock(edev);
- if (edev->xdp_prog)
- args.func(edev, &args);
- else
- qede_reload(edev, &args, true);
- __qede_unlock(edev);
-
- return 1;
- }
-
- return 0;
-}
-
-static void qede_udp_tunnel_add(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (edev->vxlan_dst_port)
- return;
-
- edev->vxlan_dst_port = t_port;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
- t_port);
-
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (edev->geneve_dst_port)
- return;
-
- edev->geneve_dst_port = t_port;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
- t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
- break;
- default:
- return;
- }
+ if (!edev->ops)
+ return -EINVAL;
- schedule_delayed_work(&edev->sp_task, 0);
+ return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
+#endif
-static void qede_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (t_port != edev->vxlan_dst_port)
- return;
- edev->vxlan_dst_port = 0;
+ if (!netif_running(dev))
+ return -EAGAIN;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
- t_port);
-
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (t_port != edev->geneve_dst_port)
- return;
-
- edev->geneve_dst_port = 0;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
- t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
- break;
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return qede_ptp_hw_ts(edev, ifr);
default:
- return;
- }
-
- schedule_delayed_work(&edev->sp_task, 0);
-}
-
-/* 8B udp header + 8B base tunnel header + 32B option length */
-#define QEDE_MAX_TUN_HDR_LEN 48
-
-static netdev_features_t qede_features_check(struct sk_buff *skb,
- struct net_device *dev,
- netdev_features_t features)
-{
- if (skb->encapsulation) {
- u8 l4_proto = 0;
-
- switch (vlan_get_protocol(skb)) {
- case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
- break;
- default:
- return features;
- }
-
- /* Disable offloads for geneve tunnels, as HW can't parse
- * the geneve header which has option length greater than 32B.
- */
- if ((l4_proto == IPPROTO_UDP) &&
- ((skb_inner_mac_header(skb) -
- skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
- return features & ~(NETIF_F_CSUM_MASK |
- NETIF_F_GSO_MASK);
- }
-
- return features;
-}
-
-static void qede_xdp_reload_func(struct qede_dev *edev,
- struct qede_reload_args *args)
-{
- struct bpf_prog *old;
-
- old = xchg(&edev->xdp_prog, args->u.new_prog);
- if (old)
- bpf_prog_put(old);
-}
-
-static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
-{
- struct qede_reload_args args;
-
- if (prog && prog->xdp_adjust_head) {
- DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "default IOCTL cmd 0x%x\n", cmd);
return -EOPNOTSUPP;
}
- /* If we're called, there was already a bpf reference increment */
- args.func = &qede_xdp_reload_func;
- args.u.new_prog = prog;
- qede_reload(edev, &args, false);
-
return 0;
}
-static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
-{
- struct qede_dev *edev = netdev_priv(dev);
-
- switch (xdp->command) {
- case XDP_SETUP_PROG:
- return qede_xdp_set(edev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_attached = !!edev->xdp_prog;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -2543,9 +512,11 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+ .ndo_do_ioctl = qede_ioctl,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
+ .ndo_set_vf_trust = qede_set_vf_trust,
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
@@ -2814,7 +785,7 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = 192;
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -2883,6 +854,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (rc)
goto err3;
+	/* Prepare the lock prior to the registration of the netdev,
+ * as once it's registered we might reach flows requiring it
+ * [it's even possible to reach a flow needing it directly
+ * from there, although it's unlikely].
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
rc = register_netdev(edev->ndev);
if (rc) {
DP_NOTICE(edev, "Cannot register net-device\n");
@@ -2891,6 +869,15 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+ /* PTP not supported on VFs */
+ if (!is_vf) {
+ rc = qede_ptp_register_phc(edev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register PHC\n");
+ goto err5;
+ }
+ }
+
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
#ifdef CONFIG_DCB
@@ -2898,14 +885,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_set_dcbnl_ops(edev->ndev);
#endif
- INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
- mutex_init(&edev->qede_lock);
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
return 0;
+err5:
+ unregister_netdev(edev->ndev);
err4:
qede_roce_dev_remove(edev);
err3:
@@ -2957,6 +944,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
+ qede_ptp_remove(edev);
+
qede_roce_dev_remove(edev);
edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -2967,14 +956,20 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
if (edev->xdp_prog)
bpf_prog_put(edev->xdp_prog);
- free_netdev(ndev);
-
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
+ /* Since this can happen out-of-sync with other flows,
+ * don't release the netdevice until after slowpath stop
+ * has been called to guarantee various other contexts
+ * [e.g., QED register callbacks] won't break anything when
+ * accessing the netdevice.
+ */
+ free_netdev(ndev);
+
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -3215,8 +1210,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
goto err;
/* Allocate buffers for the Rx ring */
+ rxq->filled_buffers = 0;
for (i = 0; i < rxq->num_rx_buffers; i++) {
- rc = qede_alloc_rx_buffer(rxq);
+ rc = qede_alloc_rx_buffer(rxq, false);
if (rc) {
DP_ERR(edev,
"Rx buffers allocation failed at index %d\n", i);
@@ -3564,19 +1560,24 @@ static int qede_stop_txq(struct qede_dev *edev,
static int qede_stop_queues(struct qede_dev *edev)
{
- struct qed_update_vport_params vport_update_params;
+ struct qed_update_vport_params *vport_update_params;
struct qed_dev *cdev = edev->cdev;
struct qede_fastpath *fp;
int rc, i;
/* Disable the vport */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 0;
- vport_update_params.update_rss_flg = 0;
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params)
+ return -ENOMEM;
+
+ vport_update_params->vport_id = 0;
+ vport_update_params->update_vport_active_flg = 1;
+ vport_update_params->vport_active_flg = 0;
+ vport_update_params->update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, vport_update_params);
+ vfree(vport_update_params);
- rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
DP_ERR(edev, "Failed to update vport\n");
return rc;
@@ -3688,11 +1689,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_params vport_update_params;
- struct qed_queue_start_common_params q_params;
struct qed_dev_info *qed_info = &edev->dev_info.common;
+ struct qed_update_vport_params *vport_update_params;
+ struct qed_queue_start_common_params q_params;
struct qed_start_vport_params start = {0};
- bool reset_rss_indir = false;
int rc, i;
if (!edev->num_queues) {
@@ -3701,6 +1701,11 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
return -EINVAL;
}
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params)
+ return -ENOMEM;
+
+ start.handle_ptp_pkts = !!(edev->ptp);
start.gro_enable = !edev->gro_disable;
start.mtu = edev->ndev->mtu;
start.vport_id = 0;
@@ -3712,7 +1717,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
+ goto out;
}
DP_VERBOSE(edev, NETIF_MSG_IFUP,
@@ -3748,7 +1753,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
if (rc) {
DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
rc);
- return rc;
+ goto out;
}
/* Use the return parameters */
@@ -3764,108 +1769,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
if (fp->type & QEDE_FASTPATH_XDP) {
rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
if (rc)
- return rc;
+ goto out;
fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
if (IS_ERR(fp->rxq->xdp_prog)) {
rc = PTR_ERR(fp->rxq->xdp_prog);
fp->rxq->xdp_prog = NULL;
- return rc;
+ goto out;
}
}
if (fp->type & QEDE_FASTPATH_TX) {
rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
if (rc)
- return rc;
+ goto out;
}
}
/* Prepare and send the vport enable */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = start.vport_id;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 1;
+ vport_update_params->vport_id = start.vport_id;
+ vport_update_params->update_vport_active_flg = 1;
+ vport_update_params->vport_active_flg = 1;
if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) {
- vport_update_params.update_tx_switching_flg = 1;
- vport_update_params.tx_switching_flg = 1;
+ vport_update_params->update_tx_switching_flg = 1;
+ vport_update_params->tx_switching_flg = 1;
}
- /* Fill struct with RSS params */
- if (QEDE_RSS_COUNT(edev) > 1) {
- vport_update_params.update_rss_flg = 1;
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
- /* Need to validate current RSS config uses valid entries */
- for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
- if (edev->rss_params.rss_ind_table[i] >=
- QEDE_RSS_COUNT(edev)) {
- reset_rss_indir = true;
- break;
- }
- }
-
- if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
- reset_rss_indir) {
- u16 val;
-
- for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
- u16 indir_val;
-
- val = QEDE_RSS_COUNT(edev);
- indir_val = ethtool_rxfh_indir_default(i, val);
- edev->rss_params.rss_ind_table[i] = indir_val;
- }
- edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
- }
-
- if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
- netdev_rss_key_fill(edev->rss_params.rss_key,
- sizeof(edev->rss_params.rss_key));
- edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
- }
-
- if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
- edev->rss_params.rss_caps = QED_RSS_IPV4 |
- QED_RSS_IPV6 |
- QED_RSS_IPV4_TCP |
- QED_RSS_IPV6_TCP;
- edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
- }
-
- memcpy(&vport_update_params.rss_params, &edev->rss_params,
- sizeof(vport_update_params.rss_params));
- } else {
- memset(&vport_update_params.rss_params, 0,
- sizeof(vport_update_params.rss_params));
- }
-
- rc = edev->ops->vport_update(cdev, &vport_update_params);
- if (rc) {
+ rc = edev->ops->vport_update(cdev, vport_update_params);
+ if (rc)
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-static int qede_set_mcast_rx_mac(struct qede_dev *edev,
- enum qed_filter_xcast_params_type opcode,
- unsigned char *mac, int num_macs)
-{
- struct qed_filter_params filter_cmd;
- int i;
-
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_MCAST;
- filter_cmd.filter.mcast.type = opcode;
- filter_cmd.filter.mcast.num = num_macs;
-
- for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
- ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+out:
+ vfree(vport_update_params);
+ return rc;
}
enum qede_unload_mode {
@@ -3886,6 +1827,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
+ qede_ptp_stop(edev);
+
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -3929,7 +1872,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
- struct qed_link_output link_output;
int rc;
DP_INFO(edev, "Starting qede load\n");
@@ -3981,11 +1923,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- /* Query whether link is already-up */
- memset(&link_output, 0, sizeof(link_output));
- edev->ops->common->get_link(edev->cdev, &link_output);
qede_roce_dev_event_open(edev);
- qede_link_update(edev, &link_output);
+
+ qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
edev->state = QEDE_STATE_OPEN;
@@ -4097,192 +2037,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
}
-
-static int qede_set_mac_addr(struct net_device *ndev, void *p)
-{
- struct qede_dev *edev = netdev_priv(ndev);
- struct sockaddr *addr = p;
- int rc;
-
- ASSERT_RTNL(); /* @@@TBD To be removed */
-
- DP_INFO(edev, "Set_mac_addr called\n");
-
- if (!is_valid_ether_addr(addr->sa_data)) {
- DP_NOTICE(edev, "The MAC address is not valid\n");
- return -EFAULT;
- }
-
- if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
- DP_NOTICE(edev, "qed prevents setting MAC\n");
- return -EINVAL;
- }
-
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
-
- if (!netif_running(ndev)) {
- DP_NOTICE(edev, "The device is currently down\n");
- return 0;
- }
-
- /* Remove the previous primary mac */
- rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
- edev->primary_mac);
- if (rc)
- return rc;
-
- edev->ops->common->update_mac(edev->cdev, addr->sa_data);
-
- /* Add MAC filter according to the new unicast HW MAC address */
- ether_addr_copy(edev->primary_mac, ndev->dev_addr);
- return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
- edev->primary_mac);
-}
-
-static int
-qede_configure_mcast_filtering(struct net_device *ndev,
- enum qed_filter_rx_mode_type *accept_flags)
-{
- struct qede_dev *edev = netdev_priv(ndev);
- unsigned char *mc_macs, *temp;
- struct netdev_hw_addr *ha;
- int rc = 0, mc_count;
- size_t size;
-
- size = 64 * ETH_ALEN;
-
- mc_macs = kzalloc(size, GFP_KERNEL);
- if (!mc_macs) {
- DP_NOTICE(edev,
- "Failed to allocate memory for multicast MACs\n");
- rc = -ENOMEM;
- goto exit;
- }
-
- temp = mc_macs;
-
- /* Remove all previously configured MAC filters */
- rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
- mc_macs, 1);
- if (rc)
- goto exit;
-
- netif_addr_lock_bh(ndev);
-
- mc_count = netdev_mc_count(ndev);
- if (mc_count < 64) {
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(temp, ha->addr);
- temp += ETH_ALEN;
- }
- }
-
- netif_addr_unlock_bh(ndev);
-
- /* Check for all multicast @@@TBD resource allocation */
- if ((ndev->flags & IFF_ALLMULTI) ||
- (mc_count > 64)) {
- if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
- *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
- } else {
- /* Add all multicast MAC filters */
- rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
- mc_macs, mc_count);
- }
-
-exit:
- kfree(mc_macs);
- return rc;
-}
-
-static void qede_set_rx_mode(struct net_device *ndev)
-{
- struct qede_dev *edev = netdev_priv(ndev);
-
- set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
-}
-
-/* Must be called with qede_lock held */
-static void qede_config_rx_mode(struct net_device *ndev)
-{
- enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
- struct qede_dev *edev = netdev_priv(ndev);
- struct qed_filter_params rx_mode;
- unsigned char *uc_macs, *temp;
- struct netdev_hw_addr *ha;
- int rc, uc_count;
- size_t size;
-
- netif_addr_lock_bh(ndev);
-
- uc_count = netdev_uc_count(ndev);
- size = uc_count * ETH_ALEN;
-
- uc_macs = kzalloc(size, GFP_ATOMIC);
- if (!uc_macs) {
- DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
- netif_addr_unlock_bh(ndev);
- return;
- }
-
- temp = uc_macs;
- netdev_for_each_uc_addr(ha, ndev) {
- ether_addr_copy(temp, ha->addr);
- temp += ETH_ALEN;
- }
-
- netif_addr_unlock_bh(ndev);
-
- /* Configure the struct for the Rx mode */
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
- /* Remove all previous unicast secondary macs and multicast macs
- * (configrue / leave the primary mac)
- */
- rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
- edev->primary_mac);
- if (rc)
- goto out;
-
- /* Check for promiscuous */
- if ((ndev->flags & IFF_PROMISC) ||
- (uc_count > edev->dev_info.num_mac_filters - 1)) {
- accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
- } else {
- /* Add MAC filters according to the unicast secondary macs */
- int i;
-
- temp = uc_macs;
- for (i = 0; i < uc_count; i++) {
- rc = qede_set_ucast_rx_mac(edev,
- QED_FILTER_XCAST_TYPE_ADD,
- temp);
- if (rc)
- goto out;
-
- temp += ETH_ALEN;
- }
-
- rc = qede_configure_mcast_filtering(ndev, &accept_flags);
- if (rc)
- goto out;
- }
-
- /* take care of VLAN mode */
- if (ndev->flags & IFF_PROMISC) {
- qede_config_accept_any_vlan(edev, true);
- } else if (!edev->non_configured_vlans) {
- /* It's possible that accept_any_vlan mode is set due to a
- * previous setting of IFF_PROMISC. If vlan credits are
- * sufficient, disable accept_any_vlan.
- */
- qede_config_accept_any_vlan(edev, false);
- }
-
- rx_mode.filter.accept_flags = accept_flags;
- edev->ops->filter_config(edev->cdev, &rx_mode);
-out:
- kfree(uc_macs);
-}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
new file mode 100644
index 000000000000..2e62dec09bd7
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -0,0 +1,536 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "qede_ptp.h"
+
+struct qede_ptp {
+ const struct qed_eth_ptp_ops *ops;
+ struct ptp_clock_info clock_info;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *clock;
+ struct work_struct work;
+ struct qede_dev *edev;
+ struct sk_buff *tx_skb;
+
+ /* ptp spinlock is used for protecting the cycle/time counter fields
+	 * and also for serializing the qed PTP API invocations.
+ */
+ spinlock_t lock;
+ bool hw_ts_ioctl_called;
+ u16 tx_type;
+ u16 rx_filter;
+};
+
+/**
+ * qede_ptp_adjfreq
+ * @info: the ptp clock info structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ */
+static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
+{
+ struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
+ struct qede_dev *edev = ptp->edev;
+ int rc;
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->adjfreq(edev->cdev, ppb);
+ spin_unlock_bh(&ptp->lock);
+ } else {
+ DP_ERR(edev, "PTP adjfreq called while interface is down\n");
+ rc = -EFAULT;
+ }
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
+ delta);
+
+ spin_lock_bh(&ptp->lock);
+ timecounter_adjtime(&ptp->tc, delta);
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 ns;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ spin_lock_bh(&ptp->lock);
+ ns = timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->lock);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int qede_ptp_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 ns;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ ns = timespec64_to_ns(ts);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
+
+ /* Re-init the timecounter */
+ spin_lock_bh(&ptp->lock);
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ DP_ERR(edev, "PHC ancillary features are not supported\n");
+
+ return -ENOTSUPP;
+}
+
+static void qede_ptp_task(struct work_struct *work)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 timestamp, ns;
+ int rc;
+
+ ptp = container_of(work, struct qede_ptp, work);
+ edev = ptp->edev;
+
+ /* Read Tx timestamp registers */
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
+ spin_unlock_bh(&ptp->lock);
+ if (rc) {
+ /* Reschedule to keep checking for a valid timestamp value */
+ schedule_work(&ptp->work);
+ return;
+ }
+
+ ns = timecounter_cyc2time(&ptp->tc, timestamp);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
+
+/* Read the PHC. This API is invoked with ptp_lock held. */
+static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 phc_cycles;
+ int rc;
+
+ ptp = container_of(cc, struct qede_ptp, cc);
+ edev = ptp->edev;
+ rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
+ if (rc)
+ WARN_ONCE(1, "PHC read err %d\n", rc);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
+
+ return phc_cycles;
+}
+
+static void qede_ptp_init_cc(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
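+	/* The PHC cycle counter is treated as a direct nanosecond counter:
+	 * with mult = 1 and shift = 0 the timecounter applies no scaling
+	 * when converting cycles to time.
+	 */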
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+}
+
+static int qede_ptp_cfg_filters(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp = edev->ptp;
+
+ if (!ptp)
+ return -EIO;
+
+ if (!ptp->hw_ts_ioctl_called) {
+ DP_INFO(edev, "TS IOCTL not called\n");
+ return 0;
+ }
+
+ switch (ptp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+ ptp->ops->hwtstamp_tx_on(edev->cdev);
+ break;
+
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ DP_ERR(edev, "One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
+ spin_lock_bh(&ptp->lock);
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
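+		/* Timestamping all (or an arbitrary subset of) packets is not
+		 * supported; fall back to no Rx timestamping.
+		 */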
+ ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+ ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+ ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		/* Initialize PTP detection for L2 events */
+ ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
+ ptp->ops->cfg_rx_filters(edev->cdev,
+ QED_PTP_FILTER_L2_IPV4_IPV6);
+ break;
+ }
+
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
+ config.tx_type, config.rx_filter);
+
+ if (config.flags) {
+ DP_ERR(edev, "config.flags is reserved for future use\n");
+ return -EINVAL;
+ }
+
+ ptp->hw_ts_ioctl_called = 1;
+ ptp->tx_type = config.tx_type;
+ ptp->rx_filter = config.rx_filter;
+
+ rc = qede_ptp_cfg_filters(edev);
+ if (rc)
+ return rc;
+
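+	/* Report back the Rx filter that was actually configured; it may
+	 * differ from the one requested.
+	 */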
+ config.rx_filter = ptp->rx_filter;
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ spin_lock_init(&ptp->lock);
+
+ /* Configure PTP in HW */
+ rc = ptp->ops->enable(edev->cdev);
+ if (rc) {
+ DP_ERR(edev, "Stopping PTP initialization\n");
+ return;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only in the first
+ * load. If done in every load, PTP application will fail when doing
+ * unload / load (e.g. MTU change) while it is running.
+ */
+ if (init_tc) {
+ qede_ptp_init_cc(edev);
+ timecounter_init(&ptp->tc, &ptp->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
+}
+
+void qede_ptp_start(struct qede_dev *edev, bool init_tc)
+{
+ qede_ptp_init(edev, init_tc);
+ qede_ptp_cfg_filters(edev);
+}
+
+void qede_ptp_remove(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (ptp && ptp->clock) {
+ ptp_clock_unregister(ptp->clock);
+ ptp->clock = NULL;
+ }
+
+ kfree(ptp);
+ edev->ptp = NULL;
+}
+
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+{
+ struct qede_ptp *ptp = edev->ptp;
+
+ if (!ptp)
+ return -EIO;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (ptp->clock)
+ info->phc_index = ptp_clock_index(ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+
+/* Called during unload, to stop PTP-related stuff */
+void qede_ptp_stop(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ /* Cancel PTP work queue. Should be done after the Tx queues are
+ * drained to prevent additional scheduling.
+ */
+ cancel_work_sync(&ptp->work);
+ if (ptp->tx_skb) {
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ }
+
+ /* Disable PTP in HW */
+ spin_lock_bh(&ptp->lock);
+ ptp->ops->disable(edev->cdev);
+ spin_unlock_bh(&ptp->lock);
+}
+
+int qede_ptp_register_phc(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ DP_INFO(edev, "Failed to allocate struct for PTP\n");
+ return -ENOMEM;
+ }
+
+ ptp->edev = edev;
+ ptp->ops = edev->ops->ptp;
+ if (!ptp->ops) {
+ kfree(ptp);
+ edev->ptp = NULL;
+		DP_ERR(edev, "PTP clock registration failed\n");
+ return -EIO;
+ }
+
+ edev->ptp = ptp;
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ ptp->clock_info.owner = THIS_MODULE;
+ snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
+ ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
+ ptp->clock_info.n_alarm = 0;
+ ptp->clock_info.n_ext_ts = 0;
+ ptp->clock_info.n_per_out = 0;
+ ptp->clock_info.pps = 0;
+ ptp->clock_info.adjfreq = qede_ptp_adjfreq;
+ ptp->clock_info.adjtime = qede_ptp_adjtime;
+ ptp->clock_info.gettime64 = qede_ptp_gettime;
+ ptp->clock_info.settime64 = qede_ptp_settime;
+ ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
+
+ ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ ptp->clock = NULL;
+ kfree(ptp);
+ edev->ptp = NULL;
+		DP_ERR(edev, "PTP clock registration failed\n");
+ }
+
+ return 0;
+}
+
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+ DP_NOTICE(edev,
+ "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ } else if (unlikely(ptp->tx_skb)) {
+ DP_NOTICE(edev,
+ "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* schedule check for Tx timestamp */
+ ptp->tx_skb = skb_get(skb);
+ schedule_work(&ptp->work);
+ }
+}
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+ struct qede_ptp *ptp;
+ u64 timestamp, ns;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
+ if (rc) {
+ spin_unlock_bh(&ptp->lock);
+ DP_INFO(edev, "Invalid Rx timestamp\n");
+ return;
+ }
+
+ ns = timecounter_cyc2time(&ptp->tc, timestamp);
+ spin_unlock_bh(&ptp->lock);
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
new file mode 100644
index 000000000000..f328f9bba53a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -0,0 +1,65 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QEDE_PTP_H_
+#define _QEDE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include "qede.h"
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+void qede_ptp_start(struct qede_dev *edev, bool init_tc);
+void qede_ptp_stop(struct qede_dev *edev);
+void qede_ptp_remove(struct qede_dev *edev);
+int qede_ptp_register_phc(struct qede_dev *edev);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+
+static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
+ union eth_rx_cqe *cqe,
+ struct sk_buff *skb)
+{
+ /* Check if this packet was timestamped */
+ if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
+ (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) {
+ if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
+ & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) {
+ qede_ptp_rx_ts(edev, skb);
+ } else {
+ DP_INFO(edev,
+ "Timestamp recorded for non PTP packets\n");
+ }
+ }
+}
+#endif /* _QEDE_PTP_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index 49272716a7c4..f00657ce7c8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -1,5 +1,5 @@
/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 5c100ab86c00..2991179c2fd0 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1707,23 +1707,30 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
return status;
}
-static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int ql_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = ql_supported_modes(qdev);
+ supported = ql_supported_modes(qdev);
if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
} else {
- ecmd->port = PORT_TP;
- ecmd->phy_address = qdev->PHYAddr;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = qdev->PHYAddr;
}
- ecmd->advertising = ql_supported_modes(qdev);
- ecmd->autoneg = ql_get_auto_cfg_status(qdev);
- ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
- ecmd->duplex = ql_get_full_dup(qdev);
+ advertising = ql_supported_modes(qdev);
+ cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
+ cmd->base.speed = ql_get_speed(qdev);
+ cmd->base.duplex = ql_get_full_dup(qdev);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -1769,12 +1776,12 @@ static void ql_get_pauseparam(struct net_device *ndev,
}
static const struct ethtool_ops ql3xxx_ethtool_ops = {
- .get_settings = ql_get_settings,
.get_drvinfo = ql_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = ql_get_msglevel,
.set_msglevel = ql_set_msglevel,
.get_pauseparam = ql_get_pauseparam,
+ .get_link_ksettings = ql_get_link_ksettings,
};
static int ql_populate_free_queue(struct ql3_adapter *qdev)
@@ -2025,7 +2032,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
- netif_receive_skb(skb);
+ napi_gro_receive(&qdev->napi, skb);
lrg_buf_cb2->skb = NULL;
if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2095,7 +2102,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
}
skb2->protocol = eth_type_trans(skb2, qdev->ndev);
- netif_receive_skb(skb2);
+ napi_gro_receive(&qdev->napi, skb2);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
lrg_buf_cb2->skb = NULL;
@@ -2105,8 +2112,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
-static int ql_tx_rx_clean(struct ql3_adapter *qdev,
- int *tx_cleaned, int *rx_cleaned, int work_to_do)
+static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
{
struct net_rsp_iocb *net_rsp;
struct net_device *ndev = qdev->ndev;
@@ -2114,7 +2120,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
/* While there are entries in the completion queue. */
while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
- qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+ qdev->rsp_consumer_index) && (work_done < budget)) {
net_rsp = qdev->rsp_current;
rmb();
@@ -2130,21 +2136,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
case OPCODE_OB_MAC_IOCB_FN2:
ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
net_rsp);
- (*tx_cleaned)++;
break;
case OPCODE_IB_MAC_IOCB:
case OPCODE_IB_3032_MAC_IOCB:
ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
net_rsp);
- (*rx_cleaned)++;
+ work_done++;
break;
case OPCODE_IB_IP_IOCB:
case OPCODE_IB_3032_IP_IOCB:
ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
net_rsp);
- (*rx_cleaned)++;
+ work_done++;
break;
default: {
u32 *tmp = (u32 *)net_rsp;
@@ -2169,7 +2174,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
qdev->rsp_current++;
}
- work_done = *tx_cleaned + *rx_cleaned;
}
return work_done;
@@ -2178,25 +2182,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
static int ql_poll(struct napi_struct *napi, int budget)
{
struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
- int rx_cleaned = 0, tx_cleaned = 0;
- unsigned long hw_flags;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
+ int work_done;
- ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+ work_done = ql_tx_rx_clean(qdev, budget);
- if (tx_cleaned + rx_cleaned != budget) {
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- __napi_complete(napi);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, flags);
ql_update_small_bufq_prod_index(qdev);
ql_update_lrg_bufq_prod_index(qdev);
writel(qdev->rsp_consumer_index,
&port_regs->CommonRegs.rspQConsumerIndex);
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ spin_unlock_irqrestore(&qdev->hw_lock, flags);
ql_enable_interrupts(qdev);
}
- return tx_cleaned + rx_cleaned;
+ return work_done;
}
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bdbcd2b088a0..99b187bfdd55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3252,12 +3252,13 @@ out:
return config;
}
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = 0;
int status = 0;
+ u32 supported, advertising;
if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
/* Get port configuration info */
@@ -3271,45 +3272,48 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
if (netif_running(adapter->netdev) && ahw->has_link_events) {
- ethtool_cmd_speed_set(ecmd, ahw->link_speed);
- ecmd->duplex = ahw->link_duplex;
- ecmd->autoneg = ahw->link_autoneg;
+ ecmd->base.speed = ahw->link_speed;
+ ecmd->base.duplex = ahw->link_duplex;
+ ecmd->base.autoneg = ahw->link_autoneg;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
- ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
}
- ecmd->supported = (SUPPORTED_10baseT_Full |
+ supported = (SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_10000baseT_Full |
SUPPORTED_Autoneg);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ ecmd->link_modes.advertising);
+
+ if (ecmd->base.autoneg == AUTONEG_ENABLE) {
if (ahw->port_config & QLC_83XX_10_CAPABLE)
- ecmd->advertising |= SUPPORTED_10baseT_Full;
+ advertising |= SUPPORTED_10baseT_Full;
if (ahw->port_config & QLC_83XX_100_CAPABLE)
- ecmd->advertising |= SUPPORTED_100baseT_Full;
+ advertising |= SUPPORTED_100baseT_Full;
if (ahw->port_config & QLC_83XX_1G_CAPABLE)
- ecmd->advertising |= SUPPORTED_1000baseT_Full;
+ advertising |= SUPPORTED_1000baseT_Full;
if (ahw->port_config & QLC_83XX_10G_CAPABLE)
- ecmd->advertising |= SUPPORTED_10000baseT_Full;
+ advertising |= SUPPORTED_10000baseT_Full;
if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
} else {
switch (ahw->link_speed) {
case SPEED_10:
- ecmd->advertising = SUPPORTED_10baseT_Full;
+ advertising = SUPPORTED_10baseT_Full;
break;
case SPEED_100:
- ecmd->advertising = SUPPORTED_100baseT_Full;
+ advertising = SUPPORTED_100baseT_Full;
break;
case SPEED_1000:
- ecmd->advertising = SUPPORTED_1000baseT_Full;
+ advertising = SUPPORTED_1000baseT_Full;
break;
case SPEED_10000:
- ecmd->advertising = SUPPORTED_10000baseT_Full;
+ advertising = SUPPORTED_10000baseT_Full;
break;
default:
break;
@@ -3319,56 +3323,58 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
switch (ahw->supported_type) {
case PORT_FIBRE:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
break;
case PORT_TP:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ ecmd->base.port = PORT_TP;
break;
case PORT_DA:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
- ecmd->transceiver = XCVR_EXTERNAL;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_DA;
break;
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
- ecmd->transceiver = XCVR_EXTERNAL;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_OTHER;
break;
}
- ecmd->phy_address = ahw->physical_port;
+ ecmd->base.phy_address = ahw->physical_port;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
+
return status;
}
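The getter above keeps computing legacy SUPPORTED_xxx/ADVERTISED_xxx masks and then translates them into the link-mode bitmaps of struct ethtool_link_ksettings. A hedged sketch of that conversion idiom in isolation (the mask values are only an example):

static void example_fill_ksettings(struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
	u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;

	/* legacy u32 masks -> link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
}

The reverse helper, ethtool_convert_link_mode_to_legacy_u32(), is used earlier in the same function to fold the existing advertising bitmap back into a u32 before OR-ing in new bits.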
-int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter,
+ const struct ethtool_link_ksettings *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = adapter->ahw->port_config;
int status = 0;
/* 83xx devices do not support Half duplex */
- if (ecmd->duplex == DUPLEX_HALF) {
- netdev_info(adapter->netdev,
- "Half duplex mode not supported\n");
- return -EINVAL;
+ if (ecmd->base.duplex == DUPLEX_HALF) {
+ netdev_info(adapter->netdev,
+ "Half duplex mode not supported\n");
+ return -EINVAL;
}
- if (ecmd->autoneg) {
+ if (ecmd->base.autoneg) {
ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
ahw->port_config |= (QLC_83XX_100_CAPABLE |
QLC_83XX_1G_CAPABLE |
QLC_83XX_10G_CAPABLE);
} else { /* force speed */
ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (ecmd->base.speed) {
case SPEED_10:
ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
QLC_83XX_1G_CAPABLE |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 331ae2c20f40..3dfe8e27b51c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -628,8 +628,10 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
-int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd);
+int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter,
+ const struct ethtool_link_ksettings *ecmd);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
struct ethtool_pauseparam *);
int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index daf05155b732..d344e9d43832 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -573,8 +573,10 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ if (ptr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
tx_ring->hw_consumer = ptr;
/* cmd desc ring */
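The qlcnic_ctx.c hunk above fixes a leak: once earlier ring allocations have succeeded, a later allocation failure has to unwind them via the error label instead of returning directly. A hedged, self-contained sketch of that goto-unwind idiom (example_alloc_two() and its arguments are hypothetical):

static int example_alloc_two(struct device *dev, void **a, dma_addr_t *pa,
			     void **b, dma_addr_t *pb)
{
	*a = dma_alloc_coherent(dev, PAGE_SIZE, pa, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;		/* nothing allocated yet, plain return is fine */

	*b = dma_alloc_coherent(dev, PAGE_SIZE, pb, GFP_KERNEL);
	if (!*b)
		goto err_out_free;	/* must release the first buffer */

	return 0;

err_out_free:
	dma_free_coherent(dev, PAGE_SIZE, *a, *pa);
	return -ENOMEM;
}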
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 0a2318cad34d..9a869c15d8bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -285,42 +285,43 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
}
-static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
int check_sfp_module = 0, err = 0;
u16 pcifn = ahw->pci_func;
+ u32 supported, advertising;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
- ecmd->advertising = (ADVERTISED_100baseT_Half |
+ advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
- ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
- ecmd->duplex = adapter->ahw->link_duplex;
- ecmd->autoneg = adapter->ahw->link_autoneg;
+ ecmd->base.speed = adapter->ahw->link_speed;
+ ecmd->base.duplex = adapter->ahw->link_duplex;
+ ecmd->base.autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
if (netif_running(adapter->netdev) && ahw->has_link_events) {
@@ -331,73 +332,72 @@ static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
}
- ethtool_cmd_speed_set(ecmd, ahw->link_speed);
- ecmd->autoneg = ahw->link_autoneg;
- ecmd->duplex = ahw->link_duplex;
+ ecmd->base.speed = ahw->link_speed;
+ ecmd->base.autoneg = ahw->link_autoneg;
+ ecmd->base.duplex = ahw->link_duplex;
goto skip;
}
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
- ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
} else
return -EIO;
skip:
- ecmd->phy_address = adapter->ahw->physical_port;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ecmd->base.phy_address = adapter->ahw->physical_port;
switch (adapter->ahw->board_type) {
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- ecmd->autoneg = adapter->ahw->link_autoneg;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.autoneg = adapter->ahw->link_autoneg;
break;
case QLCNIC_BRDTYPE_P3P_IMEZ:
case QLCNIC_BRDTYPE_P3P_XG_LOM:
case QLCNIC_BRDTYPE_P3P_HMEZ:
- ecmd->supported |= SUPPORTED_MII;
- ecmd->advertising |= ADVERTISED_MII;
- ecmd->port = PORT_MII;
- ecmd->autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_MII;
+ advertising |= ADVERTISED_MII;
+ ecmd->base.port = PORT_MII;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
break;
case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
break;
case QLCNIC_BRDTYPE_P3P_10G_TP:
if (adapter->ahw->port_type == QLCNIC_XGBE) {
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
- ecmd->advertising |=
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
- ecmd->port = PORT_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
} else {
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- ecmd->advertising |=
+ ecmd->base.autoneg = AUTONEG_ENABLE;
+ supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->port = PORT_TP;
+ ecmd->base.port = PORT_TP;
}
break;
default:
@@ -412,47 +412,52 @@ skip:
case LINKEVENT_MODULE_OPTICAL_SRLR:
case LINKEVENT_MODULE_OPTICAL_LRM:
case LINKEVENT_MODULE_OPTICAL_SFP_1G:
- ecmd->port = PORT_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
break;
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
case LINKEVENT_MODULE_TWINAX:
- ecmd->port = PORT_TP;
+ ecmd->base.port = PORT_TP;
break;
default:
- ecmd->port = PORT_OTHER;
+ ecmd->base.port = PORT_OTHER;
}
}
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int qlcnic_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int qlcnic_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_get_settings(adapter, ecmd);
+ return qlcnic_82xx_get_link_ksettings(adapter, ecmd);
else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_get_settings(adapter, ecmd);
+ return qlcnic_83xx_get_link_ksettings(adapter, ecmd);
return -EIO;
}
static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+ const struct ethtool_link_ksettings *ecmd)
{
u32 ret = 0, config = 0;
/* read which mode */
- if (ecmd->duplex)
+ if (ecmd->base.duplex)
config |= 0x1;
- if (ecmd->autoneg)
+ if (ecmd->base.autoneg)
config |= 0x2;
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (ecmd->base.speed) {
case SPEED_10:
config |= (0 << 8);
break;
@@ -475,7 +480,8 @@ static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
return ret;
}
-static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
u32 ret = 0;
struct qlcnic_adapter *adapter = netdev_priv(dev);
@@ -484,16 +490,16 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
if (qlcnic_83xx_check(adapter))
- ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ ret = qlcnic_83xx_set_link_ksettings(adapter, ecmd);
else
ret = qlcnic_set_port_config(adapter, ecmd);
if (!ret)
return ret;
- adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
- adapter->ahw->link_duplex = ecmd->duplex;
- adapter->ahw->link_autoneg = ecmd->autoneg;
+ adapter->ahw->link_speed = ecmd->base.speed;
+ adapter->ahw->link_duplex = ecmd->base.duplex;
+ adapter->ahw->link_autoneg = ecmd->base.autoneg;
if (!netif_running(dev))
return 0;
@@ -1822,8 +1828,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
}
const struct ethtool_ops qlcnic_ethtool_ops = {
- .get_settings = qlcnic_get_settings,
- .set_settings = qlcnic_set_settings,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
@@ -1850,10 +1854,11 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_dump_flag = qlcnic_get_dump_flag,
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
+ .set_link_ksettings = qlcnic_set_link_ksettings,
};
const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
- .get_settings = qlcnic_get_settings,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
@@ -1872,12 +1877,13 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
.set_coalesce = qlcnic_set_intr_coalesce,
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
};
const struct ethtool_ops qlcnic_ethtool_failed_ops = {
- .get_settings = qlcnic_get_settings,
.get_drvinfo = qlcnic_get_drvinfo,
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
.set_dump = qlcnic_set_dump,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index fedd7366713c..84dd83031a1b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_sds_intr(adapter, sds_ring);
qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
adapter = sds_ring->adapter;
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_sds_intr(adapter, sds_ring);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 4c0cce962585..b6628aaa6e4a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4220,7 +4220,7 @@ recheck:
if (dev == NULL)
goto done;
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
@@ -4256,7 +4256,7 @@ recheck:
if (dev == NULL)
goto done;
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(dev)) {
dev = vlan_dev_real_dev(dev);
goto recheck;
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 5dade1fd08b8..31f40148fa5c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -375,28 +375,34 @@ ql_get_ethtool_stats(struct net_device *ndev,
}
}
-static int ql_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int ql_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ u32 supported, advertising;
+
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
- ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
- ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->port = PORT_TP;
- ecmd->autoneg = AUTONEG_ENABLE;
+ supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->base.port = PORT_TP;
+ ecmd->base.autoneg = AUTONEG_ENABLE;
} else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
}
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
+ ecmd->base.speed = SPEED_10000;
+ ecmd->base.duplex = DUPLEX_FULL;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
return 0;
}
@@ -706,7 +712,6 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
}
const struct ethtool_ops qlge_ethtool_ops = {
- .get_settings = ql_get_settings,
.get_drvinfo = ql_get_drvinfo,
.get_wol = ql_get_wol,
.set_wol = ql_set_wol,
@@ -724,5 +729,6 @@ const struct ethtool_ops qlge_ethtool_ops = {
.get_sset_count = ql_get_sset_count,
.get_strings = ql_get_strings,
.get_ethtool_stats = ql_get_ethtool_stats,
+ .get_link_ksettings = ql_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 1409412ab39d..e9e647072596 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile
index 7a6687982dae..fc57cedf4c0c 100644
--- a/drivers/net/ethernet/qualcomm/emac/Makefile
+++ b/drivers/net/ethernet/qualcomm/emac/Makefile
@@ -4,6 +4,6 @@
obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
-qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \
emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \
emac-sgmii-qdf2400.o
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
new file mode 100644
index 000000000000..bbe24639aa5a
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include "emac.h"
+
+static const char * const emac_ethtool_stat_strings[] = {
+ "rx_ok",
+ "rx_bcast",
+ "rx_mcast",
+ "rx_pause",
+ "rx_ctrl",
+ "rx_fcs_err",
+ "rx_len_err",
+ "rx_byte_cnt",
+ "rx_runt",
+ "rx_frag",
+ "rx_sz_64",
+ "rx_sz_65_127",
+ "rx_sz_128_255",
+ "rx_sz_256_511",
+ "rx_sz_512_1023",
+ "rx_sz_1024_1518",
+ "rx_sz_1519_max",
+ "rx_sz_ov",
+ "rx_rxf_ov",
+ "rx_align_err",
+ "rx_bcast_byte_cnt",
+ "rx_mcast_byte_cnt",
+ "rx_err_addr",
+ "rx_crc_align",
+ "rx_jabbers",
+ "tx_ok",
+ "tx_bcast",
+ "tx_mcast",
+ "tx_pause",
+ "tx_exc_defer",
+ "tx_ctrl",
+ "tx_defer",
+ "tx_byte_cnt",
+ "tx_sz_64",
+ "tx_sz_65_127",
+ "tx_sz_128_255",
+ "tx_sz_256_511",
+ "tx_sz_512_1023",
+ "tx_sz_1024_1518",
+ "tx_sz_1519_max",
+ "tx_1_col",
+ "tx_2_col",
+ "tx_late_col",
+ "tx_abort_col",
+ "tx_underrun",
+ "tx_rd_eop",
+ "tx_len_err",
+ "tx_trunc",
+ "tx_bcast_byte",
+ "tx_mcast_byte",
+ "tx_col",
+};
+
+#define EMAC_STATS_LEN ARRAY_SIZE(emac_ethtool_stat_strings)
+
+static u32 emac_get_msglevel(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ return adpt->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ adpt->msg_enable = data;
+}
+
+static int emac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return EMAC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < EMAC_STATS_LEN; i++) {
+ strlcpy(data, emac_ethtool_stat_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ spin_lock(&adpt->stats.lock);
+
+ emac_update_hw_stats(adpt);
+ memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64));
+
+ spin_unlock(&adpt->stats.lock);
+}
+
+static int emac_nway_reset(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static void emac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ ring->rx_max_pending = EMAC_MAX_RX_DESCS;
+ ring->tx_max_pending = EMAC_MAX_TX_DESCS;
+ ring->rx_pending = adpt->rx_desc_cnt;
+ ring->tx_pending = adpt->tx_desc_cnt;
+}
+
+static int emac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ /* We don't have separate queues/rings for small/large frames, so
+ * reject any attempt to specify those values separately.
+ */
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ adpt->tx_desc_cnt =
+ clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS);
+
+ adpt->rx_desc_cnt =
+ clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS);
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+static void emac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ pause->autoneg = adpt->automatic ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ pause->rx_pause = adpt->rx_flow_control ? 1 : 0;
+ pause->tx_pause = adpt->tx_flow_control ? 1 : 0;
+}
+
+static int emac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ adpt->automatic = pause->autoneg == AUTONEG_ENABLE;
+ adpt->rx_flow_control = pause->rx_pause != 0;
+ adpt->tx_flow_control = pause->tx_pause != 0;
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+/* Selected registers that one might want to track during runtime. */
+static const u16 emac_regs[] = {
+ EMAC_DMA_MAS_CTRL,
+ EMAC_MAC_CTRL,
+ EMAC_TXQ_CTRL_0,
+ EMAC_RXQ_CTRL_0,
+ EMAC_DMA_CTRL,
+ EMAC_INT_MASK,
+ EMAC_AXI_MAST_CTRL,
+ EMAC_CORE_HW_VERSION,
+ EMAC_MISC_CTRL,
+};
+
+/* Every time emac_regs[] above is changed, increase this version number. */
+#define EMAC_REGS_VERSION 0
+
+#define EMAC_MAX_REG_SIZE ARRAY_SIZE(emac_regs)
+
+static void emac_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *buff)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ u32 *val = buff;
+ unsigned int i;
+
+ regs->version = EMAC_REGS_VERSION;
+ regs->len = EMAC_MAX_REG_SIZE * sizeof(u32);
+
+ for (i = 0; i < EMAC_MAX_REG_SIZE; i++)
+ val[i] = readl(adpt->base + emac_regs[i]);
+}
+
+static int emac_get_regs_len(struct net_device *netdev)
+{
+ return EMAC_MAX_REG_SIZE * sizeof(u32);
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+
+ .get_msglevel = emac_get_msglevel,
+ .set_msglevel = emac_set_msglevel,
+
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+
+ .get_ringparam = emac_get_ringparam,
+ .set_ringparam = emac_set_ringparam,
+
+ .get_pauseparam = emac_get_pauseparam,
+ .set_pauseparam = emac_set_pauseparam,
+
+ .nway_reset = emac_nway_reset,
+
+ .get_link = ethtool_op_get_link,
+
+ .get_regs_len = emac_get_regs_len,
+ .get_regs = emac_get_regs,
+};
+
+void emac_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &emac_ethtool_ops;
+}
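emac_get_ethtool_stats() above copies EMAC_STATS_LEN consecutive u64 counters straight out of adpt->stats, so struct emac_stats must keep its counters in the same order as emac_ethtool_stat_strings[], starting at rx_ok and ending at tx_col. A hedged sketch of a compile-time guard one could add for that assumption (not part of the patch; it presumes the counters begin the struct with no padding):

static void emac_check_stats_layout(void)
{
	/* Hypothetical guard: tx_col is assumed to be the last counter,
	 * matching the last entry of emac_ethtool_stat_strings[].
	 */
	BUILD_BUG_ON(offsetof(struct emac_stats, tx_col) !=
		     (EMAC_STATS_LEN - 1) * sizeof(u64));
}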
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 0b4deb31e742..cc065ffbe4b5 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -25,58 +25,6 @@
#include "emac.h"
#include "emac-sgmii.h"
-/* EMAC base register offsets */
-#define EMAC_MAC_CTRL 0x001480
-#define EMAC_WOL_CTRL0 0x0014a0
-#define EMAC_RSS_KEY0 0x0014b0
-#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0
-#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4
-#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8
-#define EMAC_INTER_SRAM_PART9 0x001534
-#define EMAC_DESC_CTRL_0 0x001540
-#define EMAC_DESC_CTRL_1 0x001544
-#define EMAC_DESC_CTRL_2 0x001550
-#define EMAC_DESC_CTRL_10 0x001554
-#define EMAC_DESC_CTRL_12 0x001558
-#define EMAC_DESC_CTRL_13 0x00155c
-#define EMAC_DESC_CTRL_3 0x001560
-#define EMAC_DESC_CTRL_4 0x001564
-#define EMAC_DESC_CTRL_5 0x001568
-#define EMAC_DESC_CTRL_14 0x00156c
-#define EMAC_DESC_CTRL_15 0x001570
-#define EMAC_DESC_CTRL_16 0x001574
-#define EMAC_DESC_CTRL_6 0x001578
-#define EMAC_DESC_CTRL_8 0x001580
-#define EMAC_DESC_CTRL_9 0x001584
-#define EMAC_DESC_CTRL_11 0x001588
-#define EMAC_TXQ_CTRL_0 0x001590
-#define EMAC_TXQ_CTRL_1 0x001594
-#define EMAC_TXQ_CTRL_2 0x001598
-#define EMAC_RXQ_CTRL_0 0x0015a0
-#define EMAC_RXQ_CTRL_1 0x0015a4
-#define EMAC_RXQ_CTRL_2 0x0015a8
-#define EMAC_RXQ_CTRL_3 0x0015ac
-#define EMAC_BASE_CPU_NUMBER 0x0015b8
-#define EMAC_DMA_CTRL 0x0015c0
-#define EMAC_MAILBOX_0 0x0015e0
-#define EMAC_MAILBOX_5 0x0015e4
-#define EMAC_MAILBOX_6 0x0015e8
-#define EMAC_MAILBOX_13 0x0015ec
-#define EMAC_MAILBOX_2 0x0015f4
-#define EMAC_MAILBOX_3 0x0015f8
-#define EMAC_MAILBOX_11 0x00160c
-#define EMAC_AXI_MAST_CTRL 0x001610
-#define EMAC_MAILBOX_12 0x001614
-#define EMAC_MAILBOX_9 0x001618
-#define EMAC_MAILBOX_10 0x00161c
-#define EMAC_ATHR_HEADER_CTRL 0x001620
-#define EMAC_CLK_GATE_CTRL 0x001814
-#define EMAC_MISC_CTRL 0x001990
-#define EMAC_MAILBOX_7 0x0019e0
-#define EMAC_MAILBOX_8 0x0019e4
-#define EMAC_MAILBOX_15 0x001bd4
-#define EMAC_MAILBOX_16 0x001bd8
-
/* EMAC_MAC_CTRL */
#define SINGLE_PAUSE_MODE 0x10000000
#define DEBUG_MODE 0x08000000
@@ -103,14 +51,6 @@
#define RXEN 0x00000002
#define TXEN 0x00000001
-
-/* EMAC_WOL_CTRL0 */
-#define LK_CHG_PME 0x20
-#define LK_CHG_EN 0x10
-#define MG_FRAME_PME 0x8
-#define MG_FRAME_EN 0x4
-#define WK_FRAME_EN 0x1
-
/* EMAC_DESC_CTRL_3 */
#define RFD_RING_SIZE_BMSK 0xfff
@@ -314,8 +254,6 @@ struct emac_skb_cb {
RX_PKT_INT2 |\
RX_PKT_INT3)
-#define EMAC_MAC_IRQ_RES "core0"
-
void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
{
u32 crc32, bit, reg, mta;
@@ -558,7 +496,7 @@ void emac_mac_reset(struct emac_adapter *adpt)
emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
}
-void emac_mac_start(struct emac_adapter *adpt)
+static void emac_mac_start(struct emac_adapter *adpt)
{
struct phy_device *phydev = adpt->phydev;
u32 mac, csr1;
@@ -575,11 +513,19 @@ void emac_mac_start(struct emac_adapter *adpt)
mac |= TXEN | RXEN; /* enable RX/TX */
- /* Configure MAC flow control to match the PHY's settings. */
- if (phydev->pause)
- mac |= RXFC;
- if (phydev->pause != phydev->asym_pause)
- mac |= TXFC;
+ /* Configure MAC flow control. If set to automatic, then match
+ * whatever the PHY does. Otherwise, enable or disable it, depending
+ * on what the user configured via ethtool.
+ */
+ mac &= ~(RXFC | TXFC);
+
+ if (adpt->automatic) {
+ /* If it's set to automatic, then update our local values */
+ adpt->rx_flow_control = phydev->pause;
+ adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
+ }
+ mac |= adpt->rx_flow_control ? RXFC : 0;
+ mac |= adpt->tx_flow_control ? TXFC : 0;
/* setup link speed */
mac &= ~SPEED_MASK;
@@ -621,8 +567,6 @@ void emac_mac_start(struct emac_adapter *adpt)
emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
(HEADER_ENABLE | HEADER_CNT_EN), 0);
-
- emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
}
void emac_mac_stop(struct emac_adapter *adpt)
@@ -963,12 +907,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
static void emac_adjust_link(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_sgmii *sgmii = &adpt->phy;
struct phy_device *phydev = netdev->phydev;
- if (phydev->link)
+ if (phydev->link) {
emac_mac_start(adpt);
- else
+ sgmii->link_up(adpt);
+ } else {
+ sgmii->link_down(adpt);
emac_mac_stop(adpt);
+ }
phy_print_status(phydev);
}
@@ -977,40 +925,26 @@ static void emac_adjust_link(struct net_device *netdev)
int emac_mac_up(struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- struct emac_irq *irq = &adpt->irq;
int ret;
emac_mac_rx_tx_ring_reset_all(adpt);
emac_mac_config(adpt);
-
- ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq);
- if (ret) {
- netdev_err(adpt->netdev, "could not request %s irq\n",
- EMAC_MAC_IRQ_RES);
- return ret;
- }
-
emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+ adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
if (ret) {
netdev_err(adpt->netdev, "could not connect phy\n");
- free_irq(irq->irq, irq);
return ret;
}
+ phy_attached_print(adpt->phydev, NULL);
+
/* enable mac irq */
writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
- /* Enable pause frames. Without this feature, the EMAC has been shown
- * to receive (and drop) frames with FCS errors at gigabit connections.
- */
- adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
phy_start(adpt->phydev);
napi_enable(&adpt->rx_q.napi);
@@ -1036,7 +970,6 @@ void emac_mac_down(struct emac_adapter *adpt)
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
- free_irq(adpt->irq.irq, &adpt->irq);
phy_disconnect(adpt->phydev);
@@ -1213,7 +1146,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
(bool)RRD_CVTAG(&rrd));
- netdev->last_rx = jiffies;
(*num_pkts)++;
} while (*num_pkts < max_pkts);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index f3aa24dc4a29..5028fb4bec2b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -230,7 +230,6 @@ struct emac_adapter;
int emac_mac_up(struct emac_adapter *adpt);
void emac_mac_down(struct emac_adapter *adpt);
void emac_mac_reset(struct emac_adapter *adpt);
-void emac_mac_start(struct emac_adapter *adpt);
void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df28b96..441c19366489 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -22,8 +22,6 @@
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
-#include "emac-phy.h"
-#include "emac-sgmii.h"
/* EMAC base register offsets */
#define EMAC_MDIO_CTRL 0x001414
@@ -201,6 +199,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
else
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+ /* of_phy_find_device() claims a reference to the phydev,
+ * so we do that here manually as well. When the driver
+ * later unloads, it can unilaterally drop the reference
+ * without worrying about ACPI vs DT.
+ */
+ if (adpt->phydev)
+ get_device(&adpt->phydev->mdio.dev);
} else {
struct device_node *phy_np;
@@ -221,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
return -ENODEV;
}
- if (adpt->phydev->drv)
- phy_attached_print(adpt->phydev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
index 49f3701a6dd7..c0c301c72129 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
@@ -13,19 +13,6 @@
#ifndef _EMAC_PHY_H_
#define _EMAC_PHY_H_
-typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);
-
-/** emac_phy - internal emac phy
- * @base base address
- * @digital per-lane digital block
- * @initialize initialization function
- */
-struct emac_phy {
- void __iomem *base;
- void __iomem *digital;
- emac_sgmii_initialize initialize;
-};
-
struct emac_adapter;
int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
index af690e1a6e7b..10de8d0d9a56 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
@@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = {
int emac_sgmii_init_fsm9900(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
unsigned int i;
emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index 5b8419498ef1..f62c215be779 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
int emac_sgmii_init_qdf2400(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
void __iomem *phy_regs = phy->base;
void __iomem *laned = phy->digital;
unsigned int i;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
index 6170200d7479..b9c0df7bdd15 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
@@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
int emac_sgmii_init_qdf2432(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
void __iomem *phy_regs = phy->base;
void __iomem *laned = phy->digital;
unsigned int i;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index bf722a9bb09d..040b28977ee7 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -25,7 +25,9 @@
#define EMAC_SGMII_PHY_SPEED_CFG1 0x0074
#define EMAC_SGMII_PHY_IRQ_CMD 0x00ac
#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4
#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4
#define FORCE_AN_TX_CFG BIT(5)
#define FORCE_AN_RX_CFG BIT(4)
@@ -36,6 +38,8 @@
#define SPDMODE_100 BIT(0)
#define SPDMODE_10 0
+#define CDR_ALIGN_DET BIT(6)
+
#define IRQ_GLOBAL_CLEAR BIT(0)
#define DECODE_CODE_ERR BIT(7)
@@ -44,52 +48,28 @@
#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
#define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR)
+#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR)
#define SERDES_START_WAIT_TIMES 100
-static int emac_sgmii_link_init(struct emac_adapter *adpt)
+/* Initialize the SGMII link between the internal and external PHYs. */
+static void emac_sgmii_link_init(struct emac_adapter *adpt)
{
- struct phy_device *phydev = adpt->phydev;
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 val;
+ /* Always use autonegotiation. It works no matter how the external
+ * PHY is configured.
+ */
val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
-
- if (phydev->autoneg == AUTONEG_ENABLE) {
- val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
- val |= AN_ENABLE;
- writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
- } else {
- u32 speed_cfg;
-
- switch (phydev->speed) {
- case SPEED_10:
- speed_cfg = SPDMODE_10;
- break;
- case SPEED_100:
- speed_cfg = SPDMODE_100;
- break;
- case SPEED_1000:
- speed_cfg = SPDMODE_1000;
- break;
- default:
- return -EINVAL;
- }
-
- if (phydev->duplex == DUPLEX_FULL)
- speed_cfg |= DUPLEX_MODE;
-
- val &= ~AN_ENABLE;
- writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1);
- writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
- }
-
- return 0;
+ val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+ val |= AN_ENABLE;
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
}
static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 status;
writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
@@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
return 0;
}
+/* The number of decode errors that triggers a reset */
+#define DECODE_ERROR_LIMIT 2
+
+static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
+{
+ struct emac_adapter *adpt = data;
+ struct emac_sgmii *phy = &adpt->phy;
+ u32 status;
+
+ status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
+ status &= SGMII_ISR_MASK;
+ if (!status)
+ return IRQ_HANDLED;
+
+ /* If we get a decoding error and CDR is not locked, then try
+ * resetting the internal PHY. The internal PHY uses an embedded
+ * clock with Clock and Data Recovery (CDR) to recover the
+ * clock and data.
+ */
+ if (status & SGMII_PHY_INTERRUPT_ERR) {
+ int count;
+
+ /* The SGMII is capable of recovering from some decode
+ * errors automatically. However, if we get multiple
+ * decode errors in a row, then assume that something
+ * is wrong and reset the interface.
+ */
+ count = atomic_inc_return(&phy->decode_error_count);
+ if (count == DECODE_ERROR_LIMIT) {
+ schedule_work(&adpt->work_thread);
+ atomic_set(&phy->decode_error_count, 0);
+ }
+ } else {
+ /* We only care about consecutive decode errors. */
+ atomic_set(&phy->decode_error_count, 0);
+ }
+
+ if (emac_sgmii_irq_clear(adpt, status)) {
+ netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n");
+ schedule_work(&adpt->work_thread);
+ }
+
+ return IRQ_HANDLED;
+}
+
static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 val;
/* Reset PHY */
@@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
int ret;
emac_sgmii_reset_prepare(adpt);
-
- ret = emac_sgmii_link_init(adpt);
- if (ret) {
- netdev_err(adpt->netdev, "unsupported link speed\n");
- return;
- }
+ emac_sgmii_link_init(adpt);
ret = adpt->phy.initialize(adpt);
if (ret)
@@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
ret);
}
+static int emac_sgmii_open(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+ int ret;
+
+ if (sgmii->irq) {
+ /* Make sure interrupts are cleared and disabled first */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0,
+ "emac-sgmii", adpt);
+ if (ret) {
+ netdev_err(adpt->netdev,
+ "could not register handler for internal PHY\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int emac_sgmii_close(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Make sure interrupts are disabled */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ free_irq(sgmii->irq, adpt);
+
+ return 0;
+}
+
+/* The error interrupts are only valid after the link is up */
+static int emac_sgmii_link_up(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+ int ret;
+
+ /* Clear and enable interrupts */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
+
+ writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ return 0;
+}
+
+static int emac_sgmii_link_down(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Disable interrupts */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ synchronize_irq(sgmii->irq);
+
+ return 0;
+}
+
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
#ifdef CONFIG_ACPI
@@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- emac_sgmii_initialize *initialize = data;
+ emac_sgmii_function *initialize = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
struct resource *res;
int ret;
@@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- phy->initialize = (emac_sgmii_initialize)match->data;
+ phy->initialize = (emac_sgmii_function)match->data;
}
+ phy->open = emac_sgmii_open;
+ phy->close = emac_sgmii_close;
+ phy->link_up = emac_sgmii_link_up;
+ phy->link_down = emac_sgmii_link_down;
+
/* Base address is the first address */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
if (ret)
goto error;
- emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+ emac_sgmii_link_init(adpt);
+
+ ret = platform_get_irq(sgmii_pdev, 0);
+ if (ret > 0)
+ phy->irq = ret;
/* We've remapped the addresses, so we don't need the device any
* more. of_find_device_by_node() says we should release it.
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
index 80ed3dc3157a..e7c0c3b2baa4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -16,6 +16,31 @@
struct emac_adapter;
struct platform_device;
+typedef int (*emac_sgmii_function)(struct emac_adapter *adpt);
+
+/** emac_sgmii - internal emac phy
+ * @base base address
+ * @digital per-lane digital block
+ * @irq the interrupt number
+ * @decode_error_count count of consecutive decode errors
+ * @initialize initialization function
+ * @open called when the driver is opened
+ * @close called when the driver is closed
+ * @link_up called when the link comes up
+ * @link_down called when the link comes down
+ */
+struct emac_sgmii {
+ void __iomem *base;
+ void __iomem *digital;
+ unsigned int irq;
+ atomic_t decode_error_count;
+ emac_sgmii_function initialize;
+ emac_sgmii_function open;
+ emac_sgmii_function close;
+ emac_sgmii_function link_up;
+ emac_sgmii_function link_down;
+};
+
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
void emac_sgmii_reset(struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 422289c232bc..28a8cdc36485 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
emac_mac_rx_process(adpt, rx_q, &work_done, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
irq->mask |= rx_q->intr;
writel(irq->mask, adpt->base + EMAC_INT_MASK);
@@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
static int emac_open(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_irq *irq = &adpt->irq;
int ret;
+ ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
+ return ret;
+ }
+
/* allocate rx/tx dma buffer & descriptors */
ret = emac_mac_rx_tx_rings_alloc_all(adpt);
if (ret) {
netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+ free_irq(irq->irq, irq);
return ret;
}
ret = emac_mac_up(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(irq->irq, irq);
return ret;
}
- emac_mac_start(adpt);
+ ret = adpt->phy.open(adpt);
+ if (ret) {
+ emac_mac_down(adpt);
+ emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(irq->irq, irq);
+ return ret;
+ }
return 0;
}
@@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev)
mutex_lock(&adpt->reset_lock);
+ adpt->phy.close(adpt);
emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(adpt->irq.irq, &adpt->irq);
+
mutex_unlock(&adpt->reset_lock);
return 0;
@@ -311,45 +329,56 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
-/* Provide network statistics info for the interface */
-static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *net_stats)
+/**
+ * emac_update_hw_stats - read the EMAC stat registers
+ *
+ * Reads the stats registers and writes the values to adpt->stats.
+ *
+ * adpt->stats.lock must be held while calling this function,
+ * and while reading from adpt->stats.
+ */
+void emac_update_hw_stats(struct emac_adapter *adpt)
{
- struct emac_adapter *adpt = netdev_priv(netdev);
- unsigned int addr = REG_MAC_RX_STATUS_BIN;
struct emac_stats *stats = &adpt->stats;
u64 *stats_itr = &adpt->stats.rx_ok;
- u32 val;
-
- spin_lock(&stats->lock);
+ void __iomem *base = adpt->base;
+ unsigned int addr;
+ addr = REG_MAC_RX_STATUS_BIN;
while (addr <= REG_MAC_RX_STATUS_END) {
- val = readl_relaxed(adpt->base + addr);
- *stats_itr += val;
+ *stats_itr += readl_relaxed(base + addr);
stats_itr++;
addr += sizeof(u32);
}
/* additional rx status */
- val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
- adpt->stats.rx_crc_align += val;
- val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
- adpt->stats.rx_jabbers += val;
+ stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
+ stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);
/* update tx status */
addr = REG_MAC_TX_STATUS_BIN;
- stats_itr = &adpt->stats.tx_ok;
+ stats_itr = &stats->tx_ok;
while (addr <= REG_MAC_TX_STATUS_END) {
- val = readl_relaxed(adpt->base + addr);
- *stats_itr += val;
- ++stats_itr;
+ *stats_itr += readl_relaxed(base + addr);
+ stats_itr++;
addr += sizeof(u32);
}
/* additional tx status */
- val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
- adpt->stats.tx_col += val;
+ stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
+}
+
+/* Provide network statistics info for the interface */
+static void emac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_stats *stats = &adpt->stats;
+
+ spin_lock(&stats->lock);
+
+ emac_update_hw_stats(adpt);
/* return parsed statistics */
net_stats->rx_packets = stats->rx_ok;
@@ -377,8 +406,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
net_stats->tx_window_errors = stats->tx_late_col;
spin_unlock(&stats->lock);
-
- return net_stats;
}
static const struct net_device_ops emac_netdev_ops = {
@@ -409,6 +436,10 @@ static void emac_init_adapter(struct emac_adapter *adpt)
{
u32 reg;
+ adpt->rrd_size = EMAC_RRD_SIZE;
+ adpt->tpd_size = EMAC_TPD_SIZE;
+ adpt->rfd_size = EMAC_RFD_SIZE;
+
/* descriptors */
adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
@@ -429,6 +460,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
/* others */
adpt->preamble = EMAC_PREAMBLE_DEF;
+
+ /* default to automatic flow control */
+ adpt->automatic = true;
}
/* Get the clock */
@@ -593,7 +627,7 @@ static int emac_probe(struct platform_device *pdev)
{
struct net_device *netdev;
struct emac_adapter *adpt;
- struct emac_phy *phy;
+ struct emac_sgmii *phy;
u16 devid, revid;
u32 reg;
int ret;
@@ -620,12 +654,14 @@ static int emac_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
+ emac_set_ethtool_ops(netdev);
adpt = netdev_priv(netdev);
adpt->netdev = netdev;
adpt->msg_enable = EMAC_MSG_DEFAULT;
phy = &adpt->phy;
+ atomic_set(&phy->decode_error_count, 0);
mutex_init(&adpt->reset_lock);
spin_lock_init(&adpt->stats.lock);
@@ -646,10 +682,6 @@ static int emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
netdev->irq = adpt->irq.irq;
- adpt->rrd_size = EMAC_RRD_SIZE;
- adpt->tpd_size = EMAC_TPD_SIZE;
- adpt->rfd_size = EMAC_RFD_SIZE;
-
netdev->netdev_ops = &emac_netdev_ops;
emac_init_adapter(adpt);
@@ -719,8 +751,7 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@@ -740,8 +771,7 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
index 0c76e6cb8c9e..8ee4ec6aef2e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -19,37 +19,88 @@
#include <linux/platform_device.h>
#include "emac-mac.h"
#include "emac-phy.h"
+#include "emac-sgmii.h"
/* EMAC base register offsets */
-#define EMAC_DMA_MAS_CTRL 0x001400
-#define EMAC_IRQ_MOD_TIM_INIT 0x001408
-#define EMAC_BLK_IDLE_STS 0x00140c
-#define EMAC_PHY_LINK_DELAY 0x00141c
-#define EMAC_SYS_ALIV_CTRL 0x001434
-#define EMAC_MAC_IPGIFG_CTRL 0x001484
-#define EMAC_MAC_STA_ADDR0 0x001488
-#define EMAC_MAC_STA_ADDR1 0x00148c
-#define EMAC_HASH_TAB_REG0 0x001490
-#define EMAC_HASH_TAB_REG1 0x001494
-#define EMAC_MAC_HALF_DPLX_CTRL 0x001498
-#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c
-#define EMAC_INT_STATUS 0x001600
-#define EMAC_INT_MASK 0x001604
-#define EMAC_RXMAC_STATC_REG0 0x001700
-#define EMAC_RXMAC_STATC_REG22 0x001758
-#define EMAC_TXMAC_STATC_REG0 0x001760
-#define EMAC_TXMAC_STATC_REG24 0x0017c0
-#define EMAC_CORE_HW_VERSION 0x001974
-#define EMAC_IDT_TABLE0 0x001b00
-#define EMAC_RXMAC_STATC_REG23 0x001bc8
-#define EMAC_RXMAC_STATC_REG24 0x001bcc
-#define EMAC_TXMAC_STATC_REG25 0x001bd0
-#define EMAC_INT1_MASK 0x001bf0
-#define EMAC_INT1_STATUS 0x001bf4
-#define EMAC_INT2_MASK 0x001bf8
-#define EMAC_INT2_STATUS 0x001bfc
-#define EMAC_INT3_MASK 0x001c00
-#define EMAC_INT3_STATUS 0x001c04
+#define EMAC_DMA_MAS_CTRL 0x1400
+#define EMAC_IRQ_MOD_TIM_INIT 0x1408
+#define EMAC_BLK_IDLE_STS 0x140c
+#define EMAC_PHY_LINK_DELAY 0x141c
+#define EMAC_SYS_ALIV_CTRL 0x1434
+#define EMAC_MAC_CTRL 0x1480
+#define EMAC_MAC_IPGIFG_CTRL 0x1484
+#define EMAC_MAC_STA_ADDR0 0x1488
+#define EMAC_MAC_STA_ADDR1 0x148c
+#define EMAC_HASH_TAB_REG0 0x1490
+#define EMAC_HASH_TAB_REG1 0x1494
+#define EMAC_MAC_HALF_DPLX_CTRL 0x1498
+#define EMAC_MAX_FRAM_LEN_CTRL 0x149c
+#define EMAC_WOL_CTRL0 0x14a0
+#define EMAC_RSS_KEY0 0x14b0
+#define EMAC_H1TPD_BASE_ADDR_LO 0x14e0
+#define EMAC_H2TPD_BASE_ADDR_LO 0x14e4
+#define EMAC_H3TPD_BASE_ADDR_LO 0x14e8
+#define EMAC_INTER_SRAM_PART9 0x1534
+#define EMAC_DESC_CTRL_0 0x1540
+#define EMAC_DESC_CTRL_1 0x1544
+#define EMAC_DESC_CTRL_2 0x1550
+#define EMAC_DESC_CTRL_10 0x1554
+#define EMAC_DESC_CTRL_12 0x1558
+#define EMAC_DESC_CTRL_13 0x155c
+#define EMAC_DESC_CTRL_3 0x1560
+#define EMAC_DESC_CTRL_4 0x1564
+#define EMAC_DESC_CTRL_5 0x1568
+#define EMAC_DESC_CTRL_14 0x156c
+#define EMAC_DESC_CTRL_15 0x1570
+#define EMAC_DESC_CTRL_16 0x1574
+#define EMAC_DESC_CTRL_6 0x1578
+#define EMAC_DESC_CTRL_8 0x1580
+#define EMAC_DESC_CTRL_9 0x1584
+#define EMAC_DESC_CTRL_11 0x1588
+#define EMAC_TXQ_CTRL_0 0x1590
+#define EMAC_TXQ_CTRL_1 0x1594
+#define EMAC_TXQ_CTRL_2 0x1598
+#define EMAC_RXQ_CTRL_0 0x15a0
+#define EMAC_RXQ_CTRL_1 0x15a4
+#define EMAC_RXQ_CTRL_2 0x15a8
+#define EMAC_RXQ_CTRL_3 0x15ac
+#define EMAC_BASE_CPU_NUMBER 0x15b8
+#define EMAC_DMA_CTRL 0x15c0
+#define EMAC_MAILBOX_0 0x15e0
+#define EMAC_MAILBOX_5 0x15e4
+#define EMAC_MAILBOX_6 0x15e8
+#define EMAC_MAILBOX_13 0x15ec
+#define EMAC_MAILBOX_2 0x15f4
+#define EMAC_MAILBOX_3 0x15f8
+#define EMAC_INT_STATUS 0x1600
+#define EMAC_INT_MASK 0x1604
+#define EMAC_MAILBOX_11 0x160c
+#define EMAC_AXI_MAST_CTRL 0x1610
+#define EMAC_MAILBOX_12 0x1614
+#define EMAC_MAILBOX_9 0x1618
+#define EMAC_MAILBOX_10 0x161c
+#define EMAC_ATHR_HEADER_CTRL 0x1620
+#define EMAC_RXMAC_STATC_REG0 0x1700
+#define EMAC_RXMAC_STATC_REG22 0x1758
+#define EMAC_TXMAC_STATC_REG0 0x1760
+#define EMAC_TXMAC_STATC_REG24 0x17c0
+#define EMAC_CLK_GATE_CTRL 0x1814
+#define EMAC_CORE_HW_VERSION 0x1974
+#define EMAC_MISC_CTRL 0x1990
+#define EMAC_MAILBOX_7 0x19e0
+#define EMAC_MAILBOX_8 0x19e4
+#define EMAC_IDT_TABLE0 0x1b00
+#define EMAC_RXMAC_STATC_REG23 0x1bc8
+#define EMAC_RXMAC_STATC_REG24 0x1bcc
+#define EMAC_TXMAC_STATC_REG25 0x1bd0
+#define EMAC_MAILBOX_15 0x1bd4
+#define EMAC_MAILBOX_16 0x1bd8
+#define EMAC_INT1_MASK 0x1bf0
+#define EMAC_INT1_STATUS 0x1bf4
+#define EMAC_INT2_MASK 0x1bf8
+#define EMAC_INT2_STATUS 0x1bfc
+#define EMAC_INT3_MASK 0x1c00
+#define EMAC_INT3_STATUS 0x1c04
/* EMAC_DMA_MAS_CTRL */
#define DEV_ID_NUM_BMSK 0x7f000000
@@ -166,10 +217,6 @@ enum emac_clk_id {
#define EMAC_MAX_SETUP_LNK_CYCLE 100
-/* Wake On Lan */
-#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */
-#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */
-
struct emac_stats {
/* rx */
u64 rx_ok; /* good packets */
@@ -291,7 +338,7 @@ struct emac_adapter {
void __iomem *base;
void __iomem *csr;
- struct emac_phy phy;
+ struct emac_sgmii phy;
struct emac_stats stats;
struct emac_irq irq;
@@ -309,6 +356,13 @@ struct emac_adapter {
unsigned int rxbuf_size;
+ /* Flow control / pause frames support. If automatic is true, do
+ * whatever the PHY does. Otherwise, use tx_flow_control and rx_flow_control.
+ */
+ bool automatic;
+ bool tx_flow_control;
+ bool rx_flow_control;
+
/* Ring parameter */
u8 tpd_burst;
u8 rfd_burst;
@@ -330,6 +384,8 @@ struct emac_adapter {
int emac_reinit_locked(struct emac_adapter *adpt);
void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
-irqreturn_t emac_isr(int irq, void *data);
+
+void emac_set_ethtool_ops(struct net_device *netdev);
+void emac_update_hw_stats(struct emac_adapter *adpt);
#endif /* _EMAC_H_ */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 8e28234dddad..d145df98feff 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -188,14 +188,16 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
}
static int
-qcaspi_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+qcaspi_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->transceiver = XCVR_INTERNAL;
- cmd->supported = SUPPORTED_10baseT_Half;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_HALF;
- cmd->port = PORT_OTHER;
- cmd->autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -295,7 +297,6 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
static const struct ethtool_ops qcaspi_ethtool_ops = {
.get_drvinfo = qcaspi_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = qcaspi_get_settings,
.get_ethtool_stats = qcaspi_get_ethtool_stats,
.get_strings = qcaspi_get_strings,
.get_sset_count = qcaspi_get_sset_count,
@@ -303,6 +304,7 @@ static const struct ethtool_ops qcaspi_ethtool_ops = {
.get_regs = qcaspi_get_regs,
.get_ringparam = qcaspi_get_ringparam,
.set_ringparam = qcaspi_set_ringparam,
+ .get_link_ksettings = qcaspi_get_link_ksettings,
};
void qcaspi_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 0b3cd58093d5..672f6b696069 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
struct cp_private *cp = container_of(napi, struct cp_private, napi);
struct net_device *dev = cp->dev;
unsigned int rx_tail = cp->rx_tail;
- int rx;
+ int rx = 0;
- rx = 0;
-rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) {
@@ -556,15 +554,10 @@ rx_next:
/* if we did not reach work limit, then we're done with
* this round of polling
*/
- if (rx < budget) {
+ if (rx < budget && napi_complete_done(napi, rx)) {
unsigned long flags;
- if (cpr16(IntrStatus) & cp_rx_intr_mask)
- goto rx_status_loop;
-
- napi_gro_flush(napi, false);
spin_lock_irqsave(&cp->lock, flags);
- __napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
spin_unlock_irqrestore(&cp->lock, flags);
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9bc047ac883b..89631753e799 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget);
static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
static int rtl8139_close (struct net_device *dev);
static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64
- *stats);
+static void rtl8139_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static void rtl8139_set_rx_mode (struct net_device *dev);
static void __set_rx_mode (struct net_device *dev);
static void rtl8139_hw_start (struct net_device *dev);
@@ -2136,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
if (likely(RTL_R16(IntrStatus) & RxAckBits))
work_done += rtl8139_rx(dev, tp, budget);
- if (work_done < budget) {
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
unsigned long flags;
- /*
- * Order is important since data can get interrupted
- * again when we think we are done.
- */
+
spin_lock_irqsave(&tp->lock, flags);
- __napi_complete(napi);
RTL_W16_F(IntrMask, rtl8139_intr_mask);
spin_unlock_irqrestore(&tp->lock, flags);
}
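
The 8139cp and 8139too poll hunks above depend on napi_complete_done() returning false when NAPI must stay scheduled (for example under busy polling); only on a true return is it safe to re-arm the device interrupt, which is why the old rx_status_loop re-check and the __napi_complete() call can be dropped. A minimal sketch of the resulting poll shape follows; the mydrv_* names are hypothetical placeholders, not anything in this patch:

#include <linux/netdevice.h>

struct mydrv_priv {
	struct napi_struct napi;
	void __iomem *regs;
};

/* Hypothetical RX handler: processes up to @budget packets, returns count. */
static int mydrv_rx(struct mydrv_priv *priv, int budget);
/* Hypothetical helper that unmasks the device's RX/TX interrupts. */
static void mydrv_enable_irqs(struct mydrv_priv *priv);

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = mydrv_rx(priv, budget);

	/* Re-enable interrupts only if NAPI really completed; if
	 * napi_complete_done() returns false the poll will be invoked
	 * again, so interrupts stay masked and no events are lost.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		mydrv_enable_irqs(priv);

	return work_done;
}
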
@@ -2516,7 +2511,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
-static struct rtnl_link_stats64 *
+static void
rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8139_private *tp = netdev_priv(dev);
@@ -2544,8 +2539,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
-
- return stats;
}
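
The rtl8139_get_stats64() change above (and the matching r8169 and sxgbe hunks further down) follows the conversion of ndo_get_stats64 to a void return: the callback no longer returns a stats pointer, it simply fills the rtnl_link_stats64 buffer supplied by the core, which zeroes it beforehand in dev_get_stats(). A minimal sketch of the new shape, with hypothetical mydrv_* names:

#include <linux/netdevice.h>

static void mydrv_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	/* The buffer is zeroed by the core before this call, so only the
	 * counters the driver actually tracks need to be written; nothing
	 * is returned.
	 */
	stats->rx_packets = 0;	/* fill from device counters */
	stats->tx_packets = 0;	/* fill from device counters */
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_get_stats64	= mydrv_get_stats64,
};
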
/* Set or clear the multicast filter for this adaptor.
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 570ed3bd3cbf..9bcd4aefc9c5 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -170,7 +170,7 @@ struct net_local {
spinlock_t lock;
struct net_device *next_module;
struct timer_list timer; /* Media selection timer. */
- long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
int saved_tx_size;
unsigned int tx_unit_busy:1;
unsigned char re_tx, /* Number of packet retransmissions. */
@@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
}
num_tx_since_rx++;
} else if (num_tx_since_rx > 8 &&
- time_after(jiffies, dev->last_rx + HZ)) {
+ time_after(jiffies, lp->last_rx_time + HZ)) {
if (net_debug > 2)
printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
"%ld jiffies status %02x CMR1 %02x.\n", dev->name,
- num_tx_since_rx, jiffies - dev->last_rx, status,
+ num_tx_since_rx, jiffies - lp->last_rx_time, status,
(read_nibble(ioaddr, CMR1) >> 3) & 15);
dev->stats.rx_missed_errors++;
hardware_init(dev);
@@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev)
read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8f1623bf2134..81f18a833527 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
rtl_irq_enable(tp, enable_mask);
mmiowb();
@@ -7755,7 +7755,7 @@ err_pm_runtime_put:
goto out;
}
-static struct rtnl_link_stats64 *
+static void
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -7809,8 +7809,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
le16_to_cpu(tp->tc_offset.tx_aborted);
pm_runtime_put_noidle(&pdev->dev);
-
- return stats;
}
static void rtl8169_net_suspend(struct net_device *dev)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index f1109661a533..0525bd696d5d 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -76,6 +76,7 @@ enum ravb_reg {
CDAR20 = 0x0060,
CDAR21 = 0x0064,
ESR = 0x0088,
+ APSR = 0x008C, /* R-Car Gen3 only */
RCR = 0x0090,
RQC0 = 0x0094,
RQC1 = 0x0098,
@@ -248,6 +249,15 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
+/* APSR */
+enum APSR_BIT {
+ APSR_MEMS = 0x00000002,
+ APSR_CMSW = 0x00000010,
+ APSR_DM = 0x00006000, /* Undocumented? */
+ APSR_DM_RDM = 0x00002000,
+ APSR_DM_TDM = 0x00004000,
+};
+
/* RCR */
enum RCR_BIT {
RCR_EFFS = 0x00000001,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692c840d..8cfc4a54f2dc 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <asm/div64.h>
@@ -179,6 +180,49 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -194,19 +238,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +261,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
}
if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
+
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +485,6 @@ static int ravb_dmac_init(struct net_device *ndev)
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +918,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -926,14 +942,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
- }
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
- }
out:
return budget - quota;
}
@@ -977,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev)
phy_print_status(phydev);
}
+static const struct soc_device_attribute r8a7795es10[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.0", },
+ { /* sentinel */ }
+};
+
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
@@ -1012,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev)
goto err_deregister_fixed_link;
}
- /* This driver only support 10/100Mbit speeds on Gen3
+ /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
* at this time.
*/
- if (priv->chip_id == RCAR_GEN3) {
+ if (soc_device_match(r8a7795es10)) {
err = phy_set_max_speed(phydev, SPEED_100);
if (err) {
netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
@@ -1508,6 +1525,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ /* Zero length DMA descriptors are problematic as they seem to
+ * terminate DMA transfers. Avoid them by simply using a length of
+ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+ * data by the call to skb_put_padto() above this is safe with
+ * respect to both the length of the first DMA descriptor (len)
+ * overflowing the available data and the length of the second DMA
+ * descriptor (skb->len - len) being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
+
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
@@ -1558,7 +1588,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
@@ -1895,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev)
}
}
+/* Set tx and rx clock internal delay modes */
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int set = 0;
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ set |= APSR_DM_RDM;
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ set |= APSR_DM_TDM;
+
+ ravb_modify(ndev, APSR, APSR_DM, set);
+}
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -2007,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ if (priv->chip_id != RCAR_GEN2)
+ ravb_set_delay_mode(ndev);
+
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
@@ -2143,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ if (priv->chip_id != RCAR_GEN2)
+ ravb_set_delay_mode(ndev);
+
/* Restore descriptor base address table */
ravb_write(ndev, priv->desc_bat_dma, DBAT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f729a6b43958..54248775f227 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,9 +1,9 @@
/* SuperH Ethernet device driver
*
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2014 Renesas Solutions Corp.
- * Copyright (C) 2013-2016 Cogent Embedded, Inc.
+ * Copyright (C) 2013-2017 Cogent Embedded, Inc.
* Copyright (C) 2014 Codethink Limited
*
* This program is free software; you can redistribute it and/or modify it
@@ -518,12 +518,19 @@ static struct sh_eth_cpu_data r7s72100_data = {
.ecsr_value = ECSR_ICD,
.ecsipr_value = ECSIPR_ICDIP,
- .eesipr_value = 0xe77f009f,
+ .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
+ EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
+ EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ EESR_TDE,
.fdr_value = 0x0000070f,
.no_psr = 1,
@@ -535,9 +542,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
- .hw_crc = 1,
+ .hw_checksum = 1,
.tsu = 1,
- .shift_rd0 = 1,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
@@ -557,12 +563,19 @@ static struct sh_eth_cpu_data r8a7740_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ EESR_TDE,
.fdr_value = 0x0000070f,
.apr = 1,
@@ -574,10 +587,10 @@ static struct sh_eth_cpu_data r8a7740_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
- .hw_crc = 1,
+ .hw_checksum = 1,
.tsu = 1,
.select_mii = 1,
- .shift_rd0 = 1,
+ .magic = 1,
};
/* There is CPU dependent code */
@@ -604,12 +617,16 @@ static struct sh_eth_cpu_data r8a777x_data = {
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
- .eesipr_value = 0x01ff009f,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
.apr = 1,
@@ -625,14 +642,19 @@ static struct sh_eth_cpu_data r8a779x_data = {
.register_type = SH_ETH_REG_FAST_RCAR,
- .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
- .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
- .eesipr_value = 0x01ff009f,
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
+ ECSIPR_MPDIP,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
.trscer_err_mask = DESC_I_RINT8,
@@ -642,6 +664,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
.tpauser = 1,
.hw_swap = 1,
.rmiimode = 1,
+ .magic = 1,
};
#endif /* CONFIG_OF */
@@ -668,12 +691,16 @@ static struct sh_eth_cpu_data sh7724_data = {
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
- .eesipr_value = 0x01ff009f,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.apr = 1,
.mpr = 1,
@@ -704,12 +731,18 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -772,12 +805,19 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ EESR_TDE,
.fdr_value = 0x0000072f,
.irq_flags = IRQF_SHARED,
@@ -803,12 +843,18 @@ static struct sh_eth_cpu_data sh7734_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+ EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
- EESR_TDE | EESR_ECI,
+ EESR_TDE,
.apr = 1,
.mpr = 1,
@@ -818,9 +864,9 @@ static struct sh_eth_cpu_data sh7734_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
- .hw_crc = 1,
+ .hw_checksum = 1,
.select_mii = 1,
- .shift_rd0 = 1,
+ .magic = 1,
};
/* SH7763 */
@@ -833,12 +879,17 @@ static struct sh_eth_cpu_data sh7763_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+ EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
- EESR_ECI,
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.apr = 1,
.mpr = 1,
@@ -849,12 +900,20 @@ static struct sh_eth_cpu_data sh7763_data = {
.no_ade = 1,
.tsu = 1,
.irq_flags = IRQF_SHARED,
+ .magic = 1,
};
static struct sh_eth_cpu_data sh7619_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.apr = 1,
.mpr = 1,
@@ -865,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = {
static struct sh_eth_cpu_data sh771x_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tsu = 1,
};
@@ -936,7 +1002,7 @@ static int sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFFR);
/* Reset HW CRC register */
- if (mdp->cd->hw_crc)
+ if (mdp->cd->hw_checksum)
sh_eth_write(ndev, 0x0, CSMR);
/* Select MII mode */
@@ -1421,7 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
* the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
- if (mdp->cd->shift_rd0)
+ if (mdp->cd->hw_checksum)
desc_status >>= 16;
skb = mdp->rx_skbuff[entry];
@@ -1528,44 +1594,46 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
-/* error control function */
-static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+/* E-MAC interrupt handler */
+static void sh_eth_emac_interrupt(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 felic_stat;
u32 link_stat;
- u32 mask;
- if (intr_status & EESR_ECI) {
- felic_stat = sh_eth_read(ndev, ECSR);
- sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
- if (felic_stat & ECSR_ICD)
- ndev->stats.tx_carrier_errors++;
- if (felic_stat & ECSR_LCHNG) {
- /* Link Changed */
- if (mdp->cd->no_psr || mdp->no_ether_link) {
- goto ignore_link;
- } else {
- link_stat = (sh_eth_read(ndev, PSR));
- if (mdp->ether_link_active_low)
- link_stat = ~link_stat;
- }
- if (!(link_stat & PHY_ST_LINK)) {
- sh_eth_rcv_snd_disable(ndev);
- } else {
- /* Link Up */
- sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
- /* clear int */
- sh_eth_modify(ndev, ECSR, 0, 0);
- sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
- DMAC_M_ECI);
- /* enable tx and rx */
- sh_eth_rcv_snd_enable(ndev);
- }
+ felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
+ sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
+ if (felic_stat & ECSR_ICD)
+ ndev->stats.tx_carrier_errors++;
+ if (felic_stat & ECSR_MPD)
+ pm_wakeup_event(&mdp->pdev->dev, 0);
+ if (felic_stat & ECSR_LCHNG) {
+ /* Link Changed */
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ return;
+ link_stat = sh_eth_read(ndev, PSR);
+ if (mdp->ether_link_active_low)
+ link_stat = ~link_stat;
+ if (!(link_stat & PHY_ST_LINK)) {
+ sh_eth_rcv_snd_disable(ndev);
+ } else {
+ /* Link Up */
+ sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
+ /* clear int */
+ sh_eth_modify(ndev, ECSR, 0, 0);
+ sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
}
}
+}
+
+/* error control function */
+static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 mask;
-ignore_link:
if (intr_status & EESR_TWB) {
/* Unused write back interrupt */
if (intr_status & EESR_TABT) { /* Transmit Abort int */
@@ -1646,14 +1714,16 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
/* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
- /* Mask it with the interrupt mask, forcing ECI interrupt to be always
- * enabled since it's the one that comes thru regardless of the mask,
- * and we need to fully handle it in sh_eth_error() in order to quench
- * it as it doesn't get cleared by just writing 1 to the ECI bit...
+ /* Mask it with the interrupt mask, forcing ECI interrupt to be always
+ * enabled since it's the one that comes thru regardless of the mask,
+ * and we need to fully handle it in sh_eth_emac_interrupt() in order
+ * to quench it as it doesn't get cleared by just writing 1 to the ECI
+ * bit...
*/
intr_enable = sh_eth_read(ndev, EESIPR);
- intr_status &= intr_enable | DMAC_M_ECI;
- if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
+ intr_status &= intr_enable | EESIPR_ECIIP;
+ if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
+ cd->eesr_err_check))
ret = IRQ_HANDLED;
else
goto out;
@@ -1685,6 +1755,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
netif_wake_queue(ndev);
}
+ /* E-MAC interrupt */
+ if (intr_status & EESR_ECI)
+ sh_eth_emac_interrupt(ndev);
+
if (intr_status & cd->eesr_err_check) {
/* Clear error interrupts */
sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
@@ -1989,7 +2063,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
- if (cd->hw_crc)
+ if (cd->hw_checksum)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
@@ -2201,6 +2275,33 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return 0;
}
+static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (mdp->cd->magic && mdp->clk) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
+ }
+}
+
+static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
+
+ device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
+
+ return 0;
+}
+
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
.get_regs = sh_eth_get_regs,
@@ -2215,6 +2316,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.set_ringparam = sh_eth_set_ringparam,
.get_link_ksettings = sh_eth_get_link_ksettings,
.set_link_ksettings = sh_eth_set_link_ksettings,
+ .get_wol = sh_eth_get_wol,
+ .set_wol = sh_eth_set_wol,
};
/* network device open function */
@@ -3017,6 +3120,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
+ /* Get clock; if not found that's OK, but Wake-on-LAN is unavailable */
+ mdp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mdp->clk))
+ mdp->clk = NULL;
+
ndev->base_addr = res->start;
spin_lock_init(&mdp->lock);
@@ -3111,6 +3219,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_napi_del;
+ if (mdp->cd->magic && mdp->clk)
+ device_set_wakeup_capable(&pdev->dev, 1);
+
/* print device information */
netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
@@ -3150,15 +3261,67 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
+static int sh_eth_wol_setup(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ /* Only allow ECI interrupts */
+ synchronize_irq(ndev->irq);
+ napi_disable(&mdp->napi);
+ sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
+
+ /* Enable MagicPacket */
+ sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+
+ /* Increase clock usage count so the device won't be suspended */
+ clk_enable(mdp->clk);
+
+ return enable_irq_wake(ndev->irq);
+}
+
+static int sh_eth_wol_restore(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&mdp->napi);
+
+ /* Disable MagicPacket */
+ sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
+
+ /* The device needs to be reset to restore MagicPacket logic
+ * for next wakeup. If we close and open the device it will
+ * both be reset and all registers restored. This is what
+ * happens during suspend and resume without WoL enabled.
+ */
+ ret = sh_eth_close(ndev);
+ if (ret < 0)
+ return ret;
+ ret = sh_eth_open(ndev);
+ if (ret < 0)
+ return ret;
+
+ /* Restore clock usage count */
+ clk_disable(mdp->clk);
+
+ return disable_irq_wake(ndev->irq);
+}
+
static int sh_eth_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
+ struct sh_eth_private *mdp = netdev_priv(ndev);
int ret = 0;
- if (netif_running(ndev)) {
- netif_device_detach(ndev);
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+
+ if (mdp->wol_enabled)
+ ret = sh_eth_wol_setup(ndev);
+ else
ret = sh_eth_close(ndev);
- }
return ret;
}
@@ -3166,14 +3329,21 @@ static int sh_eth_suspend(struct device *dev)
static int sh_eth_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
+ struct sh_eth_private *mdp = netdev_priv(ndev);
int ret = 0;
- if (netif_running(ndev)) {
+ if (!netif_running(ndev))
+ return 0;
+
+ if (mdp->wol_enabled)
+ ret = sh_eth_wol_restore(ndev);
+ else
ret = sh_eth_open(ndev);
- if (ret < 0)
- return ret;
- netif_device_attach(ndev);
- }
+
+ if (ret < 0)
+ return ret;
+
+ netif_device_attach(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d050f37f3e0f..a6753ccba711 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -265,22 +265,38 @@ enum EESR_BIT {
EESR_RTO)
#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
EESR_RDE | EESR_RFRMER | EESR_ADE | \
- EESR_TFE | EESR_TDE | EESR_ECI)
+ EESR_TFE | EESR_TDE)
/* EESIPR */
-enum DMAC_IM_BIT {
- DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
- DMAC_M_RABT = 0x02000000,
- DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
- DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
- DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
- DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
- DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
- DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
- DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
- DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
- DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
- DMAC_M_RINT1 = 0x00000001,
+enum EESIPR_BIT {
+ EESIPR_TWB1IP = 0x80000000,
+ EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */
+ EESIPR_TC1IP = 0x20000000,
+ EESIPR_TUCIP = 0x10000000,
+ EESIPR_ROCIP = 0x08000000,
+ EESIPR_TABTIP = 0x04000000,
+ EESIPR_RABTIP = 0x02000000,
+ EESIPR_RFCOFIP = 0x01000000,
+ EESIPR_ADEIP = 0x00800000,
+ EESIPR_ECIIP = 0x00400000,
+ EESIPR_FTCIP = 0x00200000, /* same as TC0IP */
+ EESIPR_TDEIP = 0x00100000,
+ EESIPR_TFUFIP = 0x00080000,
+ EESIPR_FRIP = 0x00040000,
+ EESIPR_RDEIP = 0x00020000,
+ EESIPR_RFOFIP = 0x00010000,
+ EESIPR_CNDIP = 0x00000800,
+ EESIPR_DLCIP = 0x00000400,
+ EESIPR_CDIP = 0x00000200,
+ EESIPR_TROIP = 0x00000100,
+ EESIPR_RMAFIP = 0x00000080,
+ EESIPR_CEEFIP = 0x00000040,
+ EESIPR_CELFIP = 0x00000020,
+ EESIPR_RRFIP = 0x00000010,
+ EESIPR_RTLFIP = 0x00000008,
+ EESIPR_RTSFIP = 0x00000004,
+ EESIPR_PREIP = 0x00000002,
+ EESIPR_CERFIP = 0x00000001,
};
/* Receive descriptor 0 bits */
@@ -339,7 +355,7 @@ enum FELIC_MODE_BIT {
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
- ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};
@@ -488,11 +504,11 @@ struct sh_eth_cpu_data {
unsigned rpadir:1; /* E-DMAC have RPADIR */
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
- unsigned hw_crc:1; /* E-DMAC have CSMR */
+ unsigned hw_checksum:1; /* E-DMAC has CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
- unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
+ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
};
struct sh_eth_private {
@@ -501,6 +517,7 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
+ struct clk *clk;
u32 num_rx_ring;
u32 num_tx_ring;
dma_addr_t rx_desc_dma;
@@ -529,6 +546,7 @@ struct sh_eth_private {
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
unsigned is_opened:1;
+ unsigned wol_enabled:1;
};
static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 7c450b5a1138..0f63a44a955d 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
}
if (credits < budget)
- napi_complete(napi);
+ napi_complete_done(napi, credits);
rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index cddcff5a00a7..d54490d3f7ad 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
work_done = sxgbe_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
}
@@ -1706,11 +1706,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
* This function is a driver entry point whenever ifconfig command gets
* executed to see device statistics. Statistics are number of
* bytes sent or received, errors occurred etc.
- * Return value:
- * This function returns various statistical information of device.
*/
-static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void sxgbe_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->ioaddr;
@@ -1761,8 +1759,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
SXGBE_MMC_TXUFLWHI_GBCNT_REG);
writel(0, ioaddr + SXGBE_MMC_CTL_REG);
spin_unlock(&priv->stats_lock);
-
- return stats;
}
/* sxgbe_set_features - entry point to set offload features of the device.
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 17d83f37fbf2..41ad07d45144 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -433,6 +433,9 @@ typedef union efx_oword {
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
+#define EFX_AND_QWORD(qword, from, mask) \
+ (qword).u64[0] = (from).u64[0] & (mask).u64[0]
+
#define EFX_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5eb0e684fd76..92e1c6d8b293 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -60,15 +60,33 @@ struct efx_ef10_vlan {
u16 vid;
};
+enum efx_ef10_default_filters {
+ EFX_EF10_BCAST,
+ EFX_EF10_UCDEF,
+ EFX_EF10_MCDEF,
+ EFX_EF10_VXLAN4_UCDEF,
+ EFX_EF10_VXLAN4_MCDEF,
+ EFX_EF10_VXLAN6_UCDEF,
+ EFX_EF10_VXLAN6_MCDEF,
+ EFX_EF10_NVGRE4_UCDEF,
+ EFX_EF10_NVGRE4_MCDEF,
+ EFX_EF10_NVGRE6_UCDEF,
+ EFX_EF10_NVGRE6_MCDEF,
+ EFX_EF10_GENEVE4_UCDEF,
+ EFX_EF10_GENEVE4_MCDEF,
+ EFX_EF10_GENEVE6_UCDEF,
+ EFX_EF10_GENEVE6_MCDEF,
+
+ EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
struct list_head list;
u16 vid;
u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
- u16 ucdef;
- u16 bcast;
- u16 mcdef;
+ u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};
struct efx_ef10_dev_addr {
@@ -78,7 +96,7 @@ struct efx_ef10_dev_addr {
struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
u32 rx_match_mcdi_flags[
- MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
struct {
@@ -114,6 +132,23 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
+static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
+
+static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
+{
+ WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
+ return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
+}
+
+static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
+{
+ return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
+}
+
+static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
+{
+ return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
+}
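
The three helpers just added pack an (insertion priority, table row) pair into a single u32 as pri * HUNT_FILTER_TBL_ROWS * 2 + idx and decode it again with a mask and a divide; this only round-trips because the row index is below HUNT_FILTER_TBL_ROWS, a power of two. A small standalone check of that arithmetic, assuming the usual table size of 8192 (the real define lives elsewhere in the sfc driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HUNT_FILTER_TBL_ROWS 8192	/* assumed power-of-two table size */

static uint32_t make_filter_id(unsigned int pri, uint16_t idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}

static uint32_t get_unsafe_id(uint32_t filter_id)
{
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);	/* low bits = row */
}

static unsigned int get_unsafe_pri(uint32_t filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);	/* high part = pri */
}

int main(void)
{
	uint32_t id = make_filter_id(3, 42);

	assert(get_unsafe_id(id) == 42);
	assert(get_unsafe_pri(id) == 3);
	printf("id=%u pri=%u idx=%u\n", id, get_unsafe_pri(id), get_unsafe_id(id));
	return 0;
}
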
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
@@ -197,11 +232,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
- if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
GET_CAPABILITIES_V2_OUT_FLAGS2);
- else
+ nic_data->piobuf_size = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
+ } else {
nic_data->datapath_caps2 = 0;
+ nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
+ }
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
@@ -547,7 +586,6 @@ static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
- struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -603,6 +641,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail2;
+ mutex_init(&nic_data->udp_tunnels_lock);
+
/* Reset (most) configuration for this function */
rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc)
@@ -637,7 +677,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc < 0)
goto fail5;
efx->port_num = rc;
- net_dev->dev_port = rc;
rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
@@ -692,6 +731,14 @@ fail5:
fail4:
device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
+ efx_mcdi_detach(efx);
+
+ mutex_lock(&nic_data->udp_tunnels_lock);
+ memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
+ (void)efx_ef10_set_udp_tnl_ports(efx, true);
+ mutex_unlock(&nic_data->udp_tunnels_lock);
+ mutex_destroy(&nic_data->udp_tunnels_lock);
+
efx_mcdi_fini(efx);
fail2:
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
@@ -825,8 +872,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
offset = ((efx->tx_channel_offset + efx->n_tx_channels -
tx_queue->channel->channel - 1) *
efx_piobuf_size);
- index = offset / ER_DZ_TX_PIOBUF_SIZE;
- offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+ index = offset / nic_data->piobuf_size;
+ offset = offset % nic_data->piobuf_size;
/* When the host page size is 4K, the first
* host page in the WC mapping may be within
@@ -961,6 +1008,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+ efx_mcdi_detach(efx);
+
+ memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
+ mutex_lock(&nic_data->udp_tunnels_lock);
+ (void)efx_ef10_set_udp_tnl_ports(efx, true);
+ mutex_unlock(&nic_data->udp_tunnels_lock);
+
+ mutex_destroy(&nic_data->udp_tunnels_lock);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
@@ -1161,14 +1217,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* functions of the controller.
*/
if (efx_piobuf_size != 0 &&
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
efx->n_tx_channels) {
unsigned int n_piobufs =
DIV_ROUND_UP(efx->n_tx_channels,
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+ nic_data->piobuf_size / efx_piobuf_size);
rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
- if (rc)
+ if (rc == -ENOSPC)
+ netif_dbg(efx, probe, efx->net_dev,
+ "out of PIO buffers; cannot allocate more\n");
+ else if (rc == -EPERM)
+ netif_dbg(efx, probe, efx->net_dev,
+ "not permitted to allocate PIO buffers\n");
+ else if (rc)
netif_err(efx, probe, efx->net_dev,
"failed to allocate PIO buffers (%d)\n", rc);
else
@@ -1315,15 +1377,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
efx_ef10_free_piobufs(efx);
}
- /* Log an error on failure, but this is non-fatal */
- if (rc)
+ /* Log an error on failure, but this is non-fatal.
+ * Permission errors are less important - we've presumably
+ * had the PIO buffer licence removed.
+ */
+ if (rc == -EPERM)
+ netif_dbg(efx, drv, efx->net_dev,
+ "not permitted to restore PIO buffers\n");
+ else if (rc)
netif_err(efx, drv, efx->net_dev,
"failed to restore PIO buffers (%d)\n", rc);
nic_data->must_restore_piobufs = false;
}
/* don't fail init if RSS setup doesn't work */
- rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
efx->rss_active = (rc == 0);
return 0;
@@ -2360,7 +2428,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
/* Create TX descriptor ring entry */
if (buffer->flags & EFX_TX_BUF_OPTION) {
*txd = buffer->option;
+ if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
+ /* PIO descriptor */
+ tx_queue->packet_write_count = tx_queue->write_count;
} else {
+ tx_queue->packet_write_count = tx_queue->write_count;
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_3(
*txd,
@@ -2529,7 +2601,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
}
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table, const u8 *key)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -2540,6 +2612,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+ /* This iterates over the length of efx->rx_indir_table, but copies
+ * bytes from rx_indir_table. That's because the latter is a pointer
+ * rather than an array, but should have the same length.
+ * The efx->rx_hash_key loop below is similar.
+ */
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
@@ -2555,8 +2632,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
- MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
- efx->rx_hash_key[i];
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
sizeof(keybuf), NULL, 0, NULL);
@@ -2589,7 +2665,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
}
static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table,
+ const u8 *key)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
@@ -2608,7 +2685,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
}
rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
- rx_indir_table);
+ rx_indir_table, key);
if (rc != 0)
goto fail2;
@@ -2619,6 +2696,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (rx_indir_table != efx->rx_indir_table)
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
+ if (key != efx->rx_hash_key)
+ memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
return 0;
fail2:
@@ -2629,15 +2709,69 @@ fail1:
return rc;
}
+static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+ size_t outlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+ MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+ return -ENOENT;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+ nic_data->rx_rss_context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+ tablebuf, sizeof(tablebuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+ RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+ nic_data->rx_rss_context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+ keybuf, sizeof(keybuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ efx->rx_hash_key[i] = MCDI_PTR(
+ keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+ return 0;
+}
+
static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table,
+ const u8 *key)
{
int rc;
if (efx->rss_spread == 1)
return 0;
- rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+ if (!key)
+ key = efx->rx_hash_key;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
if (rc == -ENOBUFS && !user) {
unsigned context_size;
@@ -2675,6 +2809,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
__attribute__ ((unused)))
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -3054,13 +3190,103 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}
+static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
+ unsigned int n_packets,
+ unsigned int rx_encap_hdr,
+ unsigned int rx_l3_class,
+ unsigned int rx_l4_class,
+ const efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
+ if (!efx->loopback_selftest)
+ channel->n_rx_eth_crc_err += n_packets;
+ return EFX_RX_PKT_DISCARD;
+ }
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
+ if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
+ netdev_WARN(efx->net_dev,
+ "invalid class for RX_IPCKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ if (!efx->loopback_selftest)
+ *(rx_encap_hdr ?
+ &channel->n_rx_outer_ip_hdr_chksum_err :
+ &channel->n_rx_ip_hdr_chksum_err) += n_packets;
+ return 0;
+ }
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
+ ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
+ (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+ netdev_WARN(efx->net_dev,
+ "invalid class for RX_TCPUDP_CKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ if (!efx->loopback_selftest)
+ *(rx_encap_hdr ?
+ &channel->n_rx_outer_tcp_udp_chksum_err :
+ &channel->n_rx_tcp_udp_chksum_err) += n_packets;
+ return 0;
+ }
+ if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
+ if (unlikely(!rx_encap_hdr))
+ netdev_WARN(efx->net_dev,
+ "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
+ netdev_WARN(efx->net_dev,
+ "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ if (!efx->loopback_selftest)
+ channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
+ return 0;
+ }
+ if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
+ if (unlikely(!rx_encap_hdr))
+ netdev_WARN(efx->net_dev,
+ "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+ rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
+ (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+ netdev_WARN(efx->net_dev,
+ "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ if (!efx->loopback_selftest)
+ channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
+ return 0;
+ }
+
+ WARN_ON(1); /* No error bits were recognised */
+ return 0;
+}
+
static int efx_ef10_handle_rx_event(struct efx_channel *channel,
const efx_qword_t *event)
{
- unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
+ unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
unsigned int n_descs, n_packets, i;
struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_rx_queue *rx_queue;
+ efx_qword_t errors;
bool rx_cont;
u16 flags = 0;
@@ -3071,8 +3297,14 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+ rx_encap_hdr =
+ nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
+ EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
+ ESE_EZ_ENCAP_HDR_NONE;
if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
@@ -3132,17 +3364,38 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
n_packets = 1;
}
- if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
- flags |= EFX_RX_PKT_DISCARD;
-
- if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
- channel->n_rx_ip_hdr_chksum_err += n_packets;
- } else if (unlikely(EFX_QWORD_FIELD(*event,
- ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
- channel->n_rx_tcp_udp_chksum_err += n_packets;
- } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
- rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
- flags |= EFX_RX_PKT_CSUMMED;
+ EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
+ ESF_DZ_RX_IPCKSUM_ERR, 1,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
+ ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
+ ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
+ EFX_AND_QWORD(errors, *event, errors);
+ if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
+ flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
+ rx_encap_hdr,
+ rx_l3_class, rx_l4_class,
+ event);
+ } else {
+ bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+
+ switch (rx_encap_hdr) {
+ case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
+ flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
+ if (tcpudp)
+ flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
+ break;
+ case ESE_EZ_ENCAP_HDR_GRE:
+ case ESE_EZ_ENCAP_HDR_NONE:
+ if (tcpudp)
+ flags |= EFX_RX_PKT_CSUMMED;
+ break;
+ default:
+ netdev_WARN(efx->net_dev,
+ "unknown encapsulation type: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ }
}
if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
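The switch above only sets the two checksum flags; how they are consumed is outside this hunk. A minimal standalone sketch (not part of the patch; the flag values and the report_csum() helper are invented for illustration) of what the flags are meant to convey:

#include <stdbool.h>
#include <stdio.h>

#define RX_PKT_CSUMMED    0x1   /* outermost L4 checksum verified by hardware */
#define RX_PKT_CSUM_LEVEL 0x2   /* ...and so was the inner (encapsulated) one */

static void report_csum(unsigned int flags)
{
	if (!(flags & RX_PKT_CSUMMED)) {
		puts("no checksum offload: stack must verify in software");
		return;
	}
	/* CSUM_LEVEL only means anything when CSUMMED is also set */
	printf("hardware verified %s checksum(s)\n",
	       (flags & RX_PKT_CSUM_LEVEL) ? "outer and inner" : "outer");
}

int main(void)
{
	report_csum(RX_PKT_CSUMMED);                      /* plain TCP/UDP */
	report_csum(RX_PKT_CSUMMED | RX_PKT_CSUM_LEVEL);  /* TCP inside VXLAN/GENEVE */
	report_csum(0);                                   /* unknown encapsulation */
	return 0;
}

The key point the VXLAN case above relies on is that the "level" flag is only meaningful in combination with the base "csummed" flag.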
@@ -3510,6 +3763,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
table->entry[filter_idx].spec = (unsigned long)spec | flags;
}
+static void
+efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ u32 match_fields = 0, uc_match, mc_match;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+#define COPY_VALUE(value, mcdi_field) \
+ do { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(value)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &value, sizeof(value)); \
+ } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ COPY_VALUE(spec->gen_field, mcdi_field); \
+ }
+ /* Handle encap filters first. They will always be mismatch
+ * (unknown UC or MC) filters
+ */
+ if (encap_type) {
+ /* ether_type and outer_ip_proto need to be variables
+ * because COPY_VALUE wants to memcpy them
+ */
+ __be16 ether_type =
+ htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+ ETH_P_IPV6 : ETH_P_IP);
+ u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+ u8 outer_ip_proto;
+
+ switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+ case EFX_ENCAP_TYPE_VXLAN:
+ vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+ /* fallthrough */
+ case EFX_ENCAP_TYPE_GENEVE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_UDP;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ /* We always need to set the type field, even
+ * though we're not matching on the TNI.
+ */
+ MCDI_POPULATE_DWORD_1(inbuf,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ vni_type);
+ break;
+ case EFX_ENCAP_TYPE_NVGRE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_GRE;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+}
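For reference, a standalone sketch of the COPY_VALUE/COPY_FIELD pattern used above: set a bit in a match-field bitmap, then memcpy the value, already in network byte order, into the command buffer. This is a simplification (the real macro writes through MCDI_PTR and length-checks with BUILD_BUG_ON); the struct layout, bit numbers and example values below are invented.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MATCH_DST_PORT_LBN   0   /* hypothetical bit numbers */
#define MATCH_ETHER_TYPE_LBN 1

struct fake_cmd {
	uint32_t match_fields;   /* bitmap: which fields below are valid */
	uint8_t  DST_PORT[2];    /* values kept in network byte order */
	uint8_t  ETHER_TYPE[2];
};

/* Same shape as COPY_VALUE above: set the field's bit, then copy the
 * already-big-endian value into the command buffer.
 */
#define COPY_VALUE(cmd, value, field)                                  \
	do {                                                           \
		(cmd)->match_fields |= 1u << MATCH_ ## field ## _LBN;  \
		memcpy((cmd)->field, &(value), sizeof(value));         \
	} while (0)

int main(void)
{
	struct fake_cmd cmd = { 0 };
	uint16_t dport = htons(4789);     /* VXLAN port, network order */
	uint16_t etype = htons(0x0800);   /* IPv4 ethertype */

	COPY_VALUE(&cmd, dport, DST_PORT);
	COPY_VALUE(&cmd, etype, ETHER_TYPE);

	printf("match_fields = 0x%x\n", cmd.match_fields);
	printf("DST_PORT bytes = %02x %02x\n", cmd.DST_PORT[0], cmd.DST_PORT[1]);
	return 0;
}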
+
static void efx_ef10_filter_push_prep(struct efx_nic *efx,
const struct efx_filter_spec *spec,
efx_dword_t *inbuf, u64 handle,
@@ -3518,7 +3869,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
- memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+ memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
/* Remove RSS flag if we don't have an RSS context. */
if (flags & EFX_FILTER_FLAG_RX_RSS &&
@@ -3531,46 +3882,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_OP_REPLACE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
} else {
- u32 match_fields = 0;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_INSERT :
- MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
- /* Convert match flags and values. Unlike almost
- * everything else in MCDI, these fields are in
- * network byte order.
- */
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
- match_fields |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
- if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
- match_fields |= \
- 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN; \
- BUILD_BUG_ON( \
- MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
- sizeof(spec->gen_field)); \
- memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
- &spec->gen_field, sizeof(spec->gen_field)); \
- }
- COPY_FIELD(REM_HOST, rem_host, SRC_IP);
- COPY_FIELD(LOC_HOST, loc_host, DST_IP);
- COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
- COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
- COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
- COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
- COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
- COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
- COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
- COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
- match_fields);
+ efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
@@ -3599,8 +3911,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
const struct efx_filter_spec *spec,
u64 *handle, bool replacing)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
int rc;
efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
@@ -3615,37 +3927,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
unsigned int match_flags = spec->match_flags;
+ unsigned int uc_match, mc_match;
u32 mcdi_flags = 0;
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << ((encap) ? \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+ mcdi_field ## _LBN : \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+ mcdi_field ## _LBN)); \
+ }
+ /* inner or outer based on encap type */
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+ /* always outer */
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* special handling for encap type, and mismatch */
+ if (encap_type) {
+ match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+ mcdi_flags |=
+ (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
mcdi_flags |=
is_multicast_ether_addr(spec->loc_mac) ?
- (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
- (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+ 1 << mc_match :
+ 1 << uc_match;
}
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
- unsigned int old_match_flags = match_flags; \
- match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
- if (match_flags != old_match_flags) \
- mcdi_flags |= \
- (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
- }
- MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
- MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
- MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
- MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
- MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
- MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
- MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
- MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
-#undef MAP_FILTER_TO_MCDI_FLAG
-
/* Did we map them all? */
WARN_ON_ONCE(match_flags);
@@ -3877,7 +4210,7 @@ found:
/* If successful, return the inserted filter ID */
if (rc == 0)
- rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+ rc = efx_ef10_make_filter_id(match_pri, ins_index);
wake_up_all(&table->waitq);
out_unlock:
@@ -3900,7 +4233,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
unsigned int priority_mask,
u32 filter_id, bool by_index)
{
- unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
struct efx_ef10_filter_table *table = efx->filter_state;
MCDI_DECLARE_BUF(inbuf,
MC_CMD_FILTER_OP_IN_HANDLE_OFST +
@@ -3927,7 +4260,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
if (!spec ||
(!by_index &&
efx_ef10_filter_pri(table, spec) !=
- filter_id / HUNT_FILTER_TBL_ROWS)) {
+ efx_ef10_filter_get_unsafe_pri(filter_id))) {
rc = -ENOENT;
goto out_unlock;
}
@@ -3976,13 +4309,18 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf), NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
spin_lock_bh(&efx->filter_lock);
- if (rc == 0) {
+ if ((rc == 0) || (rc == -ENOENT)) {
+ /* Filter removed OK or didn't actually exist */
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
+ MC_CMD_FILTER_OP_IN_LEN,
+ NULL, 0, rc);
}
}
@@ -4002,11 +4340,6 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
filter_id, false);
}
-static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
-{
- return filter_id % HUNT_FILTER_TBL_ROWS;
-}
-
static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
@@ -4020,7 +4353,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
{
- unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
struct efx_ef10_filter_table *table = efx->filter_state;
const struct efx_filter_spec *saved_spec;
int rc;
@@ -4029,7 +4362,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
efx_ef10_filter_pri(table, saved_spec) ==
- filter_id / HUNT_FILTER_TBL_ROWS) {
+ efx_ef10_filter_get_unsafe_pri(filter_id)) {
*spec = *saved_spec;
rc = 0;
} else {
@@ -4081,7 +4414,7 @@ static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
}
static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
@@ -4101,8 +4434,9 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
count = -EMSGSIZE;
break;
}
- buf[count++] = (efx_ef10_filter_pri(table, spec) *
- HUNT_FILTER_TBL_ROWS +
+ buf[count++] =
+ efx_ef10_make_filter_id(
+ efx_ef10_filter_pri(table, spec),
filter_idx);
}
}
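The efx_ef10_make_filter_id() and efx_ef10_filter_get_unsafe_{id,pri}() helpers used in these hunks are defined earlier in the patch and not shown here. A standalone sketch of the general idea, folding a priority band and a table index into one u32; the bit split below is invented and need not match the driver's actual encoding:

#include <stdint.h>
#include <stdio.h>

#define TBL_ROW_BITS 13				/* hypothetical 8192-row table */

static uint32_t make_filter_id(unsigned int pri, unsigned int idx)
{
	return (pri << TBL_ROW_BITS) | idx;
}

static unsigned int filter_id_idx(uint32_t id) { return id & ((1u << TBL_ROW_BITS) - 1); }
static unsigned int filter_id_pri(uint32_t id) { return id >> TBL_ROW_BITS; }

int main(void)
{
	uint32_t id = make_filter_id(2, 100);

	printf("id=%u pri=%u idx=%u\n", id, filter_id_pri(id), filter_id_idx(id));
	return 0;
}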
@@ -4305,29 +4639,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
#endif /* CONFIG_RFS_ACCEL */
-static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
{
int match_flags = 0;
-#define MAP_FLAG(gen_flag, mcdi_field) { \
+#define MAP_FLAG(gen_flag, mcdi_field) do { \
u32 old_mcdi_flags = mcdi_flags; \
- mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
if (mcdi_flags != old_mcdi_flags) \
match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ } while (0)
+
+ if (encap) {
+ /* encap filters must specify encap type */
+ match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ /* and imply ethertype and ip proto */
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ /* VLAN tags refer to the outer packet */
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ /* everything else refers to the inner packet */
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+ MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+ MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+ MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+ MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+ MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+ MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+ MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+ } else {
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
}
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, SRC_IP);
- MAP_FLAG(LOC_HOST, DST_IP);
- MAP_FLAG(REM_MAC, SRC_MAC);
- MAP_FLAG(REM_PORT, SRC_PORT);
- MAP_FLAG(LOC_MAC, DST_MAC);
- MAP_FLAG(LOC_PORT, DST_PORT);
- MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FLAG(IP_PROTO, IP_PROTO);
#undef MAP_FLAG
/* Did we map them all? */
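A standalone sketch of the translate-and-verify pattern used by MAP_FLAG above: strip every recognised bit, and if anything is left over, report the combination as unsupported so the probe loop can skip it. The flag values and the generic-flag encoding below are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define FLAG_SRC_IP  (1u << 0)
#define FLAG_DST_IP  (1u << 1)
#define FLAG_VLAN    (1u << 2)

static int translate(uint32_t hw_flags)
{
	uint32_t left = hw_flags;
	int generic = 0;

	if (left & FLAG_SRC_IP) { generic |= 0x10; left &= ~FLAG_SRC_IP; }
	if (left & FLAG_DST_IP) { generic |= 0x20; left &= ~FLAG_DST_IP; }
	if (left & FLAG_VLAN)   { generic |= 0x40; left &= ~FLAG_VLAN; }

	if (left)		/* firmware reported a bit we don't know */
		return -1;	/* caller treats this combination as unsupported */
	return generic;
}

int main(void)
{
	printf("%d\n", translate(FLAG_SRC_IP | FLAG_DST_IP)); /* 0x30 = 48 */
	printf("%d\n", translate(FLAG_VLAN | (1u << 31)));    /* unknown bit -> -1 */
	return 0;
}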
@@ -4354,6 +4713,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
}
static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+ bool encap,
enum efx_filter_match_flags match_flags)
{
unsigned int match_pri;
@@ -4362,7 +4722,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
for (match_pri = 0;
match_pri < table->rx_match_count;
match_pri++) {
- mf = efx_ef10_filter_match_flags_from_mcdi(
+ mf = efx_ef10_filter_match_flags_from_mcdi(encap,
table->rx_match_mcdi_flags[match_pri]);
if (mf == match_flags)
return true;
@@ -4371,39 +4731,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
return false;
}
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+static int
+efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
+ struct efx_ef10_filter_table *table,
+ bool encap)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
unsigned int pd_match_pri, pd_match_count;
- struct efx_ef10_filter_table *table;
- struct efx_ef10_vlan *vlan;
size_t outlen;
int rc;
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- if (efx->filter_state) /* already probed */
- return 0;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
/* Find out which RX filter types are supported, and their priorities */
MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
&outlen);
if (rc)
- goto fail;
+ return rc;
+
pd_match_count = MCDI_VAR_ARRAY_LEN(
outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
- table->rx_match_count = 0;
for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
u32 mcdi_flags =
@@ -4411,7 +4762,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
outbuf,
GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
pd_match_pri);
- rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
if (rc < 0) {
netif_dbg(efx, probe, efx->net_dev,
"%s: fw flags %#x pri %u not supported in driver\n",
@@ -4426,10 +4777,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
}
}
+ return 0;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_ef10_filter_table *table;
+ struct efx_ef10_vlan *vlan;
+ int rc;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->rx_match_count = 0;
+ rc = efx_ef10_filter_table_probe_matches(efx, table, false);
+ if (rc)
+ goto fail;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ rc = efx_ef10_filter_table_probe_matches(efx, table, true);
+ if (rc)
+ goto fail;
if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(efx_ef10_filter_match_supported(table,
+ !(efx_ef10_filter_match_supported(table, false,
(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
- efx_ef10_filter_match_supported(table,
+ efx_ef10_filter_match_supported(table, false,
(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
netif_info(efx, probe, net_dev,
"VLAN filters are not supported in this firmware variant\n");
@@ -4475,10 +4856,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int invalid_filters = 0, failed = 0;
+ struct efx_ef10_filter_vlan *vlan;
struct efx_filter_spec *spec;
unsigned int filter_idx;
- bool failed = false;
- int rc;
+ u32 mcdi_flags;
+ int match_pri;
+ int rc, i;
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
@@ -4495,6 +4879,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
if (!spec)
continue;
+ mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
+ match_pri = 0;
+ while (match_pri < table->rx_match_count &&
+ table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+ ++match_pri;
+ if (match_pri >= table->rx_match_count) {
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+ spec->rss_context != nic_data->rx_rss_context)
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with specific RSS context.\n");
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
@@ -4502,10 +4900,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
&table->entry[filter_idx].handle,
false);
if (rc)
- failed = true;
-
+ failed++;
spin_lock_bh(&efx->filter_lock);
+
if (rc) {
+not_restored:
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+ if (vlan->default_filters[i] == filter_idx)
+ vlan->default_filters[i] =
+ EFX_EF10_FILTER_ID_INVALID;
+
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
} else {
@@ -4516,9 +4921,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
spin_unlock_bh(&efx->filter_lock);
+ /* This can happen validly if the MC's capabilities have changed, so
+ * is not an error.
+ */
+ if (invalid_filters)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Did not restore %u filters that are now unsupported.\n",
+ invalid_filters);
+
if (failed)
netif_err(efx, hw, efx->net_dev,
- "unable to restore all filters\n");
+ "unable to restore %u filters\n", failed);
else
nic_data->must_restore_filters = false;
}
@@ -4575,7 +4988,7 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
unsigned int filter_idx;
if (*id != EFX_EF10_FILTER_ID_INVALID) {
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
+ filter_idx = efx_ef10_filter_get_unsafe_id(*id);
if (!table->entry[filter_idx].spec)
netif_dbg(efx, drv, efx->net_dev,
"marked null spec old %04x:%04x\n", *id,
@@ -4596,9 +5009,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
for (i = 0; i < table->dev_mc_count; i++)
efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
- efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
- efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
- efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
}
/* Mark old filters that may need to be removed.
@@ -4711,11 +5123,13 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc = EFX_EF10_FILTER_ID_INVALID;
}
}
- ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
+ ids[i] = efx_ef10_filter_get_unsafe_id(rc);
}
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
+ EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
@@ -4732,9 +5146,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
}
return rc;
} else {
- EFX_WARN_ON_PARANOID(vlan->bcast !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_ef10_filter_get_unsafe_id(rc);
}
}
@@ -4743,6 +5156,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
static int efx_ef10_filter_insert_def(struct efx_nic *efx,
struct efx_ef10_filter_vlan *vlan,
+ enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4750,6 +5164,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
int rc;
+ u16 *id;
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -4760,19 +5175,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
else
efx_filter_set_uc_def(&spec);
+ if (encap_type) {
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx_filter_set_encap_type(&spec, encap_type);
+ else
+ /* don't insert encap filters on non-supporting
+ * platforms. ID will be left as INVALID.
+ */
+ return 0;
+ }
+
if (vlan->vid != EFX_FILTER_VID_UNSPEC)
efx_filter_set_eth_local(&spec, vlan->vid, NULL);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
- efx->net_dev,
- "%scast mismatch filter insert failed rc=%d\n",
- multicast ? "Multi" : "Uni", rc);
+ const char *um = multicast ? "Multicast" : "Unicast";
+ const char *encap_name = "";
+ const char *encap_ipv = "";
+
+ if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_VXLAN)
+ encap_name = "VXLAN ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_NVGRE)
+ encap_name = "NVGRE ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_GENEVE)
+ encap_name = "GENEVE ";
+ if (encap_type & EFX_ENCAP_FLAG_IPV6)
+ encap_ipv = "IPv6 ";
+ else if (encap_type)
+ encap_ipv = "IPv4 ";
+
+ /* unprivileged functions can't insert mismatch filters
+ * for encapsulated or unicast traffic, so downgrade
+ * those warnings to debug.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ rc == -EPERM && (encap_type || !multicast), warn,
+ "%s%s%s mismatch filter insert failed rc=%d\n",
+ encap_name, encap_ipv, um, rc);
} else if (multicast) {
- EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
- vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
- if (!nic_data->workaround_26807) {
+ /* mapping from encap types to default filter IDs (multicast) */
+ static enum efx_ef10_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_MCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = efx_ef10_filter_get_unsafe_id(rc);
+ if (!nic_data->workaround_26807 && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -4787,20 +5258,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
/* Roll back the mc_def filter */
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- vlan->mcdef);
- vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+ *id);
+ *id = EFX_EF10_FILTER_ID_INVALID;
return rc;
}
} else {
- EFX_WARN_ON_PARANOID(vlan->bcast !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(
+ vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_ef10_filter_get_unsafe_id(rc);
}
}
rc = 0;
} else {
- EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
- vlan->ucdef = rc;
+ /* mapping from encap types to default filter IDs (unicast) */
+ static enum efx_ef10_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_UCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = rc;
rc = 0;
}
return rc;
@@ -4894,7 +5389,7 @@ restore_filters:
if (rc2)
goto reset_nic;
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
return rc;
@@ -4923,7 +5418,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* Insert/renew unicast filters */
if (table->uc_promisc) {
- efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+ false, false);
efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
} else {
/* If any of the filters failed to insert, fall back to
@@ -4931,8 +5427,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* our individual unicast filters.
*/
if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
- efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ false, false);
}
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
/* Insert/renew multicast filters */
/* If changing promiscuous state with cascaded multicast filters, remove
@@ -4946,7 +5459,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
*/
- if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
+ if (efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true)) {
/* Changing promisc state, so remove old filters */
efx_ef10_filter_remove_old(efx);
efx_ef10_filter_insert_addr_list(efx, vlan,
@@ -4956,7 +5471,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* If we failed to insert promiscuous filters, don't
* rollback. Regardless, also insert the mc_list
*/
- efx_ef10_filter_insert_def(efx, vlan, true, false);
+ efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, false);
efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
}
} else {
@@ -4969,11 +5486,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* Changing promisc state, so remove old filters */
if (nic_data->workaround_26807)
efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+ if (efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true))
efx_ef10_filter_insert_addr_list(efx, vlan,
true, false);
}
}
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
}
/* Caller must hold efx->filter_sem for read if race against
@@ -5060,9 +5594,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
- vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
- vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
- vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
list_add_tail(&vlan->list, &table->vlan_list);
@@ -5089,9 +5622,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
vlan->mc[i]);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->default_filters[i]);
kfree(vlan);
}
@@ -5141,7 +5675,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
if (was_enabled)
efx_net_open(efx->net_dev);
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
#ifdef CONFIG_SFC_SRIOV
if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
@@ -5540,6 +6074,20 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
+
static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
{
if (proto != htons(ETH_P_8021Q))
@@ -5556,6 +6104,271 @@ static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
return efx_ef10_del_vlan(efx, vid);
}
+/* We rely on the MCDI wiping out our TX rings if it made any changes to the
+ * ports table, ensuring that any TSO descriptors that were made on a now-
+ * removed tunnel port will be blown away and won't break things when we try
+ * to transmit them using the new ports table.
+ */
+static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
+ bool will_reset = false;
+ size_t num_entries = 0;
+ size_t inlen, outlen;
+ size_t i;
+ int rc;
+ efx_dword_t flags_and_num_entries;
+
+ WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
+
+ nic_data->udp_tunnels_dirty = false;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
+ efx_device_attach_if_not_resetting(efx);
+ return 0;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+
+ for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
+ if (nic_data->udp_tunnels[i].count &&
+ nic_data->udp_tunnels[i].port) {
+ efx_dword_t entry;
+
+ EFX_POPULATE_DWORD_2(entry,
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
+ ntohs(nic_data->udp_tunnels[i].port),
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
+ nic_data->udp_tunnels[i].type);
+ *_MCDI_ARRAY_DWORD(inbuf,
+ SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
+ num_entries++) = entry;
+ }
+ }
+
+ BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
+ EFX_WORD_1_LBN);
+ BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
+ EFX_WORD_1_WIDTH);
+ EFX_POPULATE_DWORD_2(flags_and_num_entries,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
+ !!unloading,
+ EFX_WORD_1, num_entries);
+ *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
+ flags_and_num_entries;
+
+ inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
+ inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EIO) {
+ /* Most likely the MC rebooted due to another function also
+ * setting its tunnel port list. Mark the tunnel port list as
+ * dirty, so it will be pushed upon coming up from the reboot.
+ */
+ nic_data->udp_tunnels_dirty = true;
+ return 0;
+ }
+
+ if (rc) {
+ /* not available on unprivileged functions, as expected */
+ if (rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "Unable to set UDP tunnel ports; rc=%d.\n", rc);
+ } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
+ (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
+ netif_info(efx, drv, efx->net_dev,
+ "Rebooting MC due to UDP tunnel port list change\n");
+ will_reset = true;
+ if (unloading)
+ /* Delay for the MC reset to complete. This will make
+ * unloading other functions a bit smoother. This is a
+ * race, but the other unload will work whichever way
+ * it goes; this just avoids an unnecessary error
+ * message.
+ */
+ msleep(100);
+ }
+ if (!will_reset && !unloading) {
+ /* The caller will have detached, relying on the MC reset to
+ * trigger a re-attach. Since there won't be an MC reset, we
+ * have to do the attach ourselves.
+ */
+ efx_device_attach_if_not_resetting(efx);
+ }
+
+ return rc;
+}
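The EFX_POPULATE_DWORD_2(flags_and_num_entries, ...) call above, together with the two BUILD_BUG_ONs, relies on the 16-bit NUM_ENTRIES field occupying the upper word of the FLAGS dword. A standalone sketch of that packing; the UNLOADING bit position is a guess, only the 16-bit split is implied by the code:

#include <stdint.h>
#include <stdio.h>

#define UNLOADING_LBN	0	/* hypothetical flag bit */
#define NUM_ENTRIES_LBN	16	/* entry count lives in the upper 16 bits */

static uint32_t pack_flags(int unloading, unsigned int num_entries)
{
	return ((unloading ? 1u : 0u) << UNLOADING_LBN) |
	       ((num_entries & 0xffffu) << NUM_ENTRIES_LBN);
}

int main(void)
{
	printf("0x%08x\n", pack_flags(0, 2));	/* two ports, not unloading */
	printf("0x%08x\n", pack_flags(1, 16));	/* unloading, full table */
	return 0;
}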
+
+static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = 0;
+
+ mutex_lock(&nic_data->udp_tunnels_lock);
+ if (nic_data->udp_tunnels_dirty) {
+ /* Make sure all TX are stopped while we modify the table, else
+ * we might race against an efx_features_check().
+ */
+ efx_device_detach_sync(efx);
+ rc = efx_ef10_set_udp_tnl_ports(efx, false);
+ }
+ mutex_unlock(&nic_data->udp_tunnels_lock);
+ return rc;
+}
+
+static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
+ __be16 port)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
+ if (!nic_data->udp_tunnels[i].count)
+ continue;
+ if (nic_data->udp_tunnels[i].port == port)
+ return &nic_data->udp_tunnels[i];
+ }
+ return NULL;
+}
+
+static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
+ struct efx_udp_tunnel tnl)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_udp_tunnel *match;
+ char typebuf[8];
+ size_t i;
+ int rc;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+ return 0;
+
+ efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
+ netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
+ typebuf, ntohs(tnl.port));
+
+ mutex_lock(&nic_data->udp_tunnels_lock);
+ /* Make sure all TX are stopped while we add to the table, else we
+ * might race against an efx_features_check().
+ */
+ efx_device_detach_sync(efx);
+
+ match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
+ if (match != NULL) {
+ if (match->type == tnl.type) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Referencing existing tunnel entry\n");
+ match->count++;
+ /* No need to cause an MCDI update */
+ rc = 0;
+ goto unlock_out;
+ }
+ efx_get_udp_tunnel_type_name(match->type,
+ typebuf, sizeof(typebuf));
+ netif_dbg(efx, drv, efx->net_dev,
+ "UDP port %d is already in use by %s\n",
+ ntohs(tnl.port), typebuf);
+ rc = -EEXIST;
+ goto unlock_out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
+ if (!nic_data->udp_tunnels[i].count) {
+ nic_data->udp_tunnels[i] = tnl;
+ nic_data->udp_tunnels[i].count = 1;
+ rc = efx_ef10_set_udp_tnl_ports(efx, false);
+ goto unlock_out;
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
+ typebuf, ntohs(tnl.port));
+
+ rc = -ENOMEM;
+
+unlock_out:
+ mutex_unlock(&nic_data->udp_tunnels_lock);
+ return rc;
+}
+
+/* Called under the TX lock with the TX queue running, hence no-one can be
+ * in the middle of updating the UDP tunnels table. However, they could
+ * have tried and failed the MCDI, in which case they'll have set the dirty
+ * flag before dropping their locks.
+ */
+static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+ return false;
+
+ if (nic_data->udp_tunnels_dirty)
+ /* SW table may not match HW state, so just assume we can't
+ * use any UDP tunnel offloads.
+ */
+ return false;
+
+ return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
+}
+
+static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
+ struct efx_udp_tunnel tnl)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_udp_tunnel *match;
+ char typebuf[8];
+ int rc;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+ return 0;
+
+ efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
+ netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
+ typebuf, ntohs(tnl.port));
+
+ mutex_lock(&nic_data->udp_tunnels_lock);
+ /* Make sure all TX are stopped while we remove from the table, else we
+ * might race against an efx_features_check().
+ */
+ efx_device_detach_sync(efx);
+
+ match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
+ if (match != NULL) {
+ if (match->type == tnl.type) {
+ if (--match->count) {
+ /* Port is still in use, so nothing to do */
+ netif_dbg(efx, drv, efx->net_dev,
+ "UDP tunnel port %d remains active\n",
+ ntohs(tnl.port));
+ rc = 0;
+ goto out_unlock;
+ }
+ rc = efx_ef10_set_udp_tnl_ports(efx, false);
+ goto out_unlock;
+ }
+ efx_get_udp_tunnel_type_name(match->type,
+ typebuf, sizeof(typebuf));
+ netif_warn(efx, drv, efx->net_dev,
+ "UDP port %d is actually in use by %s, not removing\n",
+ ntohs(tnl.port), typebuf);
+ }
+ rc = -ENOENT;
+
+out_unlock:
+ mutex_unlock(&nic_data->udp_tunnels_lock);
+ return rc;
+}
+
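Taken together, efx_ef10_udp_tnl_{add,del}_port keep a small reference-counted table keyed by UDP port, so two users of the same port (e.g. two VXLAN devices) only program the hardware once, and only the last removal clears the entry. A standalone sketch of that bookkeeping, with invented types and table size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tnl_entry {
	uint16_t port;
	uint16_t type;
	unsigned int count;	/* 0 means slot unused */
};

static struct tnl_entry table[16];

static bool tnl_add(uint16_t port, uint16_t type)
{
	unsigned int i, free_slot = (unsigned int)-1;

	for (i = 0; i < 16; i++) {
		if (table[i].count && table[i].port == port) {
			if (table[i].type != type)
				return false;	/* port claimed by another type */
			table[i].count++;	/* shared reference, no HW push */
			return true;
		}
		if (!table[i].count && free_slot == (unsigned int)-1)
			free_slot = i;
	}
	if (free_slot == (unsigned int)-1)
		return false;			/* table full */
	table[free_slot] = (struct tnl_entry){ port, type, 1 };
	return true;				/* would push the HW table here */
}

static void tnl_del(uint16_t port, uint16_t type)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		/* decrement only the matching entry; a 1->0 transition is
		 * where the HW table would be re-pushed
		 */
		if (table[i].count && table[i].port == port &&
		    table[i].type == type && --table[i].count == 0)
			return;
}

int main(void)
{
	tnl_add(4789, 0);	/* first VXLAN user */
	tnl_add(4789, 0);	/* second user of the same port: refcount only */
	tnl_del(4789, 0);	/* one user still left */
	printf("port 4789 count = %u\n", table[0].count);
	return 0;
}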
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -5609,6 +6422,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -5647,11 +6461,11 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
.vswitching_remove = efx_ef10_vswitching_remove_vf,
- .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
.get_mac_address = efx_ef10_get_mac_address_vf,
.set_mac_address = efx_ef10_set_mac_address,
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
@@ -5659,6 +6473,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
@@ -5666,6 +6481,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
+ .rx_hash_key_size = 40,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -5716,6 +6532,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -5756,6 +6573,10 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+ .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
@@ -5776,6 +6597,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.set_mac_address = efx_ef10_set_mac_address,
.tso_versions = efx_ef10_tso_versions,
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
@@ -5783,6 +6605,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
+ .option_descriptors = true,
+ .min_interrupt_mode = EFX_INT_MODE_LEGACY,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
@@ -5790,4 +6614,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
+ .rx_hash_key_size = 40,
};
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index a949b9d27329..b7e4345c990d 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -6,6 +6,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
@@ -548,13 +549,13 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
vf->efx->type->filter_table_probe(vf->efx);
up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
- netif_device_attach(vf->efx->net_dev);
+ efx_device_attach_if_not_resetting(vf->efx);
}
return 0;
fail:
- memset(vf->mac, 0, ETH_ALEN);
+ eth_zero_addr(vf->mac);
return rc;
}
@@ -666,7 +667,7 @@ restore_filters:
if (rc2)
goto reset_nic;
- netif_device_attach(vf->efx->net_dev);
+ efx_device_attach_if_not_resetting(vf->efx);
}
return rc;
@@ -760,17 +761,3 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
return 0;
}
-
-int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
- struct netdev_phys_item_id *ppid)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- if (!is_valid_ether_addr(nic_data->port_id))
- return -EOPNOTSUPP;
-
- ppid->id_len = ETH_ALEN;
- memcpy(ppid->id, nic_data->port_id, ppid->id_len);
-
- return 0;
-}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 9ceb7ef0a210..2aa444ed42de 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -56,9 +56,6 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
int link_state);
-int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
- struct netdev_phys_item_id *ppid);
-
int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5a5dcad8c49a..334bcc6df6b2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -23,12 +23,15 @@
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "sriov.h"
#include "mcdi.h"
+#include "mcdi_pcol.h"
#include "workarounds.h"
/**************************************************************************
@@ -88,6 +91,21 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};
+/* UDP tunnel type names */
+static const char *const efx_udp_tunnel_type_names[] = {
+ [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
+ [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
+};
+
+void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
+{
+ if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
+ efx_udp_tunnel_type_names[type] != NULL)
+ snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
+ else
+ snprintf(buf, buflen, "type %d", type);
+}
+
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -308,9 +326,6 @@ static int efx_poll(struct napi_struct *napi, int budget)
struct efx_nic *efx = channel->efx;
int spent;
- if (!efx_channel_lock_napi(channel))
- return budget;
-
netif_vdbg(efx, intr, efx->net_dev,
"channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
@@ -331,11 +346,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
* since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
- napi_complete(napi);
- efx_nic_eventq_read_ack(channel);
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
}
- efx_channel_unlock_napi(channel);
return spent;
}
@@ -391,7 +405,6 @@ void efx_start_eventq(struct efx_channel *channel)
channel->enabled = true;
smp_wmb();
- efx_channel_enable(channel);
napi_enable(&channel->napi_str);
efx_nic_eventq_read_ack(channel);
}
@@ -403,8 +416,6 @@ void efx_stop_eventq(struct efx_channel *channel)
return;
napi_disable(&channel->napi_str);
- while (!efx_channel_disable(channel))
- usleep_range(1000, 20000);
channel->enabled = false;
}
@@ -865,7 +876,7 @@ out:
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
} else {
efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
}
return rc;
@@ -1409,9 +1420,12 @@ static int efx_probe_interrupts(struct efx_nic *efx)
xentries, 1, n_channels);
if (rc < 0) {
/* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
netif_err(efx, drv, efx->net_dev,
"could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
} else if (rc < n_channels) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
@@ -1454,7 +1468,10 @@ static int efx_probe_interrupts(struct efx_nic *efx)
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
}
}
@@ -2088,7 +2105,6 @@ static void efx_init_napi_channel(struct efx_channel *channel)
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
- efx_channel_busy_poll_init(channel);
}
static void efx_init_napi(struct efx_nic *efx)
@@ -2138,37 +2154,6 @@ static void efx_netpoll(struct net_device *net_dev)
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int efx_busy_poll(struct napi_struct *napi)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int budget = 4;
- int old_rx_packets, rx_packets;
-
- if (!netif_running(efx->net_dev))
- return LL_FLUSH_FAILED;
-
- if (!efx_channel_try_lock_poll(channel))
- return LL_FLUSH_BUSY;
-
- old_rx_packets = channel->rx_queue.rx_packets;
- efx_process_channel(channel, budget);
-
- rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
-
- /* There is no race condition with NAPI here.
- * NAPI will automatically be rescheduled if it yielded during busy
- * polling, because it was not able to take the lock and thus returned
- * the full budget.
- */
- efx_channel_unlock_poll(channel);
-
- return rx_packets;
-}
-#endif
-
/**************************************************************************
*
* Kernel net device interface
@@ -2197,6 +2182,8 @@ int efx_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
efx_start_all(efx);
+ if (efx->state == STATE_DISABLED || efx->reset_pending)
+ netif_device_detach(efx->net_dev);
efx_selftest_async_start(efx);
return 0;
}
@@ -2219,16 +2206,14 @@ int efx_net_stop(struct net_device *net_dev)
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
+static void efx_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
-
- return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
@@ -2265,7 +2250,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
return 0;
}
@@ -2336,6 +2321,27 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
+static int efx_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->get_phys_port_id)
+ return efx->type->get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int efx_get_phys_port_name(struct net_device *net_dev,
+ char *name, size_t len)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (snprintf(name, len, "p%u", efx->port_num) >= len)
+ return -EINVAL;
+ return 0;
+}
+
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2356,6 +2362,52 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
+static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
+{
+ switch (in) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
+ default:
+ return -1;
+ }
+}
+
+static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+ struct efx_udp_tunnel tnl;
+ int efx_tunnel_type;
+
+ efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
+ if (efx_tunnel_type < 0)
+ return;
+
+ tnl.type = (u16)efx_tunnel_type;
+ tnl.port = ti->port;
+
+ if (efx->type->udp_tnl_add_port)
+ (void)efx->type->udp_tnl_add_port(efx, tnl);
+}
+
+static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+ struct efx_udp_tunnel tnl;
+ int efx_tunnel_type;
+
+ efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
+ if (efx_tunnel_type < 0)
+ return;
+
+ tnl.type = (u16)efx_tunnel_type;
+ tnl.port = ti->port;
+
+	if (efx->type->udp_tnl_del_port)
+ (void)efx->type->udp_tnl_del_port(efx, tnl);
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -2376,18 +2428,18 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
.ndo_get_vf_config = efx_sriov_get_vf_config,
.ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
- .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
+ .ndo_get_phys_port_id = efx_get_phys_port_id,
+ .ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
.ndo_setup_tc = efx_setup_tc,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = efx_busy_poll,
-#endif
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
+ .ndo_udp_tunnel_add = efx_udp_tunnel_add,
+ .ndo_udp_tunnel_del = efx_udp_tunnel_del,
};
static void efx_update_name(struct efx_nic *efx)
@@ -2627,6 +2679,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx_start_all(efx);
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
return 0;
fail:
@@ -2691,7 +2746,7 @@ out:
efx->state = STATE_DISABLED;
} else {
netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
}
return rc;
}
@@ -2888,7 +2943,7 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
static int efx_init_struct(struct efx_nic *efx,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
- int i;
+ int rc = -ENOMEM, i;
/* Initialise common structures */
INIT_LIST_HEAD(&efx->node);
@@ -2929,8 +2984,15 @@ static int efx_init_struct(struct efx_nic *efx,
}
/* Higher numbered interrupt modes are less capable! */
+ if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+ efx->type->min_interrupt_mode)) {
+ rc = -EIO;
+ goto fail;
+ }
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
+ efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+ interrupt_mode);
/* Would be good to use the net_dev name, but we're too early */
snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
@@ -2943,7 +3005,7 @@ static int efx_init_struct(struct efx_nic *efx,
fail:
efx_fini_struct(efx);
- return -ENOMEM;
+ return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
@@ -3158,6 +3220,51 @@ static int efx_pci_probe_main(struct efx_nic *efx)
return rc;
}
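+/* The part of PCI probe that runs after I/O (BAR) setup.  Grouped into one
+ * function so that efx_pci_probe() can retry it as a unit if the first attempt
+ * is aborted by a scheduled reset.
+ */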
+static int efx_pci_probe_post_io(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ int rc = efx_pci_probe_main(efx);
+
+ if (rc)
+ return rc;
+
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+ }
+
+ /* Determine netdevice features */
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_RXCSUM);
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ net_dev->features |= NETIF_F_TSO6;
+ /* Check whether device supports TSO */
+ if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+ net_dev->features &= ~NETIF_F_ALL_TSO;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
+ rc = efx_register_netdev(efx);
+ if (!rc)
+ return 0;
+
+ efx_pci_remove_main(efx);
+ return rc;
+}
+
/* NIC initialisation
*
* This is called at module load (or hotplug insertion,
@@ -3200,42 +3307,28 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail2;
- rc = efx_pci_probe_main(efx);
+ rc = efx_pci_probe_post_io(efx);
+ if (rc) {
+ /* On failure, retry once immediately.
+ * If we aborted probe due to a scheduled reset, dismiss it.
+ */
+ efx->reset_pending = 0;
+ rc = efx_pci_probe_post_io(efx);
+ if (rc) {
+ /* On another failure, retry once more
+ * after a 50-305ms delay.
+ */
+ unsigned char r;
+
+ get_random_bytes(&r, 1);
+ msleep((unsigned int)r + 50);
+ efx->reset_pending = 0;
+ rc = efx_pci_probe_post_io(efx);
+ }
+ }
if (rc)
goto fail3;
- net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_RXCSUM);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
- net_dev->features |= NETIF_F_TSO6;
- /* Check whether device supports TSO */
- if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
- net_dev->features &= ~NETIF_F_ALL_TSO;
- /* Mask for features that also apply to VLAN devices */
- net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
- NETIF_F_RXCSUM);
-
- net_dev->hw_features = net_dev->features & ~efx->fixed_features;
-
- /* Disable VLAN filtering by default. It may be enforced if
- * the feature is fixed (i.e. VLAN filters are required to
- * receive VLAN tagged packets due to vPort restrictions).
- */
- net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- net_dev->features |= efx->fixed_features;
-
- rc = efx_register_netdev(efx);
- if (rc)
- goto fail4;
-
- if (efx->type->sriov_init) {
- rc = efx->type->sriov_init(efx);
- if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
- }
-
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
/* Try to create MTDs, but allow this to fail */
@@ -3252,10 +3345,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
"PCIE error reporting unavailable (%d).\n",
rc);
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
return 0;
- fail4:
- efx_pci_remove_main(efx);
fail3:
efx_fini_io(efx);
fail2:
@@ -3325,7 +3419,7 @@ static int efx_pm_thaw(struct device *dev)
efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
efx->state = STATE_READY;
@@ -3585,3 +3679,4 @@ MODULE_AUTHOR("Solarflare Communications and "
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
+MODULE_VERSION(EFX_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 342ae16e1f2d..ee14662415c5 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -276,6 +276,12 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
netif_tx_unlock_bh(dev);
}
+static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
+{
+ if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+ netif_device_attach(efx->net_dev);
+}
+
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
if (WARN_ON(down_read_trylock(sem))) {
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 18ebaea44e82..3747b5644110 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -77,6 +77,11 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
@@ -1278,15 +1283,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table);
}
+static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->type->rx_hash_key_size;
+}
+
static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
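+	/* Refresh the cached RSS key and indirection table from the NIC first */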
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ if (key)
+ memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size);
return 0;
}
@@ -1295,14 +1314,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Hash function is Toeplitz, cannot be changed */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+ if (!indir && !key)
return 0;
- return efx->type->rx_push_rss_config(efx, true, indir);
+ if (!key)
+ key = efx->rx_hash_key;
+ if (!indir)
+ indir = efx->rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
@@ -1377,6 +1400,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
+ .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
.get_ts_info = efx_ethtool_get_ts_info,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 5c5cb3c4c12e..f5e5cd1659a1 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -304,9 +304,6 @@ static int ef4_poll(struct napi_struct *napi, int budget)
struct ef4_nic *efx = channel->efx;
int spent;
- if (!ef4_channel_lock_napi(channel))
- return budget;
-
netif_vdbg(efx, intr, efx->net_dev,
"channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
@@ -327,11 +324,10 @@ static int ef4_poll(struct napi_struct *napi, int budget)
* since ef4_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
- napi_complete(napi);
+ napi_complete_done(napi, spent);
ef4_nic_eventq_read_ack(channel);
}
- ef4_channel_unlock_napi(channel);
return spent;
}
@@ -387,7 +383,6 @@ void ef4_start_eventq(struct ef4_channel *channel)
channel->enabled = true;
smp_wmb();
- ef4_channel_enable(channel);
napi_enable(&channel->napi_str);
ef4_nic_eventq_read_ack(channel);
}
@@ -399,8 +394,6 @@ void ef4_stop_eventq(struct ef4_channel *channel)
return;
napi_disable(&channel->napi_str);
- while (!ef4_channel_disable(channel))
- usleep_range(1000, 20000);
channel->enabled = false;
}
@@ -986,7 +979,7 @@ void ef4_mac_reconfigure(struct ef4_nic *efx)
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
* through ef4_monitor().
*
* Callers must hold the mac_lock
@@ -2029,7 +2022,6 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
ef4_poll, napi_weight);
- ef4_channel_busy_poll_init(channel);
}
static void ef4_init_napi(struct ef4_nic *efx)
@@ -2079,37 +2071,6 @@ static void ef4_netpoll(struct net_device *net_dev)
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int ef4_busy_poll(struct napi_struct *napi)
-{
- struct ef4_channel *channel =
- container_of(napi, struct ef4_channel, napi_str);
- struct ef4_nic *efx = channel->efx;
- int budget = 4;
- int old_rx_packets, rx_packets;
-
- if (!netif_running(efx->net_dev))
- return LL_FLUSH_FAILED;
-
- if (!ef4_channel_try_lock_poll(channel))
- return LL_FLUSH_BUSY;
-
- old_rx_packets = channel->rx_queue.rx_packets;
- ef4_process_channel(channel, budget);
-
- rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
-
- /* There is no race condition with NAPI here.
- * NAPI will automatically be rescheduled if it yielded during busy
- * polling, because it was not able to take the lock and thus returned
- * the full budget.
- */
- ef4_channel_unlock_poll(channel);
-
- return rx_packets;
-}
-#endif
-
/**************************************************************************
*
* Kernel net device interface
@@ -2158,16 +2119,14 @@ int ef4_net_stop(struct net_device *net_dev)
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
+static void ef4_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
{
struct ef4_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
-
- return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
@@ -2291,9 +2250,6 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_poll_controller = ef4_netpoll,
#endif
.ndo_setup_tc = ef4_setup_tc,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ef4_busy_poll,
-#endif
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = ef4_filter_rfs,
#endif
@@ -3348,3 +3304,4 @@ MODULE_AUTHOR("Solarflare Communications and "
MODULE_DESCRIPTION("Solarflare Falcon network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ef4_pci_table);
+MODULE_VERSION(EF4_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 8e1929b01a32..56049157a5af 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -115,44 +115,47 @@ static int ef4_ethtool_phys_id(struct net_device *net_dev,
}
/* This must be called with rtnl_lock held. */
-static int ef4_ethtool_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+ef4_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_settings(efx, ecmd);
+ efx->phy_op->get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
if (LOOPBACK_INTERNAL(efx)) {
- ethtool_cmd_speed_set(ecmd, link_state->speed);
- ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = link_state->speed;
+ cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/* This must be called with rtnl_lock held. */
-static int ef4_ethtool_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+ef4_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ef4_nic *efx = netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
- if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
- (ecmd->duplex != DUPLEX_FULL)) {
+ if ((cmd->base.speed == SPEED_1000) &&
+ (cmd->base.duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_settings(efx, ecmd);
+ rc = efx->phy_op->set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -1310,8 +1313,6 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops ef4_ethtool_ops = {
- .get_settings = ef4_ethtool_get_settings,
- .set_settings = ef4_ethtool_set_settings,
.get_drvinfo = ef4_ethtool_get_drvinfo,
.get_regs_len = ef4_ethtool_get_regs_len,
.get_regs = ef4_ethtool_get_regs,
@@ -1340,4 +1341,6 @@ const struct ethtool_ops ef4_ethtool_ops = {
.set_rxfh = ef4_ethtool_set_rxfh,
.get_module_info = ef4_ethtool_get_module_info,
.get_module_eeprom = ef4_ethtool_get_module_eeprom,
+ .get_link_ksettings = ef4_ethtool_get_link_ksettings,
+ .set_link_ksettings = ef4_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.c b/drivers/net/ethernet/sfc/falcon/mdio_10g.c
index e7d7c09296aa..ee0713f03d01 100644
--- a/drivers/net/ethernet/sfc/falcon/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.c
@@ -226,33 +226,45 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx,
}
/**
- * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO.
+ * ef4_mdio_set_link_ksettings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
- * @ecmd: New settings
+ * @cmd: New settings
*/
-int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+int ef4_mdio_set_link_ksettings(struct ef4_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
{
- struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
-
- efx->phy_op->get_settings(efx, &prev);
-
- if (ecmd->advertising == prev.advertising &&
- ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
- ecmd->duplex == prev.duplex &&
- ecmd->port == prev.port &&
- ecmd->autoneg == prev.autoneg)
+ struct ethtool_link_ksettings prev = {
+ .base.cmd = ETHTOOL_GLINKSETTINGS
+ };
+ u32 prev_advertising, advertising;
+ u32 prev_supported;
+
+ efx->phy_op->get_link_ksettings(efx, &prev);
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(&prev_advertising,
+ prev.link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(&prev_supported,
+ prev.link_modes.supported);
+
+ if (advertising == prev_advertising &&
+ cmd->base.speed == prev.base.speed &&
+ cmd->base.duplex == prev.base.duplex &&
+ cmd->base.port == prev.base.port &&
+ cmd->base.autoneg == prev.base.autoneg)
return 0;
/* We can only change these settings for -T PHYs */
- if (prev.port != PORT_TP || ecmd->port != PORT_TP)
+ if (prev.base.port != PORT_TP || cmd->base.port != PORT_TP)
return -EINVAL;
/* Check that PHY supports these settings */
- if (!ecmd->autoneg ||
- (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
+ if (!cmd->base.autoneg ||
+ (advertising | SUPPORTED_Autoneg) & ~prev_supported)
return -EINVAL;
- ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+ ef4_link_set_advertising(efx, advertising | ADVERTISED_Autoneg);
ef4_mdio_an_reconfigure(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.h b/drivers/net/ethernet/sfc/falcon/mdio_10g.h
index 885cf7a834a6..53cb5cc4ad37 100644
--- a/drivers/net/ethernet/sfc/falcon/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.h
@@ -83,7 +83,8 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power,
unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
+int ef4_mdio_set_link_ksettings(struct ef4_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
/* Push advertising flags and restart autonegotiation */
void ef4_mdio_an_reconfigure(struct ef4_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index 210b28f7d2a1..37a8bdf32206 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -448,131 +448,6 @@ struct ef4_channel {
struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum ef4_channel_busy_poll_state {
- EF4_CHANNEL_STATE_IDLE = 0,
- EF4_CHANNEL_STATE_NAPI = BIT(0),
- EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1,
- EF4_CHANNEL_STATE_NAPI_REQ = BIT(1),
- EF4_CHANNEL_STATE_POLL_BIT = 2,
- EF4_CHANNEL_STATE_POLL = BIT(2),
- EF4_CHANNEL_STATE_DISABLE_BIT = 3,
-};
-
-static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
-{
- WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
-}
-
-/* Called from the device poll routine to get ownership of a channel. */
-static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
-{
- unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
-
- while (1) {
- switch (old) {
- case EF4_CHANNEL_STATE_POLL:
- /* Ensure ef4_channel_try_lock_poll() wont starve us */
- set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT,
- &channel->busy_poll_state);
- /* fallthrough */
- case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ:
- return false;
- default:
- break;
- }
- prev = cmpxchg(&channel->busy_poll_state, old,
- EF4_CHANNEL_STATE_NAPI);
- if (unlikely(prev != old)) {
- /* This is likely to mean we've just entered polling
- * state. Go back round to set the REQ bit.
- */
- old = prev;
- continue;
- }
- return true;
- }
-}
-
-static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
-{
- /* Make sure write has completed from ef4_channel_lock_napi() */
- smp_wmb();
- WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
-}
-
-/* Called from ef4_busy_poll(). */
-static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
-{
- return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE,
- EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE;
-}
-
-static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
-{
- clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
-{
- return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline void ef4_channel_enable(struct ef4_channel *channel)
-{
- clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT,
- &channel->busy_poll_state);
-}
-
-/* Stop further polling or napi access.
- * Returns false if the channel is currently busy polling.
- */
-static inline bool ef4_channel_disable(struct ef4_channel *channel)
-{
- set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
- /* Implicit barrier in ef4_channel_busy_polling() */
- return !ef4_channel_busy_polling(channel);
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
-{
- return true;
-}
-
-static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
-{
- return false;
-}
-
-static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
-{
- return false;
-}
-
-static inline void ef4_channel_enable(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_disable(struct ef4_channel *channel)
-{
- return true;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* struct ef4_msi_context - Context for each MSI
* @efx: The associated NIC
@@ -684,8 +559,8 @@ static inline bool ef4_link_state_equal(const struct ef4_link_state *left,
* @reconfigure: Reconfigure PHY (e.g. for new link parameters)
* @poll: Update @link_state and report whether it changed.
* Serialised by the mac_lock.
- * @get_settings: Get ethtool settings. Serialised by the mac_lock.
- * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @test_alive: Test that PHY is 'alive' (online)
@@ -700,10 +575,10 @@ struct ef4_phy_operations {
void (*remove) (struct ef4_nic *efx);
int (*reconfigure) (struct ef4_nic *efx);
bool (*poll) (struct ef4_nic *efx);
- void (*get_settings) (struct ef4_nic *efx,
- struct ethtool_cmd *ecmd);
- int (*set_settings) (struct ef4_nic *efx,
- struct ethtool_cmd *ecmd);
+ void (*get_link_ksettings)(struct ef4_nic *efx,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_link_ksettings)(struct ef4_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
void (*set_npage_adv) (struct ef4_nic *efx, u32);
int (*test_alive) (struct ef4_nic *efx);
const char *(*test_name) (struct ef4_nic *efx, unsigned int index);
diff --git a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c
index d29331652548..f5e0f18d4ea8 100644
--- a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c
@@ -437,9 +437,10 @@ static int qt202x_phy_reconfigure(struct ef4_nic *efx)
return 0;
}
-static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static void qt202x_phy_get_link_ksettings(struct ef4_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
- mdio45_ethtool_gset(&efx->mdio, ecmd);
+ mdio45_ethtool_ksettings_get(&efx->mdio, cmd);
}
static void qt202x_phy_remove(struct ef4_nic *efx)
@@ -487,8 +488,8 @@ const struct ef4_phy_operations falcon_qt202x_phy_ops = {
.poll = qt202x_phy_poll,
.fini = ef4_port_dummy_op_void,
.remove = qt202x_phy_remove,
- .get_settings = qt202x_phy_get_settings,
- .set_settings = ef4_mdio_set_settings,
+ .get_link_ksettings = qt202x_phy_get_link_ksettings,
+ .set_link_ksettings = ef4_mdio_set_link_ksettings,
.test_alive = ef4_mdio_test_alive,
.get_module_eeprom = qt202x_phy_get_module_eeprom,
.get_module_info = qt202x_phy_get_module_info,
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 250458cbdb4d..6a8406dc0c2b 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -674,8 +674,7 @@ void __ef4_rx_packet(struct ef4_channel *channel)
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
- if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb &&
- !ef4_channel_busy_polling(channel))
+ if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
diff --git a/drivers/net/ethernet/sfc/falcon/tenxpress.c b/drivers/net/ethernet/sfc/falcon/tenxpress.c
index acc548a1c4d6..ff9b4e2b590c 100644
--- a/drivers/net/ethernet/sfc/falcon/tenxpress.c
+++ b/drivers/net/ethernet/sfc/falcon/tenxpress.c
@@ -351,9 +351,6 @@ static int tenxpress_phy_reconfigure(struct ef4_nic *efx)
return 0;
}
-static void
-tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
-
/* Poll for link state changes */
static bool tenxpress_phy_poll(struct ef4_nic *efx)
{
@@ -443,7 +440,8 @@ sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
}
static void
-tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+tenxpress_get_link_ksettings(struct ef4_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
u32 adv = 0, lpa = 0;
int reg;
@@ -455,20 +453,22 @@ tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
- mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
+ mdio45_ethtool_ksettings_get_npage(&efx->mdio, cmd, adv, lpa);
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (LOOPBACK_EXTERNAL(efx))
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
}
-static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static int
+tenxpress_set_link_ksettings(struct ef4_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
{
- if (!ecmd->autoneg)
+ if (!cmd->base.autoneg)
return -EINVAL;
- return ef4_mdio_set_settings(efx, ecmd);
+ return ef4_mdio_set_link_ksettings(efx, cmd);
}
static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising)
@@ -485,8 +485,8 @@ const struct ef4_phy_operations falcon_sfx7101_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = sfx7101_phy_fini,
.remove = tenxpress_phy_remove,
- .get_settings = tenxpress_get_settings,
- .set_settings = tenxpress_set_settings,
+ .get_link_ksettings = tenxpress_get_link_ksettings,
+ .set_link_ksettings = tenxpress_set_link_ksettings,
.set_npage_adv = sfx7101_set_npage_adv,
.test_alive = ef4_mdio_test_alive,
.test_name = sfx7101_test_name,
diff --git a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c
index 18421f5e880f..3c55fd23c271 100644
--- a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c
@@ -540,9 +540,10 @@ static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
return rc;
}
-static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static void txc43128_get_link_ksettings(struct ef4_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
- mdio45_ethtool_gset(&efx->mdio, ecmd);
+ mdio45_ethtool_ksettings_get(&efx->mdio, cmd);
}
const struct ef4_phy_operations falcon_txc_phy_ops = {
@@ -552,8 +553,8 @@ const struct ef4_phy_operations falcon_txc_phy_ops = {
.poll = txc43128_phy_poll,
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
- .get_settings = txc43128_get_settings,
- .set_settings = ef4_mdio_set_settings,
+ .get_link_ksettings = txc43128_get_link_ksettings,
+ .set_link_ksettings = ef4_mdio_set_link_ksettings,
.test_alive = ef4_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index e4ca2161af70..ba45150f53c7 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
}
}
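+/* Read the RX indirection table back from hardware into efx->rx_indir_table;
+ * the inverse of efx_farch_rx_push_indir_table().
+ */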
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ efx_readd(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
+ }
+}
+
/* Looks at available SRAM resources and works out how many queues we
* can support, and where things like descriptor caches should live.
*
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index d0ed7f71ea7e..8189a1cd973f 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -27,6 +27,7 @@
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
* Used for RX default unicast and multicast/broadcast filters.
*
* Only some combinations are supported, depending on NIC type:
@@ -54,6 +55,7 @@ enum efx_filter_match_flags {
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+ EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
};
/**
@@ -98,6 +100,26 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_TX = 0x10,
};
+/** enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+ EFX_ENCAP_TYPE_NONE = 0,
+ EFX_ENCAP_TYPE_VXLAN = 1,
+ EFX_ENCAP_TYPE_NVGRE = 2,
+ EFX_ENCAP_TYPE_GENEVE = 3,
+
+ EFX_ENCAP_TYPES_MASK = 7,
+ EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
/**
* struct efx_filter_spec - specification for a hardware filter
* @match_flags: Match type flags, from &enum efx_filter_match_flags
@@ -118,6 +140,8 @@ enum efx_filter_flags {
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ * %EFX_FILTER_MATCH_ENCAP_TYPE is set
*
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
* used to initialise the structure. The efx_filter_set_*() functions
@@ -144,7 +168,8 @@ struct efx_filter_spec {
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
- /* total 64 bytes */
+ u32 encap_type:4;
+ /* total 65 bytes */
};
enum {
@@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+ enum efx_encap_type encap_type)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+ const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+ return spec->encap_type;
+ return EFX_ENCAP_TYPE_NONE;
+}
#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 995651341b94..b9422450deb8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -128,7 +128,7 @@ fail:
return rc;
}
-void efx_mcdi_fini(struct efx_nic *efx)
+void efx_mcdi_detach(struct efx_nic *efx)
{
if (!efx->mcdi)
return;
@@ -137,6 +137,12 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+}
+
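+/* Free MCDI state.  Callers are expected to have called efx_mcdi_detach() first
+ * to hand the device back to the MC/BMC.
+ */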
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
#ifdef CONFIG_SFC_MCDI_LOGGING
free_page((unsigned long)efx->mcdi->iface.logging_buffer);
@@ -716,8 +722,11 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
if (cmd == MC_CMD_REBOOT && rc == -EIO) {
/* Don't reset if MC_CMD_REBOOT returns EIO */
} else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
+ netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
+ netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
+ cmd, -rc);
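+		/* Give the NIC-type code a chance to react to the MC reboot */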
+ if (efx->type->mcdi_reboot_detected)
+ efx->type->mcdi_reboot_detected(efx);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else if (proxy_handle && (rc == -EPROTO) &&
efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
@@ -837,11 +846,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
outbuf, outlen, outlen_actual,
quiet, NULL, raw_rc);
} else {
- netif_printk(efx, hw,
- rc == -EPERM ? KERN_DEBUG : KERN_ERR,
- efx->net_dev,
- "MC command 0x%x failed after proxy auth rc=%d\n",
- cmd, rc);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x failed after proxy auth rc=%d\n",
+ cmd, rc);
if (rc == -EINTR || rc == -EIO)
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
@@ -1084,10 +1091,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
code = MCDI_DWORD(outbuf, ERR_CODE);
if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
err_arg = MCDI_DWORD(outbuf, ERR_ARG);
- netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
- efx->net_dev,
- "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
- cmd, inlen, rc, code, err_arg);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+ cmd, inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -2057,8 +2063,8 @@ fail:
/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
* terrifying. The call site will have to deal with it though.
*/
- netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
- efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
+ "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 4472107ca8c1..154ef41d1927 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -142,6 +142,7 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
#endif
int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_detach(struct efx_nic *efx);
void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 35cc3d4fa5f6..47ced8a898ca 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -11913,6 +11913,27 @@
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
/***********************************/
/* MC_CMD_RX_BALANCING
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 1c62c1a00fca..c0537ea06c9a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -208,6 +208,12 @@ struct efx_tx_buffer {
* @write_count: Current write pointer
* This is the number of buffers that have been added to the
* hardware ring.
+ * @packet_write_count: Completable write pointer
+ * This is the write pointer of the last packet written.
+ * Normally this will equal @write_count, but as option descriptors
+ * don't produce completion events, they won't update this.
+ * Filled in iff @efx->type->option_descriptors; only used for PIO.
+ *	Thus, this is written and used on EF10, and neither written nor used on farch.
* @old_read_count: The value of read_count when last checked.
* This is here for performance reasons. The xmit path will
* only get the up-to-date value of read_count if this
@@ -255,6 +261,7 @@ struct efx_tx_queue {
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
+ unsigned int packet_write_count;
unsigned int old_read_count;
unsigned int tso_bursts;
unsigned int tso_long_headers;
@@ -300,6 +307,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
+#define EFX_RX_PKT_CSUM_LEVEL 0x0200
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -462,13 +470,18 @@ struct efx_channel {
u32 *rps_flow_id;
#endif
- unsigned n_rx_tobe_disc;
- unsigned n_rx_ip_hdr_chksum_err;
- unsigned n_rx_tcp_udp_chksum_err;
- unsigned n_rx_mcast_mismatch;
- unsigned n_rx_frm_trunc;
- unsigned n_rx_overlength;
- unsigned n_skbuff_leaks;
+ unsigned int n_rx_tobe_disc;
+ unsigned int n_rx_ip_hdr_chksum_err;
+ unsigned int n_rx_tcp_udp_chksum_err;
+ unsigned int n_rx_outer_ip_hdr_chksum_err;
+ unsigned int n_rx_outer_tcp_udp_chksum_err;
+ unsigned int n_rx_inner_ip_hdr_chksum_err;
+ unsigned int n_rx_inner_tcp_udp_chksum_err;
+ unsigned int n_rx_eth_crc_err;
+ unsigned int n_rx_mcast_mismatch;
+ unsigned int n_rx_frm_trunc;
+ unsigned int n_rx_overlength;
+ unsigned int n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
unsigned int n_rx_merge_events;
unsigned int n_rx_merge_packets;
@@ -484,131 +497,6 @@ struct efx_channel {
u32 sync_timestamp_minor;
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum efx_channel_busy_poll_state {
- EFX_CHANNEL_STATE_IDLE = 0,
- EFX_CHANNEL_STATE_NAPI = BIT(0),
- EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
- EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
- EFX_CHANNEL_STATE_POLL_BIT = 2,
- EFX_CHANNEL_STATE_POLL = BIT(2),
- EFX_CHANNEL_STATE_DISABLE_BIT = 3,
-};
-
-static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
-{
- WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
-}
-
-/* Called from the device poll routine to get ownership of a channel. */
-static inline bool efx_channel_lock_napi(struct efx_channel *channel)
-{
- unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
-
- while (1) {
- switch (old) {
- case EFX_CHANNEL_STATE_POLL:
- /* Ensure efx_channel_try_lock_poll() wont starve us */
- set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
- &channel->busy_poll_state);
- /* fallthrough */
- case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
- return false;
- default:
- break;
- }
- prev = cmpxchg(&channel->busy_poll_state, old,
- EFX_CHANNEL_STATE_NAPI);
- if (unlikely(prev != old)) {
- /* This is likely to mean we've just entered polling
- * state. Go back round to set the REQ bit.
- */
- old = prev;
- continue;
- }
- return true;
- }
-}
-
-static inline void efx_channel_unlock_napi(struct efx_channel *channel)
-{
- /* Make sure write has completed from efx_channel_lock_napi() */
- smp_wmb();
- WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
-}
-
-/* Called from efx_busy_poll(). */
-static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
-{
- return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
- EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
-}
-
-static inline void efx_channel_unlock_poll(struct efx_channel *channel)
-{
- clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline bool efx_channel_busy_polling(struct efx_channel *channel)
-{
- return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline void efx_channel_enable(struct efx_channel *channel)
-{
- clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
- &channel->busy_poll_state);
-}
-
-/* Stop further polling or napi access.
- * Returns false if the channel is currently busy polling.
- */
-static inline bool efx_channel_disable(struct efx_channel *channel)
-{
- set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
- /* Implicit barrier in efx_channel_busy_polling() */
- return !efx_channel_busy_polling(channel);
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_lock_napi(struct efx_channel *channel)
-{
- return true;
-}
-
-static inline void efx_channel_unlock_napi(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
-{
- return false;
-}
-
-static inline void efx_channel_unlock_poll(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_busy_polling(struct efx_channel *channel)
-{
- return false;
-}
-
-static inline void efx_channel_enable(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_disable(struct efx_channel *channel)
-{
- return true;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* struct efx_msi_context - Context for each MSI
* @efx: The associated NIC
@@ -666,6 +554,8 @@ extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
STRING_TABLE_LOOKUP(type, efx_reset_type)
+void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen);
+
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
@@ -1105,6 +995,15 @@ struct efx_mtd_partition {
char name[IFNAMSIZ + 20];
};
+struct efx_udp_tunnel {
+ u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
+ __be16 port;
+ /* Count of repeated adds of the same port. Used only inside the list,
+ * not in request arguments.
+ */
+ u16 count;
+};
+
/**
* struct efx_nic_type - Efx device type definition
* @mem_bar: Get the memory BAR
@@ -1174,6 +1073,7 @@ struct efx_mtd_partition {
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1220,9 +1120,14 @@ struct efx_mtd_partition {
* @ptp_set_ts_config: Set hardware timestamp configuration. The flags
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
+ * @get_phys_port_id: Get the underlying physical port id.
* @set_mac_address: Set the MAC address of the device
* @tso_versions: Returns mask of firmware-assisted TSO versions supported.
* If %NULL, then device does not support any TSO version.
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
+ * @udp_tnl_add_port: Add a UDP tunnel port
+ * @udp_tnl_has_port: Check if a port has been added as UDP tunnel
+ * @udp_tnl_del_port: Remove a UDP tunnel port
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1236,8 +1141,11 @@ struct efx_mtd_partition {
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
+ * @min_interrupt_mode: Lowest capability interrupt mode supported
+ * from &enum efx_int_mode.
* @max_interrupt_mode: Highest capability interrupt mode supported
- * from &enum efx_init_mode.
+ * from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
@@ -1302,7 +1210,8 @@ struct efx_nic_type {
unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table);
+ const u32 *rx_indir_table, const u8 *key);
+ int (*rx_pull_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1358,6 +1267,8 @@ struct efx_nic_type {
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1372,14 +1283,16 @@ struct efx_nic_type {
struct ifla_vf_info *ivi);
int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
int link_state);
- int (*sriov_get_phys_port_id)(struct efx_nic *efx,
- struct netdev_phys_item_id *ppid);
int (*vswitching_probe)(struct efx_nic *efx);
int (*vswitching_restore)(struct efx_nic *efx);
void (*vswitching_remove)(struct efx_nic *efx);
int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
int (*set_mac_address)(struct efx_nic *efx);
u32 (*tso_versions)(struct efx_nic *efx);
+ int (*udp_tnl_push_ports)(struct efx_nic *efx);
+ int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+ bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
+ int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1394,12 +1307,15 @@ struct efx_nic_type {
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
+ bool option_descriptors;
+ unsigned int min_interrupt_mode;
unsigned int max_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
u32 hwtstamp_filters;
+ unsigned int rx_hash_key_size;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 223774635cba..7b916aa21bde 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
+/* Report whether the NIC considers this TX queue empty, using
+ * packet_write_count (the write count recorded for the last completable
+ * doorbell push). May return false negative. EF10 only, which is OK
+ * because only EF10 supports PIO.
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+ EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
+ return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
+}
+
/* Decide whether we can use TX PIO, ie. write packet data directly into
* a buffer on the device. This can reduce latency at the expense of
* throughput, so we only do this if both hardware and software TX rings
@@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
{
struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
- return tx_queue->piobuf &&
- __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
- __efx_nic_tx_is_empty(partner, partner->insert_count);
+
+ return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
+ efx_nic_tx_is_empty(partner);
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
@@ -332,6 +343,7 @@ enum {
* @pio_write_base: Base address for writing PIO buffers
* @pio_write_vi_base: Relative VI number for @pio_write_base
* @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
@@ -357,6 +369,10 @@ enum {
* @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
* @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
* @vlan_lock: Lock to serialize access to vlan_list.
+ * @udp_tunnels: UDP tunnel port numbers and types.
+ * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
+ * @udp_tunnels to hardware and thus the push must be re-done.
+ * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -369,6 +385,7 @@ struct efx_ef10_nic_data {
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ u16 piobuf_size;
bool must_restore_piobufs;
u32 rx_rss_context;
bool rx_rss_context_exclusive;
@@ -392,6 +409,9 @@ struct efx_ef10_nic_data {
u8 vport_mac[ETH_ALEN];
struct list_head vlan_list;
struct mutex vlan_lock;
+ struct efx_udp_tunnel udp_tunnels[16];
+ bool udp_tunnels_dirty;
+ struct mutex udp_tunnels_lock;
};
int efx_init_sriov(void);
@@ -613,6 +633,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 5f4ad4f3518f..42443f434569 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -434,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
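+	/* csum_level 1 means the hardware also verified an inner (encapsulated) checksum */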
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -621,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
/* Set the SKB flags */
skb_checksum_none_assert(skb);
- if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
+ if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+ }
efx_rx_skb_attach_timestamp(channel, skb);
@@ -665,8 +668,7 @@ void __efx_rx_packet(struct efx_channel *channel)
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
- !efx_channel_busy_polling(channel))
+ if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index cd38b44ae23a..dab286a337a6 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -768,7 +768,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
- netif_device_attach(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
return rc_test;
}
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4e54e5dc9fcb..a617f657eae3 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -326,18 +326,40 @@ fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
+ efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
}
+static int siena_rx_pull_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the
+ * first 128 bits of the same key, assuming it's been set by
+ * siena_rx_push_rss_config, below)
+ */
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(efx->rx_hash_key, &temp, sizeof(temp));
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp));
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp,
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_farch_rx_pull_indir_table(efx);
+ return 0;
+}
+
static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table, const u8 *key)
{
efx_oword_t temp;
/* Set hash key for IPv4 */
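+	/* Cache any new key supplied by the caller before writing it to hardware */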
+ if (key)
+ memcpy(efx->rx_hash_key, key, sizeof(temp));
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
@@ -402,7 +424,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
efx->rss_active = true;
/* Enable event logging */
@@ -429,6 +451,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
/* Tear down the private nic state */
@@ -979,6 +1002,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = siena_rx_push_rss_config,
+ .rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -1044,6 +1068,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
+ .option_descriptors = false,
+ .min_interrupt_mode = EFX_INT_MODE_LEGACY,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1053,4 +1079,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
+ .rx_hash_key_size = 16,
};
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 9abcf4aded30..0b766fdbcddb 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -73,14 +73,3 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
else
return -EOPNOTSUPP;
}
-
-int efx_sriov_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- if (efx->type->sriov_get_phys_port_id)
- return efx->type->sriov_get_phys_port_id(efx, ppid);
- else
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index ba1762e7f216..84c7984edcaf 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -23,9 +23,6 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi);
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state);
-int efx_sriov_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid);
-
#endif /* CONFIG_SFC_SRIOV */
#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3c0151424d12..ff88d60aa6d5 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -28,7 +28,6 @@
#ifdef EFX_USE_PIO
-#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
@@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 55a95e1d69d6..5f2737189c72 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -264,7 +264,6 @@ struct epic_private {
spinlock_t lock; /* Group with Tx control cache line. */
spinlock_t napi_lock;
struct napi_struct napi;
- unsigned int reschedule_in_poll;
unsigned int cur_tx, dirty_tx;
unsigned int cur_rx, dirty_rx;
@@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&ep->lock);
spin_lock_init(&ep->napi_lock);
- ep->reschedule_in_poll = 0;
/* Bring the chip out of low-power mode. */
ew32(GENCTL, 0x4200);
@@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
handled = 1;
- if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+ if (status & EpicNapiEvent) {
spin_lock(&ep->napi_lock);
if (napi_schedule_prep(&ep->napi)) {
epic_napi_irq_off(dev, ep);
__napi_schedule(&ep->napi);
- } else
- ep->reschedule_in_poll++;
+ }
spin_unlock(&ep->napi_lock);
}
status &= ~EpicNapiEvent;
@@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget)
{
struct epic_private *ep = container_of(napi, struct epic_private, napi);
struct net_device *dev = ep->mii.dev;
- int work_done = 0;
void __iomem *ioaddr = ep->ioaddr;
-
-rx_action:
+ int work_done;
epic_tx(dev, ep);
- work_done += epic_rx(dev, budget);
+ work_done = epic_rx(dev, budget);
epic_rx_err(dev, ep);
- if (work_done < budget) {
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
unsigned long flags;
- int more;
-
- /* A bit baroque but it avoids a (space hungry) spin_unlock */
spin_lock_irqsave(&ep->napi_lock, flags);
- more = ep->reschedule_in_poll;
- if (!more) {
- __napi_complete(napi);
- ew32(INTSTAT, EpicNapiEvent);
- epic_napi_irq_on(dev, ep);
- } else
- ep->reschedule_in_poll--;
-
+ ew32(INTSTAT, EpicNapiEvent);
+ epic_napi_irq_on(dev, ep);
spin_unlock_irqrestore(&ep->napi_lock, flags);
-
- if (more)
- goto rx_action;
}
return work_done;
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 67154621abcf..97280daba27f 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -113,6 +113,7 @@ struct smc_private {
struct mii_if_info mii_if;
int duplex;
int rx_ovrn;
+ unsigned long last_rx;
};
/* Special definitions for Megahertz multifunction cards */
@@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev)
if (!(rx_status & RS_ERRORS)) {
/* do stuff to make a new packet */
struct sk_buff *skb;
+ struct smc_private *smc = netdev_priv(dev);
/* Note: packet_length adds 5 or 6 extra bytes here! */
skb = netdev_alloc_skb(dev, packet_length+2);
@@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
+ smc->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += packet_length;
if (rx_status & RS_MULTICAST)
@@ -1790,7 +1792,7 @@ static void media_check(u_long arg)
}
/* Ignore collisions unless we've had no rx's recently */
- if (time_after(jiffies, dev->last_rx + HZ)) {
+ if (time_after(jiffies, smc->last_rx + HZ)) {
if (smc->tx_err || (smc->media_status & EPH_16COL))
media |= EPH_16COL;
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3174aebb322f..2fa3c1d03abc 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
smsc9420_pci_flush_write(pd);
if (work_done < budget) {
- napi_complete(&pd->napi);
+ napi_complete_done(&pd->napi, work_done);
/* re-enable RX DMA interrupts */
dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
index 1c1157d2bd40..ecd7a5edef5d 100644
--- a/drivers/net/ethernet/stmicro/Kconfig
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO
default y
depends on HAS_IOMEM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
+ If you have a network (Ethernet) card based on Synopsys Ethernet IP
+ Cores, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index ab66248a4b78..cfbe3634dfa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,5 +1,5 @@
config STMMAC_ETH
- tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
select PHYLIB
@@ -7,9 +7,8 @@ config STMMAC_ETH
imply PTP_1588_CLOCK
select RESET_CONTROLLER
---help---
- This is the driver for the Ethernet IPs are built around a
- Synopsys IP Core and only tested on the STMicroelectronics
- platforms.
+ This is the driver for the Ethernet IPs built around a
+ Synopsys IP Core.
if STMMAC_ETH
@@ -29,6 +28,15 @@ config STMMAC_PLATFORM
if STMMAC_PLATFORM
+config DWMAC_DWC_QOS_ETH
+ tristate "Support for snps,dwc-qos-ethernet.txt DT binding."
+ select PHYLIB
+ select CRC32
+ select MII
+ depends on OF && HAS_DMA
+ help
+ Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
+
config DWMAC_GENERIC
tristate "Generic driver for DWMAC"
default STMMAC_PLATFORM
@@ -143,11 +151,11 @@ config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
---help---
- This is to select the Synopsys DWMAC available on PCI devices,
- if you have a controller with this interface, say Y or M here.
+	  This selects the PCI bus support for the stmmac driver.
+ This driver was tested on XLINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit.
- This PCI support is tested on XLINX XC2V3000 FF1152AMT0221
- D1215994A VIRTEX FPGA board.
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 8f83a86ba13c..700c60336674 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 026e8e9cb942..01a8c020d6db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -16,10 +16,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b13a144f72ad..144fe84e8a53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -71,7 +67,7 @@ struct stmmac_extra_stats {
unsigned long overflow_error;
unsigned long ipc_csum_error;
unsigned long rx_collision;
- unsigned long rx_crc;
+ unsigned long rx_crc_errors;
unsigned long dribbling_bit;
unsigned long rx_length;
unsigned long rx_mii;
@@ -323,6 +319,9 @@ struct dma_features {
/* TX and RX number of channels */
unsigned int number_rx_channel;
unsigned int number_tx_channel;
+ /* TX and RX number of queues */
+ unsigned int number_rx_queues;
+ unsigned int number_tx_queues;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
};
@@ -340,7 +339,7 @@ struct dma_features {
/* Common MAC defines */
#define MAC_CTRL_REG 0x00000000 /* MAC Control */
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
@@ -454,6 +453,8 @@ struct stmmac_ops {
void (*core_init)(struct mac_device_info *hw, int mtu);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc)(struct mac_device_info *hw);
+ /* Enable RX Queues */
+ void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw);
/* Handle extra events on specific interrupts hw dependent */
@@ -471,7 +472,8 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw);
+ void (*set_eee_mode)(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index faeeef75d7f1..0c2432b1ce67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -11,10 +11,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 1d181e205d6e..ca9d7e48034c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -17,10 +17,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
new file mode 100644
index 000000000000..1a3fa3d9f855
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -0,0 +1,202 @@
+/*
+ * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 burst_map = 0;
+ u32 bit_index = 0;
+ u32 a_index = 0;
+
+ if (!plat_dat->axi) {
+ plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+
+ if (!plat_dat->axi)
+ return -ENOMEM;
+ }
+
+ plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
+ if (of_property_read_u32(np, "snps,write-requests",
+ &plat_dat->axi->axi_wr_osr_lmt)) {
+ /**
+ * Since the register has a reset value of 1, if property
+ * is missing, default to 1.
+ */
+ plat_dat->axi->axi_wr_osr_lmt = 1;
+ } else {
+ /**
+ * If property exists, to keep the behavior from dwc_eth_qos,
+ * subtract one after parsing.
+ */
+ plat_dat->axi->axi_wr_osr_lmt--;
+ }
+
+	if (of_property_read_u32(np, "snps,read-requests",
+ &plat_dat->axi->axi_rd_osr_lmt)) {
+ /**
+ * Since the register has a reset value of 1, if property
+ * is missing, default to 1.
+ */
+ plat_dat->axi->axi_rd_osr_lmt = 1;
+ } else {
+ /**
+ * If property exists, to keep the behavior from dwc_eth_qos,
+ * subtract one after parsing.
+ */
+ plat_dat->axi->axi_rd_osr_lmt--;
+ }
+ of_property_read_u32(np, "snps,burst-map", &burst_map);
+
+ /* converts burst-map bitmask to burst array */
+ for (bit_index = 0; bit_index < 7; bit_index++) {
+ if (burst_map & (1 << bit_index)) {
+ switch (bit_index) {
+ case 0:
+ plat_dat->axi->axi_blen[a_index] = 4; break;
+ case 1:
+ plat_dat->axi->axi_blen[a_index] = 8; break;
+ case 2:
+ plat_dat->axi->axi_blen[a_index] = 16; break;
+ case 3:
+ plat_dat->axi->axi_blen[a_index] = 32; break;
+ case 4:
+ plat_dat->axi->axi_blen[a_index] = 64; break;
+ case 5:
+ plat_dat->axi->axi_blen[a_index] = 128; break;
+ case 6:
+ plat_dat->axi->axi_blen[a_index] = 256; break;
+ default:
+ break;
+ }
+ a_index++;
+ }
+ }
+
+ /* dwc-qos needs GMAC4, AAL, TSO and PMT */
+ plat_dat->has_gmac4 = 1;
+ plat_dat->dma_cfg->aal = 1;
+ plat_dat->tso_en = 1;
+ plat_dat->pmt = 1;
+
+ return 0;
+}
+
+static int dwc_eth_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct resource *res;
+ int ret;
+
+ memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
+
+ /**
+	 * Since stmmac_platform supports named IRQs only, basic platform
+ * resource initialization is done in the glue logic.
+ */
+ stmmac_res.irq = platform_get_irq(pdev, 0);
+ if (stmmac_res.irq < 0) {
+ if (stmmac_res.irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "IRQ configuration information not found\n");
+
+ return stmmac_res.irq;
+ }
+ stmmac_res.wol_irq = stmmac_res.irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(stmmac_res.addr))
+ return PTR_ERR(stmmac_res.addr);
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(plat_dat->stmmac_clk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ ret = PTR_ERR(plat_dat->stmmac_clk);
+ plat_dat->stmmac_clk = NULL;
+ goto err_remove_config_dt;
+ }
+ clk_prepare_enable(plat_dat->stmmac_clk);
+
+ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(plat_dat->pclk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ ret = PTR_ERR(plat_dat->pclk);
+ plat_dat->pclk = NULL;
+ goto err_out_clk_dis_phy;
+ }
+ clk_prepare_enable(plat_dat->pclk);
+
+ ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
+ if (ret)
+ goto err_out_clk_dis_aper;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_out_clk_dis_aper;
+
+ return 0;
+
+err_out_clk_dis_aper:
+ clk_disable_unprepare(plat_dat->pclk);
+err_out_clk_dis_phy:
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int dwc_eth_dwmac_remove(struct platform_device *pdev)
+{
+ return stmmac_pltfr_remove(pdev);
+}
+
+static const struct of_device_id dwc_eth_dwmac_match[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
+
+static struct platform_driver dwc_eth_dwmac_driver = {
+ .probe = dwc_eth_dwmac_probe,
+ .remove = dwc_eth_dwmac_remove,
+ .driver = {
+ .name = "dwc-eth-dwmac",
+ .of_match_table = dwc_eth_dwmac_match,
+ },
+};
+module_platform_driver(dwc_eth_dwmac_driver);
+
+MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver");
+MODULE_LICENSE("GPL v2");
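For reference, the burst-map conversion in dwc_eth_dwmac_config_dt() above maps each set bit i of the "snps,burst-map" bitmask to an AXI burst length of 4 << i, which is exactly what the switch statement enumerates. A minimal standalone sketch of that arithmetic (plain userspace C, example property value only; not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int burst_map = 0x15;		/* example: bits 0, 2 and 4 set */
	unsigned int blen[7] = { 0 };
	unsigned int i, n = 0;

	/* each set bit i selects an AXI burst length of 4 << i (4..256) */
	for (i = 0; i < 7; i++)
		if (burst_map & (1u << i))
			blen[n++] = 4u << i;

	for (i = 0; i < n; i++)
		printf("axi_blen[%u] = %u\n", i, blen[i]);
	return 0;
}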
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index ffaed1f35efe..9685555932ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -35,10 +35,6 @@
#define PRG_ETH0_TXDLY_SHIFT 5
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
-#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT)
/* divider for the result of m250_sel */
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
@@ -69,6 +65,8 @@ struct meson8b_dwmac {
struct clk_divider m25_div;
struct clk *m25_div_clk;
+
+ u32 tx_delay_ns;
};
static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
unsigned long clk_rate;
+ u8 tx_dly_val = 0;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
+ /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
+ * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
+ tx_dly_val = dwmac->tx_delay_ns >> 1;
+ /* fall through */
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
/* Generate a 25MHz clock for the PHY */
clk_rate = 25 * 1000 * 1000;
@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- /* TX clock delay - all known boards use a 1/4 cycle delay */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- PRG_ETH0_TXDLY_QUARTER);
+ tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
break;
case PHY_INTERFACE_MODE_RMII:
@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ /* use 2ns as fallback since this value was previously hardcoded */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
+ &dwmac->tx_delay_ns))
+ dwmac->tx_delay_ns = 2;
+
ret = meson8b_init_clk(dwmac);
if (ret)
goto err_remove_config_dt;
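The meson8b hunk above derives the 2-bit TXDLY field from the new "amlogic,tx-delay-ns" property: one register step is a quarter of the 8 ns RGMII clock period, i.e. 2 ns, so the field value is simply tx_delay_ns / 2. A standalone check of that mapping (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int tx_delay_ns;

	/* 0 ns -> 0x0, 2 ns -> 0x1, 4 ns -> 0x2, 6 ns -> 0x3 */
	for (tx_delay_ns = 0; tx_delay_ns <= 6; tx_delay_ns += 2) {
		unsigned int tx_dly_val = tx_delay_ns >> 1;

		printf("%u ns -> field 0x%x (actual delay %u ns)\n",
		       tx_delay_ns, tx_dly_val, tx_dly_val * 2);
	}
	return 0;
}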
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index fa6e9704c077..e5db6ac36235 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_ops = {
.set_rmii_speed = rk3288_set_rmii_speed,
};
+#define RK3328_GRF_MAC_CON0 0x0900
+#define RK3328_GRF_MAC_CON1 0x0904
+
+/* RK3328_GRF_MAC_CON0 */
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3328_GRF_MAC_CON1 */
+#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
+#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
+#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
+#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_RMII_MODE_CLR |
+ RK3328_GMAC_RXCLK_DLY_ENABLE |
+ RK3328_GMAC_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0,
+ RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_RMII_MODE);
+
+ /* set MAC to RMII mode */
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11));
+}
+
+static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_RMII_CLK_2_5M |
+ RK3328_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_RMII_CLK_25M |
+ RK3328_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static const struct rk_gmac_ops rk3328_ops = {
+ .set_to_rgmii = rk3328_set_to_rgmii,
+ .set_to_rmii = rk3328_set_to_rmii,
+ .set_rgmii_speed = rk3328_set_rgmii_speed,
+ .set_rmii_speed = rk3328_set_rmii_speed,
+};
+
#define RK3366_GRF_SOC_CON6 0x0418
#define RK3366_GRF_SOC_CON7 0x041c
@@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 1f997027ae51..17d4bbaeb65c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
* mode. Create a copy of the core reset handle so it can be used by
* the driver later.
*/
- dwmac->stmmac_rst = stpriv->stmmac_rst;
+ dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
ret = socfpga_dwmac_set_phy_mode(dwmac);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 1657acfa70c2..e14984814041 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 52b9407a8a39..c02d36629c52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -10,10 +10,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..91c8926b7479 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,10 +16,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -305,8 +301,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
{
void __iomem *ioaddr = hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;
+ /* Discard masked bits */
+ intr_status &= ~intr_mask;
+
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
@@ -343,11 +343,14 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw)
+static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
+	/* TODO: handle en_tx_lpi_clockgating */
+
 /* Enable the link status receive on RGMII, SGMII or SMII
* receive path and instruct the transmit to enter in LPI
* state.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 612d3aaac9a4..fbaec0ffd9ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -16,10 +16,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 9dd2987e284d..8ab518997b1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -18,10 +18,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index e5664da382f3..d40e91e8fc7b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -18,10 +18,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 3e8d4fefa5e0..db45134fddf0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -22,6 +22,7 @@
#define GMAC_HASH_TAB_32_63 0x00000014
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_RXQ_CTRL0 0x000000a0
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
#define GMAC_PCS_BASE 0x000000e0
@@ -44,6 +45,11 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
+/* MAC RX Queue Enable */
+#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
+#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
+#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1)
+
/* MAC Flow Control RX */
#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
@@ -84,6 +90,19 @@ enum power_event {
power_down = 0x00000001,
};
+/* Energy Efficient Ethernet (EEE) for GMAC4
+ *
+ * LPI status, timer and control register offset
+ */
+#define GMAC4_LPI_CTRL_STATUS 0xd0
+#define GMAC4_LPI_TIMER_CTRL 0xd4
+
+/* LPI control and status defines */
+#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
@@ -133,6 +152,8 @@ enum power_event {
/* MAC HW features2 bitmap */
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
+#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6)
+#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0)
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
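Each RX queue owns a 2-bit field in GMAC_RXQ_CTRL0 at offset queue * 2; GMAC_RX_QUEUE_CLEAR() masks that field out and the AV/DCB macros set its low or high bit respectively. A standalone illustration of the resulting masks (example queue number only):

#include <stdio.h>

int main(void)
{
	unsigned int queue = 1;				/* example queue */
	unsigned int clear = ~(0x3u << (queue * 2));	/* GMAC_RX_QUEUE_CLEAR */
	unsigned int av = 1u << (queue * 2);		/* GMAC_RX_AV_QUEUE_ENABLE */
	unsigned int dcb = 1u << (queue * 2 + 1);	/* GMAC_RX_DCB_QUEUE_ENABLE */

	printf("queue %u: clear 0x%08x, AV 0x%08x, DCB 0x%08x\n",
	       queue, clear, av, dcb);
	return 0;
}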
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index eaed7cb21867..202216cd6789 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -59,6 +59,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
writel(value, ioaddr + GMAC_INT_EN);
}
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
+
+ value &= GMAC_RX_QUEUE_CLEAR(queue);
+ value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+
+ writel(value, ioaddr + GMAC_RXQ_CTRL0);
+}
+
static void dwmac4_dump_regs(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -126,6 +137,65 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
+static void dwmac4_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+	/* Enable the link status receive on RGMII, SGMII or SMII
+ * receive path and instruct the transmit to enter in LPI
+ * state.
+ */
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+ if (en_tx_lpi_clockgating)
+ value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ if (link)
+ value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
+}
+
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -392,12 +462,17 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
.dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status,
.flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
.pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
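dwmac4_set_eee_timer() above packs both EEE timers into GMAC4_LPI_TIMER_CTRL: TW (microseconds) in the low 16 bits and LS (milliseconds) in the 10 bits starting at bit 16. A quick standalone check of that packing (example timer values only):

#include <stdio.h>

int main(void)
{
	int ls = 1000;	/* example link-status timer, in ms */
	int tw = 30;	/* example wait timer, in us */
	unsigned int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

	printf("GMAC4_LPI_TIMER_CTRL = 0x%08x\n", value);
	return 0;
}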
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 8816515e1bbb..843ec69222ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->rx_mii++;
if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
- x->rx_crc++;
+ x->rx_crc_errors++;
stats->rx_crc_errors++;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 8196ab5fc33c..377d1b44d4f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -303,6 +303,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
dma_cap->number_tx_channel =
((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+ /* TX and RX number of queues */
+ dma_cap->number_rx_queues =
+ ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
/* IEEE 1588-2002 */
dma_cap->time_stamp = 0;
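The queue counts added here are decoded from MAC_HW_Feature2 the same way as the channel counts: each 4-bit field stores count-minus-one, so the driver masks, shifts and adds one. A standalone sketch of the extraction (the hw_cap value is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int hw_cap = 0x000000c3;	/* example register value */

	/* GMAC_HW_FEAT_RXQCNT is bits 3:0, GMAC_HW_FEAT_TXQCNT is bits 9:6 */
	unsigned int rx_queues = (hw_cap & 0xf) + 1;
	unsigned int tx_queues = ((hw_cap >> 6) & 0xf) + 1;

	printf("rx queues: %u, tx queues: %u\n", rx_queues, tx_queues);
	return 0;
}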
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 726d9d9aaf83..56e485f79077 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 84e3e84cec7d..e60bfca2a763 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -10,10 +10,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -21,6 +17,7 @@
*******************************************************************************/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include "common.h"
#include "dwmac_dma.h"
@@ -29,19 +26,16 @@
int dwmac_dma_reset(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
+ int err;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
- break;
- mdelay(10);
- }
- if (limit < 0)
+ err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+				 10000, 100000);
+ if (err)
return -EBUSY;
return 0;
@@ -102,7 +96,7 @@ static void show_tx_process_state(unsigned int status)
pr_debug("- TX (Stopped): Reset or Stop command\n");
break;
case 1:
- pr_debug("- TX (Running):Fetching the Tx desc\n");
+ pr_debug("- TX (Running): Fetching the Tx desc\n");
break;
case 2:
pr_debug("- TX (Running): Waiting for end of tx\n");
@@ -136,7 +130,7 @@ static void show_rx_process_state(unsigned int status)
pr_debug("- RX (Running): Fetching the Rx desc\n");
break;
case 2:
- pr_debug("- RX (Running):Checking for end of pkt\n");
+ pr_debug("- RX (Running): Checking for end of pkt\n");
break;
case 3:
pr_debug("- RX (Running): Waiting for Rx pkt\n");
@@ -246,7 +240,7 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned long data;
data = (addr[5] << 8) | addr[4];
- /* For MAC Addr registers se have to set the Address Enable (AE)
+ /* For MAC Addr registers we have to set the Address Enable (AE)
* bit that has no effect on the High Reg 0 where the bit 31 (MO)
* is RO.
*/
@@ -261,9 +255,9 @@ void stmmac_set_mac(void __iomem *ioaddr, bool enable)
u32 value = readl(ioaddr + MAC_CTRL_REG);
if (enable)
- value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
writel(value, ioaddr + MAC_CTRL_REG);
}
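The open-coded reset loop in dwmac_dma_reset() is replaced by readl_poll_timeout(addr, val, cond, sleep_us, timeout_us), which keeps re-reading the register until the condition is true or the timeout expires and returns -ETIMEDOUT on failure. Its behaviour is roughly that of the following standalone stand-in (simplified timing, userspace only; not the kernel helper itself):

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* simplified stand-in: poll read_fn() until cond_fn() holds or timeout_us
 * elapses, sleeping sleep_us between reads
 */
static int poll_timeout(unsigned int (*read_fn)(void),
			int (*cond_fn)(unsigned int), unsigned int *val,
			unsigned long sleep_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		*val = read_fn();
		if (cond_fn(*val))
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
	}
}

static unsigned int fake_read(void) { return 0; }	/* reset bit already clear */
static int reset_done(unsigned int v) { return !(v & 0x1); }

int main(void)
{
	unsigned int value;

	printf("poll result: %d\n",
	       poll_timeout(fake_read, reset_done, &value, 10000, 100000));
	return 0;
}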
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index f0d86321dfe2..323b59ec74a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -225,7 +221,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->rx_mii++;
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
- x->rx_crc++;
+ x->rx_crc_errors++;
stats->rx_crc_errors++;
}
ret = discard_frame;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 38a1a5603293..c037326331f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index ce9aa792857b..e9b04c28980f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index fd78406e2e9a..efb818ebd55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -115,7 +111,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
stats->collisions++;
}
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
- x->rx_crc++;
+ x->rx_crc_errors++;
stats->rx_crc_errors++;
}
ret = discard_frame;
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 9983ce9bd90d..452f256ff03f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -16,10 +16,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index eab04aeeeb95..cd8fb619b1e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -10,10 +10,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -106,9 +102,6 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
- struct clk *stmmac_clk;
- struct clk *pclk;
- struct reset_control *stmmac_rst;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -120,8 +113,6 @@ struct stmmac_priv {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
- struct clk *clk_ptp_ref;
- unsigned int clk_ptp_rate;
u32 adv_ts;
int use_riwt;
int irq_wake;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 699ee1d30426..5ff6bc4eb8f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -65,7 +61,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(overflow_error),
STMMAC_STAT(ipc_csum_error),
STMMAC_STAT(rx_collision),
- STMMAC_STAT(rx_crc),
+ STMMAC_STAT(rx_crc_errors),
STMMAC_STAT(dribbling_bit),
STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
@@ -446,24 +442,24 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
memset(reg_space, 0x0, REG_SPACE_SIZE);
- if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
+ if (priv->plat->has_gmac || priv->plat->has_gmac4) {
/* MAC registers */
- for (i = 0; i < 12; i++)
+ for (i = 0; i < 55; i++)
reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
- for (i = 0; i < 9; i++)
- reg_space[i + 12] =
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
- reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
- reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
} else {
/* MAC registers */
- for (i = 0; i < 55; i++)
+ for (i = 0; i < 12; i++)
reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
- for (i = 0; i < 22; i++)
- reg_space[i + 55] =
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
}
}
@@ -712,7 +708,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
- unsigned long clk = clk_get_rate(priv->stmmac_clk);
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk)
return 0;
@@ -722,7 +718,7 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
- unsigned long clk = clk_get_rate(priv->stmmac_clk);
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk)
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 10d6059b2f26..721b61655261 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a276a32d57f2..3cbe09682afe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -13,10 +13,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -158,7 +154,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
u32 clk_rate;
- clk_rate = clk_get_rate(priv->stmmac_clk);
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
/* Platform provided default clk_csr would be assumed valid
* for all other cases except for the below mentioned ones.
@@ -191,7 +187,7 @@ static void print_pkt(unsigned char *buf, int len)
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
- unsigned avail;
+ u32 avail;
if (priv->dirty_tx > priv->cur_tx)
avail = priv->dirty_tx - priv->cur_tx - 1;
@@ -203,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
- unsigned dirty;
+ u32 dirty;
if (priv->dirty_rx <= priv->cur_rx)
dirty = priv->cur_rx - priv->dirty_rx;
@@ -216,7 +212,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
/**
* stmmac_hw_fix_mac_speed - callback for speed selection
* @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuraton
+ * Description: on some platforms (e.g. ST), some HW system configuration
* registers have to be set according to the link speed negotiated.
*/
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
@@ -239,7 +235,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if ((priv->dirty_tx == priv->cur_tx) &&
(priv->tx_path_in_lpi_mode == false))
- priv->hw->mac->set_eee_mode(priv->hw);
+ priv->hw->mac->set_eee_mode(priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
}
/**
@@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_hwtstamp_ioctl - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specefic structure, that can contain a pointer to
+ * @ifr: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* Description:
* This function configures the MAC to enable/disable both outgoing(TX)
@@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* program Sub Second Increment reg */
sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ptpaddr, priv->clk_ptp_rate,
+ priv->ptpaddr, priv->plat->clk_ptp_rate,
priv->plat->has_gmac4);
temp = div_u64(1000000000ULL, sec_inc);
@@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
* where, freq_div_ratio = 1e9ns/sec_inc
*/
temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
priv->hw->ptp->config_addend(priv->ptpaddr,
priv->default_addend);
@@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- /* Fall-back to main clock in case of no PTP ref is passed */
- priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
- if (IS_ERR(priv->clk_ptp_ref)) {
- priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
- priv->clk_ptp_ref = NULL;
- netdev_dbg(priv->dev, "PTP uses main clock\n");
- } else {
- clk_prepare_enable(priv->clk_ptp_ref);
- priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
- }
-
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x core */
if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
@@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- if (priv->clk_ptp_ref)
- clk_disable_unprepare(priv->clk_ptp_ref);
+ if (priv->plat->clk_ptp_ref)
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
@@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct net_device *dev)
int new_state = 0;
unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
- if (phydev == NULL)
+ if (!phydev)
return;
spin_lock_irqsave(&priv->lock, flags);
@@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely((priv->plat->has_gmac) ||
- (priv->plat->has_gmac4)))
+ if (priv->plat->has_gmac ||
+ priv->plat->has_gmac4)
ctrl &= ~priv->hw->link.port;
- stmmac_hw_fix_mac_speed(priv);
break;
case 100:
+ if (priv->plat->has_gmac ||
+ priv->plat->has_gmac4) {
+ ctrl |= priv->hw->link.port;
+ ctrl |= priv->hw->link.speed;
+ } else {
+ ctrl &= ~priv->hw->link.port;
+ }
+ break;
case 10:
- if (likely((priv->plat->has_gmac) ||
- (priv->plat->has_gmac4))) {
+ if (priv->plat->has_gmac ||
+ priv->plat->has_gmac4) {
ctrl |= priv->hw->link.port;
- if (phydev->speed == SPEED_100) {
- ctrl |= priv->hw->link.speed;
- } else {
- ctrl &= ~(priv->hw->link.speed);
- }
+ ctrl &= ~(priv->hw->link.speed);
} else {
ctrl &= ~priv->hw->link.port;
}
- stmmac_hw_fix_mac_speed(priv);
break;
default:
netif_warn(priv, link, priv->dev,
- "Speed (%d) not 10/100\n",
- phydev->speed);
+ "broken speed: %d\n", phydev->speed);
+ phydev->speed = SPEED_UNKNOWN;
break;
}
-
+ if (phydev->speed != SPEED_UNKNOWN)
+ stmmac_hw_fix_mac_speed(priv);
priv->speed = phydev->speed;
}
@@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct net_device *dev)
} else if (priv->oldlink) {
new_state = 1;
priv->oldlink = 0;
- priv->speed = 0;
- priv->oldduplex = -1;
+ priv->speed = SPEED_UNKNOWN;
+ priv->oldduplex = DUPLEX_UNKNOWN;
}
if (new_state && netif_msg_link(priv))
@@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_device *dev)
int interface = priv->plat->interface;
int max_speed = priv->plat->max_speed;
priv->oldlink = 0;
- priv->speed = 0;
- priv->oldduplex = -1;
+ priv->speed = SPEED_UNKNOWN;
+ priv->oldduplex = DUPLEX_UNKNOWN;
if (priv->plat->phy_node) {
phydev = of_phy_connect(dev, priv->plat->phy_node,
@@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_device *dev)
if (phydev->is_pseudo_fixed_link)
phydev->irq = PHY_POLL;
- netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
- __func__, phydev->phy_id, phydev->link);
-
+ phy_attached_info(phydev);
return 0;
}
@@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
* @dev: net device structure
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers. It suppors the chained and ring
+ * and allocates the socket buffers. It supports the chained and ring
* modes.
*/
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
@@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
int i;
for (i = 0; i < DMA_TX_SIZE; i++) {
- struct dma_desc *p;
-
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
-
if (priv->tx_skbuff_dma[i].buf) {
if (priv->tx_skbuff_dma[i].map_as_page)
dma_unmap_page(priv->device,
@@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
DMA_TO_DEVICE);
}
- if (priv->tx_skbuff[i] != NULL) {
+ if (priv->tx_skbuff[i]) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
priv->tx_skbuff_dma[i].buf = 0;
@@ -1271,6 +1250,28 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+ int rx_count = priv->dma_cap.number_rx_queues;
+ int queue = 0;
+
+ /* If GMAC does not have multiple queues, then this is not necessary */
+ if (rx_count == 1)
+ return;
+
+ /**
+ * If the core is synthesized with multiple rx queues / multiple
+ * dma channels, then rx queues will be disabled by default.
+ * For now only rx queue 0 is enabled.
+ */
+ priv->hw->mac->rx_queue_enable(priv->hw, queue);
+}
+
+/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv: driver private structure
* Description: it is used for configuring the DMA operation mode register in
@@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
- /* If required, perform hw setup of the bus. */
- if (priv->plat->bus_setup)
- priv->plat->bus_setup(priv->ioaddr);
-
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
int speed = priv->plat->mac_port_sel_speed;
@@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
+ /* Initialize MAC RX Queues */
+ if (priv->hw->mac->rx_queue_enable)
+ stmmac_mac_enable_rx_queues(priv);
+
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
@@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
- if (ret)
- netdev_warn(priv->dev, "fail to init PTP.\n");
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
}
#ifdef CONFIG_DEBUG_FS
@@ -2519,7 +2522,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
if (priv->hwts_rx_en && !priv->extend_desc) {
- /* DESC2 & DESC3 will be overwitten by device
+ /* DESC2 & DESC3 will be overwritten by device
* with timestamp value, hence reinitialize
* them in stmmac_rx_refill() function so that
* device can reuse it.
@@ -2542,7 +2545,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
- /* If frame length is greather than skb buffer size
+ /* If frame length is greater than skb buffer size
* (preallocated during init) then the packet is
* ignored
*/
@@ -2669,7 +2672,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
work_done = stmmac_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
stmmac_enable_dma_irq(priv);
}
return work_done;
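Note: the napi_complete_done() conversion here (and in the niu, sungem and sunvnet hunks further down) reports the amount of work actually completed back to the NAPI core, which can use it for adaptive interrupt moderation. A minimal sketch of the usual poll pattern, with my_rx_clean() and my_enable_irq() standing in as hypothetical driver helpers:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work_done;

		work_done = my_rx_clean(priv, budget);	/* hypothetical RX processing */
		if (work_done < budget) {
			/* report how much work was done before re-arming interrupts */
			napi_complete_done(napi, work_done);
			my_enable_irq(priv);		/* hypothetical */
		}
		return work_done;
	}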
@@ -2748,7 +2751,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
/* Some GMAC devices have a bugged Jumbo frame support that
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
- * the TX csum insertionin the TDES and not use SF.
+ * the TX csum insertion in the TDES and not use SF.
*/
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
@@ -2894,9 +2897,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
struct dma_desc *p = (struct dma_desc *)head;
for (i = 0; i < size; i++) {
- u64 x;
if (extend_desc) {
- x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(ep->basic.des0),
@@ -2905,7 +2906,6 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
le32_to_cpu(ep->basic.des3));
ep++;
} else {
- x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
@@ -2975,7 +2975,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
(priv->dma_cap.hash_filter) ? "Y" : "N");
seq_printf(seq, "\tMultiple MAC address registers: %s\n",
(priv->dma_cap.multi_addr) ? "Y" : "N");
- seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
(priv->dma_cap.sma_mdio) ? "Y" : "N");
@@ -3251,44 +3251,8 @@ int stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
- priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
- if (IS_ERR(priv->stmmac_clk)) {
- netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
- __func__);
- /* If failed to obtain stmmac_clk and specific clk_csr value
- * is NOT passed from the platform, probe fail.
- */
- if (!priv->plat->clk_csr) {
- ret = PTR_ERR(priv->stmmac_clk);
- goto error_clk_get;
- } else {
- priv->stmmac_clk = NULL;
- }
- }
- clk_prepare_enable(priv->stmmac_clk);
-
- priv->pclk = devm_clk_get(priv->device, "pclk");
- if (IS_ERR(priv->pclk)) {
- if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto error_pclk_get;
- }
- priv->pclk = NULL;
- }
- clk_prepare_enable(priv->pclk);
-
- priv->stmmac_rst = devm_reset_control_get(priv->device,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(priv->stmmac_rst)) {
- if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto error_hw_init;
- }
- dev_info(priv->device, "no reset control found\n");
- priv->stmmac_rst = NULL;
- }
- if (priv->stmmac_rst)
- reset_control_deassert(priv->stmmac_rst);
+ if (priv->plat->stmmac_rst)
+ reset_control_deassert(priv->plat->stmmac_rst);
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
@@ -3326,9 +3290,9 @@ int stmmac_dvr_probe(struct device *device,
(priv->plat->maxmtu >= ndev->min_mtu))
ndev->max_mtu = priv->plat->maxmtu;
else if (priv->plat->maxmtu < ndev->min_mtu)
- netdev_warn(priv->dev,
- "%s: warning: maxmtu having invalid value (%d)\n",
- __func__, priv->plat->maxmtu);
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3340,7 +3304,8 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
- netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@@ -3366,17 +3331,17 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- netdev_err(priv->dev,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
ret = register_netdev(ndev);
if (ret) {
- netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
- __func__, ret);
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
goto error_netdev_register;
}
@@ -3390,10 +3355,6 @@ error_netdev_register:
error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
- clk_disable_unprepare(priv->pclk);
-error_pclk_get:
- clk_disable_unprepare(priv->stmmac_clk);
-error_clk_get:
free_netdev(ndev);
return ret;
@@ -3419,10 +3380,10 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
- if (priv->stmmac_rst)
- reset_control_assert(priv->stmmac_rst);
- clk_disable_unprepare(priv->pclk);
- clk_disable_unprepare(priv->stmmac_clk);
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -3471,14 +3432,14 @@ int stmmac_suspend(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable(priv->pclk);
- clk_disable(priv->stmmac_clk);
+ clk_disable(priv->plat->pclk);
+ clk_disable(priv->plat->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
priv->oldlink = 0;
- priv->speed = 0;
- priv->oldduplex = -1;
+ priv->speed = SPEED_UNKNOWN;
+ priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -3511,9 +3472,9 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
- /* enable the clk prevously disabled */
- clk_enable(priv->stmmac_clk);
- clk_enable(priv->pclk);
+ /* enable the clk previously disabled */
+ clk_enable(priv->plat->stmmac_clk);
+ clk_enable(priv->plat->pclk);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b0344c213752..db157a47000c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -13,10 +13,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -24,13 +20,14 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
-#include <asm/io.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
#include "stmmac.h"
@@ -42,22 +39,6 @@
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
-static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
-{
- unsigned long curr;
- unsigned long finish = jiffies + 3 * HZ;
-
- do {
- curr = jiffies;
- if (readl(ioaddr + mii_addr) & MII_BUSY)
- cpu_relax();
- else
- return 0;
- } while (!time_after_eq(curr, finish));
-
- return -EBUSY;
-}
-
/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
@@ -74,7 +55,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
-
+ u32 v;
int data;
u32 value = MII_BUSY;
@@ -86,12 +67,14 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
if (priv->plat->has_gmac4)
value |= MII_GMAC4_READ;
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
return -EBUSY;
writel(value, priv->ioaddr + mii_address);
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
return -EBUSY;
/* Read the data from the MII data register */
@@ -115,7 +98,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
-
+ u32 v;
u32 value = MII_BUSY;
value |= (phyaddr << priv->hw->mii.addr_shift)
@@ -130,7 +113,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
value |= MII_WRITE;
/* Wait until any existing MII operation is complete */
- if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
return -EBUSY;
/* Set the MII address register to write */
@@ -138,7 +122,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
writel(value, priv->ioaddr + mii_address);
/* Wait until any existing MII operation is complete */
- return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+ return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000);
}
/**
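Note: readl_poll_timeout() from <linux/iopoll.h> replaces the open-coded stmmac_mdio_busy_wait() removed above: it re-reads the register until the supplied condition is true or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A simplified sketch of what a call such as readl_poll_timeout(addr, v, !(v & MII_BUSY), 100, 10000) roughly boils down to, expressed as a hypothetical wait_mdio_idle() helper (the real macro also re-checks the condition once after the timeout and handles a zero sleep interval):

	static int wait_mdio_idle(void __iomem *addr)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(10000);
		u32 v;

		for (;;) {
			v = readl(addr);
			if (!(v & MII_BUSY))
				return 0;		/* condition met */
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;	/* MDIO stayed busy */
			usleep_range(100, 200);		/* the 100us sleep argument */
		}
	}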
@@ -156,9 +141,9 @@ int stmmac_mdio_reset(struct mii_bus *bus)
#ifdef CONFIG_OF
if (priv->device->of_node) {
-
if (data->reset_gpio < 0) {
struct device_node *np = priv->device->of_node;
+
if (!np)
return 0;
@@ -198,7 +183,7 @@ int stmmac_mdio_reset(struct mii_bus *bus)
/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
- * on MDC, so perform a dummy mdio read. To be upadted for GMAC4
+ * on MDC, so perform a dummy mdio read. To be updated for GMAC4
* if needed.
*/
if (!priv->plat->has_gmac4)
@@ -225,7 +210,7 @@ int stmmac_mdio_register(struct net_device *ndev)
return 0;
new_bus = mdiobus_alloc();
- if (new_bus == NULL)
+ if (!new_bus)
return -ENOMEM;
if (mdio_bus_data->irqs)
@@ -262,49 +247,48 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 0;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
- if (phydev) {
- int act = 0;
- char irq_num[4];
- char *irq_str;
-
- /*
- * If an IRQ was provided to be assigned after
- * the bus probe, do it here.
- */
- if ((mdio_bus_data->irqs == NULL) &&
- (mdio_bus_data->probed_phy_irq > 0)) {
- new_bus->irq[addr] =
- mdio_bus_data->probed_phy_irq;
- phydev->irq = mdio_bus_data->probed_phy_irq;
- }
-
- /*
- * If we're going to bind the MAC to this PHY bus,
- * and no PHY number was provided to the MAC,
- * use the one probed here.
- */
- if (priv->plat->phy_addr == -1)
- priv->plat->phy_addr = addr;
-
- act = (priv->plat->phy_addr == addr);
- switch (phydev->irq) {
- case PHY_POLL:
- irq_str = "POLL";
- break;
- case PHY_IGNORE_INTERRUPT:
- irq_str = "IGNORE";
- break;
- default:
- sprintf(irq_num, "%d", phydev->irq);
- irq_str = irq_num;
- break;
- }
- netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
- phydev->phy_id, addr,
- irq_str, phydev_name(phydev),
- act ? " active" : "");
- found = 1;
+ int act = 0;
+ char irq_num[4];
+ char *irq_str;
+
+ if (!phydev)
+ continue;
+
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if (!mdio_bus_data->irqs &&
+ (mdio_bus_data->probed_phy_irq > 0)) {
+ new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
+ }
+
+ /*
+ * If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = addr;
+
+ act = (priv->plat->phy_addr == addr);
+ switch (phydev->irq) {
+ case PHY_POLL:
+ irq_str = "POLL";
+ break;
+ case PHY_IGNORE_INTERRUPT:
+ irq_str = "IGNORE";
+ break;
+ default:
+ sprintf(irq_num, "%d", phydev->irq);
+ irq_str = irq_num;
+ break;
}
+ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+ phydev->phy_id, addr, irq_str, phydev_name(phydev),
+ act ? " active" : "");
+ found = 1;
}
if (!found && !mdio_node) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3da4737620cb..5c9e462276b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 082cd48db6a7..433a84239a68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -121,7 +117,6 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
- axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
@@ -181,10 +176,19 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = false;
}
- /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
- for_each_child_of_node(np, plat->mdio_node) {
- if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
- break;
+ /* exception for dwmac-dwc-qos-eth glue logic */
+ if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ } else {
+ /**
+ * If snps,dwmac-mdio is passed from DT, always register
+ * the MDIO
+ */
+ for_each_child_of_node(np, plat->mdio_node) {
+ if (of_device_is_compatible(plat->mdio_node,
+ "snps,dwmac-mdio"))
+ break;
+ }
}
if (plat->mdio_node) {
@@ -249,6 +253,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
+ plat->en_tx_lpi_clockgating =
+ of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
+
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
*/
@@ -333,7 +340,54 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->axi = stmmac_axi_setup(pdev);
+ /* clock setup */
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ plat->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(plat->pclk)) {
+ if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
+ goto error_pclk_get;
+
+ plat->pclk = NULL;
+ }
+ clk_prepare_enable(plat->pclk);
+
+ /* Fall back to the main clock if no PTP ref clock is passed */
+ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+ if (IS_ERR(plat->clk_ptp_ref)) {
+ plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
+ plat->clk_ptp_ref = NULL;
+ dev_warn(&pdev->dev, "PTP uses main clock\n");
+ } else {
+ clk_prepare_enable(plat->clk_ptp_ref);
+ plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
+ dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ }
+
+ plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_rst)) {
+ if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
+ goto error_hw_init;
+
+ dev_info(&pdev->dev, "no reset control found\n");
+ plat->stmmac_rst = NULL;
+ }
+
return plat;
+
+error_hw_init:
+ clk_disable_unprepare(plat->pclk);
+error_pclk_get:
+ clk_disable_unprepare(plat->stmmac_clk);
+
+ return ERR_PTR(-EPROBE_DEFER);
}
/**
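Note: with clock and reset acquisition moved into stmmac_probe_config_dt(), the function can now legitimately fail with -EPROBE_DEFER, so callers are expected to propagate its error pointer. A minimal, illustrative sketch of how a platform glue probe typically consumes it (a hypothetical my_glue_probe(), error handling trimmed):

	static int my_glue_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat;
		struct stmmac_resources res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &res);
		if (ret)
			return ret;

		plat = stmmac_probe_config_dt(pdev, &res.mac);
		if (IS_ERR(plat))
			return PTR_ERR(plat);	/* may be -EPROBE_DEFER */

		ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
		if (ret)
			stmmac_remove_config_dt(pdev, plat);
		return ret;
	}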
@@ -351,12 +405,13 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
void stmmac_remove_config_dt(struct platform_device *pdev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 3eb281d1db08..d71bd80c5b5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index c06938c47af5..48fb72fc423c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -12,10 +12,6 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index a4b40e3015e5..b2caf5132bd2 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -70,19 +70,23 @@ config CASSINI
<http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
config SUNVNET_COMMON
- bool
+ tristate "Common routines to support Sun Virtual Networking"
depends on SUN_LDOMS
- default y if SUN_LDOMS
+ default m
config SUNVNET
tristate "Sun Virtual Network support"
+ default m
depends on SUN_LDOMS
+ depends on SUNVNET_COMMON
---help---
Support for virtual network devices under Sun Logical Domains.
config LDMVSW
tristate "Sun4v LDoms Virtual Switch support"
+ default m
depends on SUN_LDOMS
+ depends on SUNVNET_COMMON
---help---
Support for virtual switch devices under Sun4v Logical Domains.
This driver adds a network interface for every vsw-port node
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 335b87660638..89952deae47f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -41,11 +41,11 @@
static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
#define DRV_MODULE_NAME "ldmvsw"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Jan 15, 2016"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "February 3, 2017"
static char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver");
MODULE_LICENSE("GPL");
@@ -234,8 +234,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
dev->ethtool_ops = &vsw_ethtool_ops;
dev->watchdog_timeo = VSW_TX_TIMEOUT;
- dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
- NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
@@ -259,11 +258,6 @@ static struct vio_driver_ops vsw_vio_ops = {
.handshake_complete = sunvnet_handshake_complete_common,
};
-static void print_version(void)
-{
- printk_once(KERN_INFO "%s", version);
-}
-
static const char *remote_macaddr_prop = "remote-mac-address";
static const char *id_prop = "id";
@@ -279,8 +273,6 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
const u64 *port_id;
u64 handle;
- print_version();
-
hp = mdesc_grab();
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
@@ -327,7 +319,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->vp = vp;
port->dev = dev;
port->switch_port = 1;
- port->tso = true;
+ port->tso = false; /* no tso in vsw, misbehaves in bridge */
port->tsolen = 0;
/* Mark the port as belonging to ldmvsw which directs the
@@ -457,6 +449,7 @@ static struct vio_driver vsw_port_driver = {
static int __init vsw_init(void)
{
+ pr_info("%s\n", version);
return vio_register_driver(&vsw_port_driver);
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f90d1af6d390..57978056b336 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
@@ -6294,8 +6294,8 @@ no_rings:
stats->tx_errors = errors;
}
-static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void niu_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct niu *np = netdev_priv(dev);
@@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
niu_get_rx_stats(np, stats);
niu_get_tx_stats(np, stats);
}
-
- return stats;
}
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
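Note: the niu change above follows the tree-wide conversion of .ndo_get_stats64 to a void callback: the core ignores any return value and simply expects the driver to fill the caller-provided structure. A sketch of the updated member in struct net_device_ops (other members omitted):

	struct net_device_ops {
		/* ... */
		void	(*ndo_get_stats64)(struct net_device *dev,
					   struct rtnl_link_stats64 *storage);
		/* ... */
	};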
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d277e4107976..5c5952e782cd 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
gp->status = readl(gp->regs + GREG_STAT);
} while (gp->status & GREG_STAT_NAPI);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
gem_enable_ints(gp);
return work_done;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 5356a7074796..4cc2571f71c6 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -38,11 +38,11 @@
#define VNET_TX_TIMEOUT (5 * HZ)
#define DRV_MODULE_NAME "sunvnet"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "June 25, 2007"
+#define DRV_MODULE_VERSION "2.0"
+#define DRV_MODULE_RELDATE "February 3, 2017"
static char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
@@ -303,11 +303,6 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = sunvnet_handshake_complete_common,
};
-static void print_version(void)
-{
- printk_once(KERN_INFO "%s", version);
-}
-
const char *remote_macaddr_prop = "remote-mac-address";
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -319,8 +314,6 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
const u64 *rmac;
int len, i, err, switch_port;
- print_version();
-
hp = mdesc_grab();
vp = vnet_find_parent(hp, vdev->mp, vdev);
@@ -446,6 +439,7 @@ static struct vio_driver vnet_port_driver = {
static int __init vnet_init(void)
{
+ pr_info("%s\n", version);
return vio_register_driver(&vnet_port_driver);
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8878b75d68b4..fa2d11ca9b81 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -37,6 +37,11 @@
*/
#define VNET_MAX_RETRIES 10
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual network support library");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1");
+
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);
@@ -181,6 +186,7 @@ static int handle_attr_info(struct vio_driver_state *vio,
} else {
pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
pkt->ipv4_lso_maxlen = 0;
+ port->tsolen = 0;
}
/* for version >= 1.6, ACK packet mode we support */
@@ -714,12 +720,8 @@ static void maybe_tx_wakeup(struct vnet_port *port)
txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
port->q_index);
__netif_tx_lock(txq, smp_processor_id());
- if (likely(netif_tx_queue_stopped(txq))) {
- struct vio_dring_state *dr;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (likely(netif_tx_queue_stopped(txq)))
netif_tx_wake_queue(txq);
- }
__netif_tx_unlock(txq);
}
@@ -737,41 +739,37 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
struct vio_driver_state *vio = &port->vio;
int tx_wakeup, err;
int npkts = 0;
- int event = (port->rx_event & LDC_EVENT_RESET);
-
-ldc_ctrl:
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
- vio_link_state_change(vio, event);
-
- if (event == LDC_EVENT_RESET) {
- vnet_port_reset(port);
- vio_port_up(vio);
-
- /* If the device is running but its tx queue was
- * stopped (due to flow control), restart it.
- * This is necessary since vnet_port_reset()
- * clears the tx drings and thus we may never get
- * back a VIO_TYPE_DATA ACK packet - which is
- * the normal mechanism to restart the tx queue.
- */
- if (netif_running(dev))
- maybe_tx_wakeup(port);
- }
+
+ /* we don't expect any other bits */
+ BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
+ LDC_EVENT_RESET |
+ LDC_EVENT_UP));
+
+ /* RESET takes precedent over any other event */
+ if (port->rx_event & LDC_EVENT_RESET) {
+ vio_link_state_change(vio, LDC_EVENT_RESET);
+ vnet_port_reset(port);
+ vio_port_up(vio);
+
+ /* If the device is running but its tx queue was
+ * stopped (due to flow control), restart it.
+ * This is necessary since vnet_port_reset()
+ * clears the tx drings and thus we may never get
+ * back a VIO_TYPE_DATA ACK packet - which is
+ * the normal mechanism to restart the tx queue.
+ */
+ if (netif_running(dev))
+ maybe_tx_wakeup(port);
+
port->rx_event = 0;
return 0;
}
- /* We may have multiple LDC events in rx_event. Unroll send_events() */
- event = (port->rx_event & LDC_EVENT_UP);
- port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
- if (event == LDC_EVENT_UP)
- goto ldc_ctrl;
- event = port->rx_event;
- if (!(event & LDC_EVENT_DATA_READY))
- return 0;
- /* we dont expect any other bits than RESET, UP, DATA_READY */
- BUG_ON(event != LDC_EVENT_DATA_READY);
+ if (port->rx_event & LDC_EVENT_UP) {
+ vio_link_state_change(vio, LDC_EVENT_UP);
+ port->rx_event = 0;
+ return 0;
+ }
err = 0;
tx_wakeup = 0;
@@ -794,25 +792,25 @@ ldc_ctrl:
pkt->start_idx = vio_dring_next(dr,
port->napi_stop_idx);
pkt->end_idx = -1;
- goto napi_resume;
- }
- err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
- if (unlikely(err < 0)) {
- if (err == -ECONNRESET)
- vio_conn_reset(vio);
- break;
+ } else {
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
}
- if (err == 0)
- break;
- viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
- msgbuf.tag.type,
- msgbuf.tag.stype,
- msgbuf.tag.stype_env,
- msgbuf.tag.sid);
- err = vio_validate_sid(vio, &msgbuf.tag);
- if (err < 0)
- break;
-napi_resume:
+
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
if (!sunvnet_port_is_up_common(port)) {
@@ -860,7 +858,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget)
int processed = vnet_event_napi(port, budget);
if (processed < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, processed);
port->rx_event &= ~LDC_EVENT_DATA_READY;
vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
}
@@ -1256,10 +1254,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
port = vnet_tx_port(skb, dev);
- if (unlikely(!port)) {
- rcu_read_unlock();
+ if (unlikely(!port))
goto out_dropped;
- }
if (skb_is_gso(skb) && skb->len > port->tsolen) {
err = vnet_handle_offloads(port, skb, vnet_tx_port);
@@ -1284,7 +1280,6 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
fl4.saddr = ip_hdr(skb)->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
- rcu_read_unlock();
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -1426,6 +1421,7 @@ ldc_start_done:
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
netif_tx_stop_queue(txq);
+ smp_rmb();
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
netif_tx_wake_queue(txq);
}
@@ -1443,8 +1439,7 @@ out_dropped:
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
del_timer(&port->clean_timer);
- if (port)
- rcu_read_unlock();
+ rcu_read_unlock();
if (skb)
dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
@@ -1641,7 +1636,7 @@ static void vnet_port_reset(struct vnet_port *port)
del_timer(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
port->rmtu = 0;
- port->tso = true;
+ port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
port->tsolen = 0;
}
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
deleted file mode 100644
index 8276ee5a7d54..000000000000
--- a/drivers/net/ethernet/synopsys/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Synopsys network device configuration
-#
-
-config NET_VENDOR_SYNOPSYS
- bool "Synopsys devices"
- default y
- ---help---
- If you have a network (Ethernet) device belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Synopsys devices. If you say Y, you will be asked
- for your specific device in the following questions.
-
-if NET_VENDOR_SYNOPSYS
-
-config SYNOPSYS_DWC_ETH_QOS
- tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
- select PHYLIB
- select CRC32
- select MII
- depends on OF && HAS_DMA
- ---help---
- This driver supports the DWC Ethernet QoS from Synopsys
-
-endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
deleted file mode 100644
index 7a375723fc18..000000000000
--- a/drivers/net/ethernet/synopsys/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Synopsys network device drivers.
-#
-
-obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
deleted file mode 100644
index 09f5a67da35e..000000000000
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ /dev/null
@@ -1,2998 +0,0 @@
-/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
- *
- * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
- * This version introduced a lot of changes which breaks backwards
- * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers).
- * Some fields differ between version 4.00a and 4.10a, mainly the interrupt
- * bit fields. The driver could be made compatible with 4.00, if all relevant
- * HW erratas are handled.
- *
- * The GMAC is highly configurable at synthesis time. This driver has been
- * developed for a subset of the total available feature set. Currently
- * it supports:
- * - TSO
- * - Checksum offload for RX and TX.
- * - Energy efficient ethernet.
- * - GMII phy interface.
- * - The statistics module.
- * - Single RX and TX queue.
- *
- * Copyright (C) 2015 Axis Communications AB.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ethtool.h>
-#include <linux/stat.h>
-#include <linux/types.h>
-
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-
-#include <linux/phy.h>
-#include <linux/mii.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <linux/device.h>
-#include <linux/bitrev.h>
-#include <linux/crc32.h>
-
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
-#include <linux/pm_runtime.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
-#include <linux/timer.h>
-#include <linux/tcp.h>
-
-#define DRIVER_NAME "dwceqos"
-#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
-#define DRIVER_VERSION "0.9"
-
-#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
-
-#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
-
-#define DWCEQOS_LPI_TIMER_MIN 8
-#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
-
-#define DWCEQOS_RX_BUF_SIZE 2048
-
-#define DWCEQOS_RX_DCNT 256
-#define DWCEQOS_TX_DCNT 256
-
-#define DWCEQOS_HASH_TABLE_SIZE 64
-
-/* The size field in the DMA descriptor is 14 bits */
-#define BYTES_PER_DMA_DESC 16376
-
-/* Hardware registers */
-#define START_MAC_REG_OFFSET 0x0000
-#define MAX_MAC_REG_OFFSET 0x0bd0
-#define START_MTL_REG_OFFSET 0x0c00
-#define MAX_MTL_REG_OFFSET 0x0d7c
-#define START_DMA_REG_OFFSET 0x1000
-#define MAX_DMA_REG_OFFSET 0x117C
-
-#define REG_SPACE_SIZE 0x1800
-
-/* DMA */
-#define REG_DWCEQOS_DMA_MODE 0x1000
-#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
-#define REG_DWCEQOS_DMA_IS 0x1008
-#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
-
-/* DMA channel registers */
-#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
-#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
-#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
-#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
-#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
-#define REG_DWCEQOS_DMA_CH0_IE 0x1134
-#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
-#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
-#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
-#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c
-#define REG_DWCEQOS_DMA_CH0_STA 0x1160
-
-#define DWCEQOS_DMA_MODE_TXPR BIT(11)
-#define DWCEQOS_DMA_MODE_DA BIT(1)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
-#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
-#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
- (((x) << 16) & 0x000F0000)
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
- (((x) << 24) & 0x0F000000)
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
- (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
-
-#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
-#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
-
-#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
-#define DWCEQOS_DMA_CH_CTRL_START BIT(0)
-#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
-#define DWCEQOS_DMA_CH_TX_OSP BIT(4)
-#define DWCEQOS_DMA_CH_TX_TSE BIT(12)
-
-#define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
-#define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
-#define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
-#define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
-#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
-#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
-
-#define DWCEQOS_DMA_IS_DC0IS BIT(0)
-#define DWCEQOS_DMA_IS_MTLIS BIT(16)
-#define DWCEQOS_DMA_IS_MACIS BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_TI BIT(0)
-#define DWCEQOS_DMA_CH0_IS_RI BIT(6)
-#define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
-#define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
-#define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
-#define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
-
-#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
-
-/* DMA descriptor bits for RX normal descriptor (read format) */
-#define DWCEQOS_DMA_RDES3_OWN BIT(31)
-#define DWCEQOS_DMA_RDES3_INTE BIT(30)
-#define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
-#define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
-
-/* DMA descriptor bits for RX normal descriptor (write back format) */
-#define DWCEQOS_DMA_RDES1_IPCE BIT(7)
-#define DWCEQOS_DMA_RDES3_ES BIT(15)
-#define DWCEQOS_DMA_RDES3_E_JT BIT(14)
-#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
-#define DWCEQOS_DMA_RDES1_PT 0x00000007
-#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
-#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
-#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
-
-/* DMA descriptor bits for TX normal descriptor (read format) */
-#define DWCEQOS_DMA_TDES2_IOC BIT(31)
-#define DWCEQOS_DMA_TDES3_OWN BIT(31)
-#define DWCEQOS_DMA_TDES3_CTXT BIT(30)
-#define DWCEQOS_DMA_TDES3_FD BIT(29)
-#define DWCEQOS_DMA_TDES3_LD BIT(28)
-#define DWCEQOS_DMA_TDES3_CIPH BIT(16)
-#define DWCEQOS_DMA_TDES3_CIPP BIT(17)
-#define DWCEQOS_DMA_TDES3_CA 0x00030000
-#define DWCEQOS_DMA_TDES3_TSE BIT(18)
-#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
-#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
-
-#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
-
-/* DMA channel states */
-#define DMA_TX_CH_STOPPED 0
-#define DMA_TX_CH_SUSPENDED 6
-
-#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
-
-/* MTL */
-#define REG_DWCEQOS_MTL_OPER 0x0c00
-#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
-#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
-#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
-
-#define REG_DWCEQOS_MTL_IS 0x0c20
-#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
-#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
-#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
-#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
-
-#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
-
-#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
-
-#define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
-#define DWCEQOS_MTL_TXQ_TSF BIT(1)
-#define DWCEQOS_MTL_TXQ_FTQ BIT(0)
-#define DWCEQOS_MTL_TXQ_TTC512 0x00000070
-
-#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
-
-#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
-#define DWCEQOS_MTL_RXQ_EHFC BIT(7)
-#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
-#define DWCEQOS_MTL_RXQ_FEP BIT(4)
-#define DWCEQOS_MTL_RXQ_FUP BIT(3)
-#define DWCEQOS_MTL_RXQ_RSF BIT(5)
-#define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
-
-/* MAC */
-#define REG_DWCEQOS_MAC_CFG 0x0000
-#define REG_DWCEQOS_MAC_EXT_CFG 0x0004
-#define REG_DWCEQOS_MAC_PKT_FILT 0x0008
-#define REG_DWCEQOS_MAC_WD_TO 0x000c
-#define REG_DWCEQOS_HASTABLE_LO 0x0010
-#define REG_DWCEQOS_HASTABLE_HI 0x0014
-#define REG_DWCEQOS_MAC_IS 0x00b0
-#define REG_DWCEQOS_MAC_IE 0x00b4
-#define REG_DWCEQOS_MAC_STAT 0x00b8
-#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
-#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
-#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
-#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
-#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
-#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
-#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
-#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
-#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
-#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
-#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
-#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
-#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
-#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
-#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
-#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
-
-#define DWCEQOS_MAC_CFG_ACS BIT(20)
-#define DWCEQOS_MAC_CFG_JD BIT(17)
-#define DWCEQOS_MAC_CFG_JE BIT(16)
-#define DWCEQOS_MAC_CFG_PS BIT(15)
-#define DWCEQOS_MAC_CFG_FES BIT(14)
-#define DWCEQOS_MAC_CFG_DM BIT(13)
-#define DWCEQOS_MAC_CFG_DO BIT(10)
-#define DWCEQOS_MAC_CFG_TE BIT(1)
-#define DWCEQOS_MAC_CFG_IPC BIT(27)
-#define DWCEQOS_MAC_CFG_RE BIT(0)
-
-#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8))
-#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8))
-
-#define DWCEQOS_MAC_IS_LPI_INT BIT(5)
-#define DWCEQOS_MAC_IS_MMC_INT BIT(8)
-
-#define DWCEQOS_MAC_RXQ_EN BIT(1)
-#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
-#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
-#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
-#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
-#define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
-#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
-#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
-#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
-#define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
-
-#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8)
-#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
-#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
-#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
-#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
-#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
-#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
-#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
-
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
-
-#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
-
-#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
- DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
- DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
-
-#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
-
-#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
-
-/* Features */
-#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
-#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
-#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
-#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
-#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
-#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
-
-#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
-#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
-#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
-
-#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
- (1 + (((feature1) & 0x1fc0000) >> 18))
-
-#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
-#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
-
-#define DWCEQOS_DMA_MODE_SWR BIT(0)
-
-#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
-
-/* Mac Management Counters */
-#define REG_DWCEQOS_MMC_CTRL 0x0700
-#define REG_DWCEQOS_MMC_RXIRQ 0x0704
-#define REG_DWCEQOS_MMC_TXIRQ 0x0708
-#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
-#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
-
-#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
-#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
-
-#define DWC_MMC_TXLPITRANSCNTR 0x07F0
-#define DWC_MMC_TXLPIUSCNTR 0x07EC
-#define DWC_MMC_TXOVERSIZE_G 0x0778
-#define DWC_MMC_TXVLANPACKETS_G 0x0774
-#define DWC_MMC_TXPAUSEPACKETS 0x0770
-#define DWC_MMC_TXEXCESSDEF 0x076C
-#define DWC_MMC_TXPACKETCOUNT_G 0x0768
-#define DWC_MMC_TXOCTETCOUNT_G 0x0764
-#define DWC_MMC_TXCARRIERERROR 0x0760
-#define DWC_MMC_TXEXCESSCOL 0x075C
-#define DWC_MMC_TXLATECOL 0x0758
-#define DWC_MMC_TXDEFERRED 0x0754
-#define DWC_MMC_TXMULTICOL_G 0x0750
-#define DWC_MMC_TXSINGLECOL_G 0x074C
-#define DWC_MMC_TXUNDERFLOWERROR 0x0748
-#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
-#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
-#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
-#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
-#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
-#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
-#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
-#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
-#define DWC_MMC_TX64OCTETS_GB 0x0724
-#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
-#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
-#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
-#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
-
-#define DWC_MMC_RXLPITRANSCNTR 0x07F8
-#define DWC_MMC_RXLPIUSCNTR 0x07F4
-#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
-#define DWC_MMC_RXRCVERROR 0x07E0
-#define DWC_MMC_RXWATCHDOG 0x07DC
-#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
-#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
-#define DWC_MMC_RXPAUSEPACKETS 0x07D0
-#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
-#define DWC_MMC_RXLENGTHERROR 0x07C8
-#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
-#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
-#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
-#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
-#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
-#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
-#define DWC_MMC_RX64OCTETS_GB 0x07AC
-#define DWC_MMC_RXOVERSIZE_G 0x07A8
-#define DWC_MMC_RXUNDERSIZE_G 0x07A4
-#define DWC_MMC_RXJABBERERROR 0x07A0
-#define DWC_MMC_RXRUNTERROR 0x079C
-#define DWC_MMC_RXALIGNMENTERROR 0x0798
-#define DWC_MMC_RXCRCERROR 0x0794
-#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
-#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
-#define DWC_MMC_RXOCTETCOUNT_G 0x0788
-#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
-#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
-
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
-
-/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
-struct ring_desc {
- struct sk_buff *skb;
- dma_addr_t mapping;
- size_t len;
-};
-
-/* DMA hardware descriptor */
-struct dwceqos_dma_desc {
- u32 des0;
- u32 des1;
- u32 des2;
- u32 des3;
-} ____cacheline_aligned;
-
-struct dwceqos_mmc_counters {
- __u64 txlpitranscntr;
- __u64 txpiuscntr;
- __u64 txoversize_g;
- __u64 txvlanpackets_g;
- __u64 txpausepackets;
- __u64 txexcessdef;
- __u64 txpacketcount_g;
- __u64 txoctetcount_g;
- __u64 txcarriererror;
- __u64 txexcesscol;
- __u64 txlatecol;
- __u64 txdeferred;
- __u64 txmulticol_g;
- __u64 txsinglecol_g;
- __u64 txunderflowerror;
- __u64 txbroadcastpackets_gb;
- __u64 txmulticastpackets_gb;
- __u64 txunicastpackets_gb;
- __u64 tx1024tomaxoctets_gb;
- __u64 tx512to1023octets_gb;
- __u64 tx256to511octets_gb;
- __u64 tx128to255octets_gb;
- __u64 tx65to127octets_gb;
- __u64 tx64octets_gb;
- __u64 txmulticastpackets_g;
- __u64 txbroadcastpackets_g;
- __u64 txpacketcount_gb;
- __u64 txoctetcount_gb;
-
- __u64 rxlpitranscntr;
- __u64 rxlpiuscntr;
- __u64 rxctrlpackets_g;
- __u64 rxrcverror;
- __u64 rxwatchdog;
- __u64 rxvlanpackets_gb;
- __u64 rxfifooverflow;
- __u64 rxpausepackets;
- __u64 rxoutofrangetype;
- __u64 rxlengtherror;
- __u64 rxunicastpackets_g;
- __u64 rx1024tomaxoctets_gb;
- __u64 rx512to1023octets_gb;
- __u64 rx256to511octets_gb;
- __u64 rx128to255octets_gb;
- __u64 rx65to127octets_gb;
- __u64 rx64octets_gb;
- __u64 rxoversize_g;
- __u64 rxundersize_g;
- __u64 rxjabbererror;
- __u64 rxrunterror;
- __u64 rxalignmenterror;
- __u64 rxcrcerror;
- __u64 rxmulticastpackets_g;
- __u64 rxbroadcastpackets_g;
- __u64 rxoctetcount_g;
- __u64 rxoctetcount_gb;
- __u64 rxpacketcount_gb;
-};
-
-/* Ethtool statistics */
-
-struct dwceqos_stat {
- const char stat_name[ETH_GSTRING_LEN];
- int offset;
-};
-
-#define STAT_ITEM(name, var) \
- {\
- name,\
- offsetof(struct dwceqos_mmc_counters, var),\
- }
-
-static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
- STAT_ITEM("tx_bytes", txoctetcount_gb),
- STAT_ITEM("tx_packets", txpacketcount_gb),
- STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
- STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
- STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
- STAT_ITEM("tx_pause_packets", txpausepackets),
- STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
- STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
- STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
- STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
- STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
- STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
- STAT_ITEM("tx_underflow_errors", txunderflowerror),
- STAT_ITEM("tx_lpi_count", txlpitranscntr),
-
- STAT_ITEM("rx_bytes", rxoctetcount_gb),
- STAT_ITEM("rx_packets", rxpacketcount_gb),
- STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
- STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
- STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
- STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
- STAT_ITEM("rx_pause_packets", rxpausepackets),
- STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
- STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
- STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
- STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
- STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
- STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
- STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
- STAT_ITEM("rx_oversize_packets", rxoversize_g),
- STAT_ITEM("rx_undersize_packets", rxundersize_g),
- STAT_ITEM("rx_jabbers", rxjabbererror),
- STAT_ITEM("rx_align_errors", rxalignmenterror),
- STAT_ITEM("rx_crc_errors", rxcrcerror),
- STAT_ITEM("rx_lpi_count", rxlpitranscntr),
-};
-
-/* Configuration of AXI bus parameters.
- * These values depend on the parameters set on the MAC core as well
- * as the AXI interconnect.
- */
-struct dwceqos_bus_cfg {
- /* Enable AXI low-power interface. */
- bool en_lpi;
- /* Limit on number of outstanding AXI write requests. */
- u32 write_requests;
- /* Limit on number of outstanding AXI read requests. */
- u32 read_requests;
- /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
- u32 burst_map;
- /* DMA Programmable burst length*/
- u32 tx_pbl;
- u32 rx_pbl;
-};
-
-struct dwceqos_flowcontrol {
- int autoneg;
- int rx;
- int rx_current;
- int tx;
- int tx_current;
-};
-
-struct net_local {
- void __iomem *baseaddr;
- struct clk *phy_ref_clk;
- struct clk *apb_pclk;
-
- struct device_node *phy_node;
- struct net_device *ndev;
- struct platform_device *pdev;
-
- u32 msg_enable;
-
- struct tasklet_struct tx_bdreclaim_tasklet;
- struct workqueue_struct *txtimeout_handler_wq;
- struct work_struct txtimeout_reinit;
-
- phy_interface_t phy_interface;
- struct mii_bus *mii_bus;
-
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
-
- struct napi_struct napi;
-
- /* DMA Descriptor Areas */
- struct ring_desc *rx_skb;
- struct ring_desc *tx_skb;
-
- struct dwceqos_dma_desc *tx_descs;
- struct dwceqos_dma_desc *rx_descs;
-
- /* DMA Mapped Descriptor areas*/
- dma_addr_t tx_descs_addr;
- dma_addr_t rx_descs_addr;
- dma_addr_t tx_descs_tail_addr;
- dma_addr_t rx_descs_tail_addr;
-
- size_t tx_free;
- size_t tx_next;
- size_t rx_cur;
- size_t tx_cur;
-
- /* Spinlocks for accessing DMA Descriptors */
- spinlock_t tx_lock;
-
- /* Spinlock for register read-modify-writes. */
- spinlock_t hw_lock;
-
- u32 feature0;
- u32 feature1;
- u32 feature2;
-
- struct dwceqos_bus_cfg bus_cfg;
- bool en_tx_lpi_clockgating;
-
- int eee_enabled;
- int eee_active;
- int csr_val;
- u32 gso_size;
-
- struct dwceqos_mmc_counters mmc_counters;
- /* Protect the mmc_counter updates. */
- spinlock_t stats_lock;
- u32 mmc_rx_counters_mask;
- u32 mmc_tx_counters_mask;
-
- struct dwceqos_flowcontrol flowcontrol;
-
-	/* Tracks the intermediate state where the PHY has been started but
-	 * hardware init has not finished yet.
-	 */
- bool phy_defer;
-};
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
- u32 tx_mask);
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
- unsigned int reg_n);
-static int dwceqos_stop(struct net_device *ndev);
-static int dwceqos_open(struct net_device *ndev);
-static void dwceqos_tx_poll_demand(struct net_local *lp);
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
-
-static void dwceqos_reset_state(struct net_local *lp);
-
-#define dwceqos_read(lp, reg) \
- readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
-#define dwceqos_write(lp, reg, val) \
- writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
-
-static void dwceqos_reset_state(struct net_local *lp)
-{
- lp->link = 0;
- lp->speed = 0;
- lp->duplex = DUPLEX_UNKNOWN;
- lp->flowcontrol.rx_current = 0;
- lp->flowcontrol.tx_current = 0;
- lp->eee_active = 0;
- lp->eee_enabled = 0;
-}
-
-static void print_descriptor(struct net_local *lp, int index, int tx)
-{
- struct dwceqos_dma_desc *dd;
-
- if (tx)
- dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
- else
- dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
-
- pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
- index, dd);
- pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
- dd->des3);
-}
-
-static void print_status(struct net_local *lp)
-{
- size_t desci, i;
-
- pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
- lp->tx_cur, lp->tx_next);
-
- print_descriptor(lp, lp->rx_cur, 0);
-
- for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
- i < DWCEQOS_TX_DCNT;
- ++i) {
- print_descriptor(lp, desci, 1);
- desci = (desci + 1) % DWCEQOS_TX_DCNT;
- }
-
- pr_info("DMA_Debug_Status0: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
- pr_info("DMA_CH0_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
- pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
- dwceqos_read(lp, 0x1144));
- pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
- dwceqos_read(lp, 0x1154));
- pr_info("MTL_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
- pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
- pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
- pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
- dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
-}
-
-static void dwceqos_mdio_set_csr(struct net_local *lp)
-{
- int rate = clk_get_rate(lp->apb_pclk);
-
- if (rate <= 20000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
- else if (rate <= 35000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
- else if (rate <= 60000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
- else if (rate <= 100000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
- else if (rate <= 150000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
- else if (rate <= 250000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
-}
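
A quick standalone sketch of the rate-to-divider selection above; the range labels are mine and the DWCEQOS_MAC_MDIO_ADDR_CR_* encodings are not visible in this excerpt. Note that an APB clock faster than 250 MHz falls through the ladder and leaves csr_val unchanged.

    #include <stdio.h>

    /* Mirror the clock-range ladder in dwceqos_mdio_set_csr(). */
    static const char *mdio_csr_range(long rate)
    {
        if (rate <= 20000000)
            return "CR_20 (up to 20 MHz)";
        if (rate <= 35000000)
            return "CR_35 (20-35 MHz)";
        if (rate <= 60000000)
            return "CR_60 (35-60 MHz)";
        if (rate <= 100000000)
            return "CR_100 (60-100 MHz)";
        if (rate <= 150000000)
            return "CR_150 (100-150 MHz)";
        if (rate <= 250000000)
            return "CR_250 (150-250 MHz)";
        return "csr_val left unchanged (above 250 MHz)";
    }

    int main(void)
    {
        printf("125 MHz APB clock -> %s\n", mdio_csr_range(125000000));
        return 0;
    }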
-
-/* Simple MDIO functions implementing mii_bus */
-static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
-{
- struct net_local *lp = bus->priv;
- u32 regval;
- int i;
- int data;
-
- regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
- DWCEQOS_MDIO_PHYREG(phyreg) |
- DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
- DWCEQOS_MAC_MDIO_ADDR_GB |
- DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
- for (i = 0; i < 5; ++i) {
- usleep_range(64, 128);
- if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
- DWCEQOS_MAC_MDIO_ADDR_GB))
- break;
- }
-
- data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
- if (i == 5) {
- netdev_warn(lp->ndev, "MDIO read timed out\n");
- data = 0xffff;
- }
-
- return data & 0xffff;
-}
-
-static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
- u16 value)
-{
- struct net_local *lp = bus->priv;
- u32 regval;
- int i;
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
-
- regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
- DWCEQOS_MDIO_PHYREG(phyreg) |
- DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
- DWCEQOS_MAC_MDIO_ADDR_GB |
- DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
- for (i = 0; i < 5; ++i) {
- usleep_range(64, 128);
- if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
- DWCEQOS_MAC_MDIO_ADDR_GB))
- break;
- }
- if (i == 5)
- netdev_warn(lp->ndev, "MDIO write timed out\n");
- return 0;
-}
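
Both MDIO paths above poll the GB (busy) bit with the same bounded loop. A hypothetical helper along these lines (not part of the driver) could factor that out; it relies only on the dwceqos_read() accessor and the DWCEQOS_MAC_MDIO_ADDR_GB bit already used above.

    /* Hypothetical helper: wait for the MDIO GB (busy) bit to clear, as both
     * dwceqos_mdio_read() and dwceqos_mdio_write() do inline above.
     * Returns 0 when the bus goes idle, -ETIMEDOUT after five 64-128 us polls.
     */
    static int dwceqos_mdio_wait_idle(struct net_local *lp)
    {
        int i;

        for (i = 0; i < 5; ++i) {
            usleep_range(64, 128);
            if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
                  DWCEQOS_MAC_MDIO_ADDR_GB))
                return 0;
        }
        return -ETIMEDOUT;
    }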
-
-static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phy_mii_ioctl(phydev, rq, cmd);
- default:
- dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
- return -EOPNOTSUPP;
- }
-}
-
-static void dwceqos_link_down(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- /* Indicate link down to the LPI state machine */
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_link_up(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- u32 regval;
- unsigned long flags;
-
- /* Indicate link up to the LPI state machine */
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- lp->eee_active = !phy_init_eee(ndev->phydev, 0);
-
- /* Check for changed EEE capability */
- if (!lp->eee_active && lp->eee_enabled) {
- lp->eee_enabled = 0;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- }
-}
-
-static void dwceqos_set_speed(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- struct phy_device *phydev = ndev->phydev;
- u32 regval;
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
- DWCEQOS_MAC_CFG_DM);
-
- if (phydev->duplex)
- regval |= DWCEQOS_MAC_CFG_DM;
- if (phydev->speed == SPEED_10) {
- regval |= DWCEQOS_MAC_CFG_PS;
- } else if (phydev->speed == SPEED_100) {
- regval |= DWCEQOS_MAC_CFG_PS |
- DWCEQOS_MAC_CFG_FES;
- } else if (phydev->speed != SPEED_1000) {
- netdev_err(lp->ndev,
- "unknown PHY speed %d\n",
- phydev->speed);
- return;
- }
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
-}
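
The PS/FES encoding implied by the function above: PS selects the 10/100 port interface, FES selects 100 Mbit/s within it, and clearing both leaves the MAC in gigabit mode. A small standalone sketch of that mapping (the labels are inferred from the code, not from the databook):

    #include <stdio.h>

    int main(void)
    {
        /* Speed encoding as written by dwceqos_set_speed(). */
        struct { int ps, fes; const char *speed; } enc[] = {
            { 1, 0, "10 Mbit/s"   },
            { 1, 1, "100 Mbit/s"  },
            { 0, 0, "1000 Mbit/s" },
        };
        unsigned int i;

        for (i = 0; i < 3; i++)
            printf("PS=%d FES=%d -> %s\n", enc[i].ps, enc[i].fes, enc[i].speed);
        return 0;
    }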
-
-static void dwceqos_adjust_link(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
- int status_change = 0;
-
- if (lp->phy_defer)
- return;
-
- if (phydev->link) {
- if ((lp->speed != phydev->speed) ||
- (lp->duplex != phydev->duplex)) {
- dwceqos_set_speed(lp);
-
- lp->speed = phydev->speed;
- lp->duplex = phydev->duplex;
- status_change = 1;
- }
-
- if (lp->flowcontrol.autoneg) {
- lp->flowcontrol.rx = phydev->pause ||
- phydev->asym_pause;
- lp->flowcontrol.tx = phydev->pause ||
- phydev->asym_pause;
- }
-
- if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
- if (netif_msg_link(lp))
- netdev_dbg(ndev, "set rx flow to %d\n",
- lp->flowcontrol.rx);
- dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
- lp->flowcontrol.rx_current = lp->flowcontrol.rx;
- }
- if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
- if (netif_msg_link(lp))
- netdev_dbg(ndev, "set tx flow to %d\n",
- lp->flowcontrol.tx);
- dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
- lp->flowcontrol.tx_current = lp->flowcontrol.tx;
- }
- }
-
- if (phydev->link != lp->link) {
- lp->link = phydev->link;
- status_change = 1;
- }
-
- if (status_change) {
- if (phydev->link) {
- netif_trans_update(lp->ndev);
- dwceqos_link_up(lp);
- } else {
- dwceqos_link_down(lp);
- }
- phy_print_status(phydev);
- }
-}
-
-static int dwceqos_mii_probe(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
-
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev,
- lp->phy_node,
- &dwceqos_adjust_link,
- 0,
- lp->phy_interface);
-
- if (!phydev) {
- netdev_err(ndev, "no PHY found\n");
- return -1;
- }
- } else {
- netdev_err(ndev, "no PHY configured\n");
- return -ENODEV;
- }
-
- if (netif_msg_probe(lp))
- phy_attached_info(phydev);
-
- phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause;
-
- lp->link = 0;
- lp->speed = 0;
- lp->duplex = DUPLEX_UNKNOWN;
- lp->flowcontrol.autoneg = AUTONEG_ENABLE;
-
- return 0;
-}
-
-static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
-{
- struct sk_buff *new_skb;
- dma_addr_t new_skb_baddr = 0;
-
- new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
- if (!new_skb) {
- netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
- goto err_out;
- }
-
- new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
- new_skb->data, DWCEQOS_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
- netdev_err(lp->ndev, "DMA map error\n");
- dev_kfree_skb(new_skb);
- new_skb = NULL;
- goto err_out;
- }
-
- lp->rx_descs[index].des0 = new_skb_baddr;
- lp->rx_descs[index].des1 = 0;
- lp->rx_descs[index].des2 = 0;
- lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
- DWCEQOS_DMA_RDES3_BUF1V |
- DWCEQOS_DMA_RDES3_OWN;
-
- lp->rx_skb[index].mapping = new_skb_baddr;
- lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
-
-err_out:
- lp->rx_skb[index].skb = new_skb;
-}
-
-static void dwceqos_clean_rings(struct net_local *lp)
-{
- int i;
-
- if (lp->rx_skb) {
- for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
- if (lp->rx_skb[i].skb) {
- dma_unmap_single(lp->ndev->dev.parent,
- lp->rx_skb[i].mapping,
- lp->rx_skb[i].len,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb(lp->rx_skb[i].skb);
- lp->rx_skb[i].skb = NULL;
- lp->rx_skb[i].mapping = 0;
- }
- }
- }
-
- if (lp->tx_skb) {
- for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
- if (lp->tx_skb[i].skb) {
- dev_kfree_skb(lp->tx_skb[i].skb);
- lp->tx_skb[i].skb = NULL;
- }
- if (lp->tx_skb[i].mapping) {
- dma_unmap_single(lp->ndev->dev.parent,
- lp->tx_skb[i].mapping,
- lp->tx_skb[i].len,
- DMA_TO_DEVICE);
- lp->tx_skb[i].mapping = 0;
- }
- }
- }
-}
-
-static void dwceqos_descriptor_free(struct net_local *lp)
-{
- int size;
-
- dwceqos_clean_rings(lp);
-
- kfree(lp->tx_skb);
- lp->tx_skb = NULL;
- kfree(lp->rx_skb);
- lp->rx_skb = NULL;
-
- size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
- if (lp->rx_descs) {
- dma_free_coherent(lp->ndev->dev.parent, size,
- (void *)(lp->rx_descs), lp->rx_descs_addr);
- lp->rx_descs = NULL;
- }
-
- size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
- if (lp->tx_descs) {
- dma_free_coherent(lp->ndev->dev.parent, size,
- (void *)(lp->tx_descs), lp->tx_descs_addr);
- lp->tx_descs = NULL;
- }
-}
-
-static int dwceqos_descriptor_init(struct net_local *lp)
-{
- int size;
- u32 i;
-
- lp->gso_size = 0;
-
- lp->tx_skb = NULL;
- lp->rx_skb = NULL;
- lp->rx_descs = NULL;
- lp->tx_descs = NULL;
-
- /* Reset the DMA indexes */
- lp->rx_cur = 0;
- lp->tx_cur = 0;
- lp->tx_next = 0;
- lp->tx_free = DWCEQOS_TX_DCNT;
-
- /* Allocate Ring descriptors */
- size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
- lp->rx_skb = kzalloc(size, GFP_KERNEL);
- if (!lp->rx_skb)
- goto err_out;
-
- size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
- lp->tx_skb = kzalloc(size, GFP_KERNEL);
- if (!lp->tx_skb)
- goto err_out;
-
- /* Allocate DMA descriptors */
- size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
- lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->rx_descs_addr, GFP_KERNEL);
- if (!lp->rx_descs)
- goto err_out;
- lp->rx_descs_tail_addr = lp->rx_descs_addr +
- sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
-
- size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
- lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->tx_descs_addr, GFP_KERNEL);
- if (!lp->tx_descs)
- goto err_out;
- lp->tx_descs_tail_addr = lp->tx_descs_addr +
- sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
-
- /* Initialize RX Ring Descriptors and buffers */
- for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
- dwceqos_alloc_rxring_desc(lp, i);
- if (!(lp->rx_skb[lp->rx_cur].skb))
- goto err_out;
- }
-
- /* Initialize TX Descriptors */
- for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
- lp->tx_descs[i].des0 = 0;
- lp->tx_descs[i].des1 = 0;
- lp->tx_descs[i].des2 = 0;
- lp->tx_descs[i].des3 = 0;
- }
-
- /* Make descriptor writes visible to the DMA. */
- wmb();
-
- return 0;
-
-err_out:
- dwceqos_descriptor_free(lp);
- return -ENOMEM;
-}
-
-static int dwceqos_packet_avail(struct net_local *lp)
-{
- return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
-}
-
-static void dwceqos_get_hwfeatures(struct net_local *lp)
-{
- lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
- lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
- lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
-}
-
-static void dwceqos_dma_enable_txirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval |= DWCEQOS_DMA_CH0_IE_TIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_txirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_enable_rxirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval |= DWCEQOS_DMA_CH0_IE_RIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_rxirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
-{
- dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
- dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
-}
-
-static int dwceqos_mii_init(struct net_local *lp)
-{
- int ret = -ENXIO;
- struct resource res;
- struct device_node *mdionode;
-
- mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
-
- if (!mdionode)
- return 0;
-
- lp->mii_bus = mdiobus_alloc();
- if (!lp->mii_bus) {
- ret = -ENOMEM;
- goto err_out;
- }
-
- lp->mii_bus->name = "DWCEQOS MII bus";
- lp->mii_bus->read = &dwceqos_mdio_read;
- lp->mii_bus->write = &dwceqos_mdio_write;
- lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->pdev->dev;
-
- of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long)res.start);
- if (of_mdiobus_register(lp->mii_bus, mdionode))
- goto err_out_free_mdiobus;
-
- return 0;
-
-err_out_free_mdiobus:
- mdiobus_free(lp->mii_bus);
-err_out:
- of_node_put(mdionode);
- return ret;
-}
-
-/* DMA reset. Issuing it also resets all MTL and MAC registers. */
-static void dwceqos_reset_hw(struct net_local *lp)
-{
-	/* Wait (at most) 0.5 seconds for the DMA reset. */
- int i = 5000;
- u32 reg;
-
- /* Force gigabit to guarantee a TX clock for GMII. */
- reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
- reg |= DWCEQOS_MAC_CFG_DM;
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
-
- do {
- udelay(100);
- i--;
- reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
- } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
- /* We might experience a timeout if the chip clock mux is broken */
- if (!i)
- netdev_err(lp->ndev, "DMA reset timed out!\n");
-}
-
-static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
-{
- if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
- netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
- dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
- "read" : "write",
- dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
- "descr" : "data",
- dma_status);
-
- print_status(lp);
- }
- if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
- netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
- dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
- "read" : "write",
- dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
- "descr" : "data",
- dma_status);
-
- print_status(lp);
- }
-}
-
-static void dwceqos_mmc_interrupt(struct net_local *lp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
-
-	/* A latched MMC interrupt cannot be masked; we must read all the
-	 * counters that have an interrupt pending.
-	 */
- dwceqos_read_mmc_counters(lp,
- dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
- dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
-
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-}
-
-static void dwceqos_mac_interrupt(struct net_local *lp)
-{
- u32 cause;
-
- cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
-
- if (cause & DWCEQOS_MAC_IS_MMC_INT)
- dwceqos_mmc_interrupt(lp);
-}
-
-static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
-{
- struct net_device *ndev = dev_id;
- struct net_local *lp = netdev_priv(ndev);
-
- u32 cause;
- u32 dma_status;
- irqreturn_t ret = IRQ_NONE;
-
- cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
- /* DMA Channel 0 Interrupt */
- if (cause & DWCEQOS_DMA_IS_DC0IS) {
- dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
-
- /* Transmit Interrupt */
- if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
- tasklet_schedule(&lp->tx_bdreclaim_tasklet);
- dwceqos_dma_disable_txirq(lp);
- }
-
- /* Receive Interrupt */
- if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
- /* Disable RX IRQs */
- dwceqos_dma_disable_rxirq(lp);
- napi_schedule(&lp->napi);
- }
-
- /* Fatal Bus Error interrupt */
- if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
- dwceqos_fatal_bus_error(lp, dma_status);
-
- /* errata 9000831707 */
- dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
- DWCEQOS_DMA_CH0_IS_REB;
- }
-
- /* Ack all DMA Channel 0 IRQs */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
- ret = IRQ_HANDLED;
- }
-
- if (cause & DWCEQOS_DMA_IS_MTLIS) {
- u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
-
- dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
- ret = IRQ_HANDLED;
- }
-
- if (cause & DWCEQOS_DMA_IS_MACIS) {
- dwceqos_mac_interrupt(lp);
- ret = IRQ_HANDLED;
- }
- return ret;
-}
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
- if (enable)
- regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
- else
- regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- /* MTL flow control */
- regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
- if (enable)
- regval |= DWCEQOS_MTL_RXQ_EHFC;
- else
- regval &= ~DWCEQOS_MTL_RXQ_EHFC;
-
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
- /* MAC flow control */
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
- if (enable)
- regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
- else
- regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_configure_flow_control(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
- int RQS, RFD, RFA;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
-
-	/* The queue size is in units of 256 bytes. We want 512-byte units for
-	 * the threshold fields.
-	 */
- RQS = ((regval >> 20) & 0x3FF) + 1;
- RQS /= 2;
-
- /* The thresholds are relative to a full queue, with a bias
- * of 1 KiByte below full.
- */
- RFD = RQS / 2 - 2;
- RFA = RQS / 8 - 2;
-
- regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
-
- if (RFD >= 0 && RFA >= 0) {
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
- } else {
- netdev_warn(lp->ndev,
- "FIFO too small for flow control.");
- }
-
- regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
- DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
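
As a worked example of the threshold arithmetic above, here is a standalone sketch that applies the same formulas to a few hypothetical RX FIFO sizes; it shows that anything below an 8 KiB FIFO trips the "FIFO too small" branch.

    #include <stdio.h>

    int main(void)
    {
        int fifo_bytes[] = { 2048, 4096, 8192, 16384 };
        unsigned int i;

        for (i = 0; i < sizeof(fifo_bytes) / sizeof(fifo_bytes[0]); i++) {
            int rqs = fifo_bytes[i] / 256;  /* queue size field + 1, in 256-byte units */
            int rfd, rfa;

            rqs /= 2;                       /* convert to 512-byte units */
            rfd = rqs / 2 - 2;              /* deactivate threshold */
            rfa = rqs / 8 - 2;              /* activate threshold */

            printf("%5d-byte FIFO: RFD=%2d RFA=%2d -> %s\n", fifo_bytes[i], rfd, rfa,
                   (rfd >= 0 && rfa >= 0) ? "flow control configured"
                                          : "FIFO too small for flow control");
        }
        return 0;
    }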
-
-static void dwceqos_configure_clock(struct net_local *lp)
-{
- unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
-
- BUG_ON(!rate_mhz);
-
- dwceqos_write(lp,
- REG_DWCEQOS_MAC_1US_TIC_COUNTER,
- DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
-}
-
-static void dwceqos_configure_bus(struct net_local *lp)
-{
- u32 sysbus_reg;
-
- /* N.B. We do not support the Fixed Burst mode because it
- * opens a race window by making HW access to DMA descriptors
- * non-atomic.
- */
-
- sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
-
- if (lp->bus_cfg.en_lpi)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
-
- if (lp->bus_cfg.burst_map)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
- lp->bus_cfg.burst_map);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
- DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
-
- if (lp->bus_cfg.read_requests)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
- lp->bus_cfg.read_requests - 1);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
- DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
-
- if (lp->bus_cfg.write_requests)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
- lp->bus_cfg.write_requests - 1);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
- DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
-
- if (netif_msg_hw(lp))
- netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
-}
-
-static void dwceqos_init_hw(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- u32 regval;
- u32 buswidth;
- u32 dma_skip;
-
- /* Software reset */
- dwceqos_reset_hw(lp);
-
- dwceqos_configure_bus(lp);
-
- /* Probe data bus width, 32/64/128 bits. */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
- buswidth = (regval ^ 0xF) + 1;
-
- /* Cache-align dma descriptors. */
- dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
- DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
- DWCEQOS_DMA_CH_CTRL_PBLX8);
-
- /* Initialize DMA Channel 0 */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
- (u32)lp->tx_descs_addr);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
- (u32)lp->rx_descs_addr);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
- lp->tx_descs_tail_addr);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
- lp->rx_descs_tail_addr);
-
- if (lp->bus_cfg.tx_pbl)
- regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
- else
- regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
-	/* Enable TSO if the HW supports it. */
- if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
- regval |= DWCEQOS_DMA_CH_TX_TSE;
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
-
- if (lp->bus_cfg.rx_pbl)
- regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
- else
- regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
- regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
- regval |= DWCEQOS_DMA_CH_CTRL_START;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
- /* Initialize MTL Queues */
- regval = DWCEQOS_MTL_SCHALG_STRICT;
- dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
-
- regval = DWCEQOS_MTL_TXQ_SIZE(
- DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
- DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
- DWCEQOS_MTL_TXQ_TTC512;
- dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
-
- regval = DWCEQOS_MTL_RXQ_SIZE(
- DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
- DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
- dwceqos_configure_flow_control(lp);
-
- /* Initialize MAC */
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
- lp->eee_enabled = 0;
-
- dwceqos_configure_clock(lp);
-
- /* MMC counters */
-
- /* probe implemented counters */
- dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
- dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
- lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
- lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
-
- dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
- DWCEQOS_MMC_CTRL_RSTONRD);
- dwceqos_enable_mmc_interrupt(lp);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
- dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
- DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
- /* Start TX DMA */
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
- regval | DWCEQOS_DMA_CH_CTRL_START);
-
- /* Enable MAC TX/RX */
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
- regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
- lp->phy_defer = false;
- mutex_lock(&ndev->phydev->lock);
- phy_read_status(ndev->phydev);
- dwceqos_adjust_link(lp->ndev);
- mutex_unlock(&ndev->phydev->lock);
-}
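
The bus-width probe near the top of dwceqos_init_hw() relies on the tail-pointer register ignoring the address bits below the data-bus width, so writing 0xF and reading back reveals which low bits are wired to zero. A standalone sketch of that arithmetic with illustrative read-back values (the assumption about the register behaviour follows the "Probe data bus width" comment above):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative read-back values after writing 0xF to TXDESC_TAIL. */
        unsigned int readback[] = { 0xC, 0x8, 0x0 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
            unsigned int buswidth = (readback[i] ^ 0xF) + 1;    /* bytes */

            printf("readback 0x%X -> bus width %u bytes (%u bits)\n",
                   readback[i], buswidth, buswidth * 8);
        }
        return 0;
    }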
-
-static void dwceqos_tx_reclaim(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct net_local *lp = netdev_priv(ndev);
- unsigned int tx_bytes = 0;
- unsigned int tx_packets = 0;
-
- spin_lock(&lp->tx_lock);
-
- while (lp->tx_free < DWCEQOS_TX_DCNT) {
- struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
- struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
-
-		/* Descriptor still held by the DMA? */
- if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
- break;
-
- if (rd->mapping)
- dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
- DMA_TO_DEVICE);
-
- if (unlikely(rd->skb)) {
- ++tx_packets;
- tx_bytes += rd->skb->len;
- dev_consume_skb_any(rd->skb);
- }
-
- rd->skb = NULL;
- rd->mapping = 0;
- lp->tx_free++;
- lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
-
- if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
- (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
- if (netif_msg_tx_err(lp))
- netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
- dd->des3);
- if (netif_msg_hw(lp))
- print_status(lp);
- }
- }
- spin_unlock(&lp->tx_lock);
-
- netdev_completed_queue(ndev, tx_packets, tx_bytes);
-
- dwceqos_dma_enable_txirq(lp);
- netif_wake_queue(ndev);
-}
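
TX ring bookkeeping is split between dwceqos_start_xmit() (producer: advances tx_next, decrements tx_free) and this reclaim path (consumer: advances tx_cur, increments tx_free). A standalone sketch of the invariant tying the three fields together; the ring size is hypothetical since DWCEQOS_TX_DCNT is not visible in this excerpt.

    #include <assert.h>
    #include <stdio.h>

    #define TX_DCNT 256u    /* hypothetical ring size */

    int main(void)
    {
        unsigned int tx_next = 0, tx_cur = 0, tx_free = TX_DCNT;
        int i;

        /* Producer: queue 5 descriptors, as dwceqos_start_xmit() would. */
        for (i = 0; i < 5; i++) {
            tx_next = (tx_next + 1) % TX_DCNT;
            tx_free--;
        }

        /* Consumer: reclaim 3 completed descriptors, as dwceqos_tx_reclaim() would. */
        for (i = 0; i < 3; i++) {
            tx_free++;
            tx_cur = (tx_cur + 1) % TX_DCNT;
        }

        /* Invariant: free descriptors plus in-flight descriptors equals the ring size. */
        assert(tx_free + (tx_next - tx_cur + TX_DCNT) % TX_DCNT == TX_DCNT);
        printf("tx_next=%u tx_cur=%u tx_free=%u\n", tx_next, tx_cur, tx_free);
        return 0;
    }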
-
-static int dwceqos_rx(struct net_local *lp, int budget)
-{
- struct sk_buff *skb;
- u32 tot_size = 0;
- unsigned int n_packets = 0;
- unsigned int n_descs = 0;
- u32 len;
-
- struct dwceqos_dma_desc *dd;
- struct sk_buff *new_skb;
- dma_addr_t new_skb_baddr = 0;
-
- while (n_descs < budget) {
- if (!dwceqos_packet_avail(lp))
- break;
-
- new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
- if (!new_skb) {
- netdev_err(lp->ndev, "no memory for new sk_buff\n");
- break;
- }
-
- /* Get dma handle of skb->data */
- new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
- new_skb->data,
- DWCEQOS_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
- netdev_err(lp->ndev, "DMA map error\n");
- dev_kfree_skb(new_skb);
- break;
- }
-
- /* Read descriptor data after reading owner bit. */
- dma_rmb();
-
- dd = &lp->rx_descs[lp->rx_cur];
- len = DWCEQOS_DMA_RDES3_PL(dd->des3);
- skb = lp->rx_skb[lp->rx_cur].skb;
-
- /* Unmap old buffer */
- dma_unmap_single(lp->ndev->dev.parent,
- lp->rx_skb[lp->rx_cur].mapping,
- lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
-
- /* Discard packet on reception error or bad checksum */
- if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
- (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
- dev_kfree_skb(skb);
- skb = NULL;
- } else {
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, lp->ndev);
- switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
- case DWCEQOS_DMA_RDES1_PT_UDP:
- case DWCEQOS_DMA_RDES1_PT_TCP:
- case DWCEQOS_DMA_RDES1_PT_ICMP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- skb->ip_summed = CHECKSUM_NONE;
- break;
- }
- }
-
- if (unlikely(!skb)) {
- if (netif_msg_rx_err(lp))
- netdev_dbg(lp->ndev, "rx error: des3=%X\n",
- lp->rx_descs[lp->rx_cur].des3);
- } else {
- tot_size += skb->len;
- n_packets++;
-
- netif_receive_skb(skb);
- }
-
- lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
- lp->rx_descs[lp->rx_cur].des1 = 0;
- lp->rx_descs[lp->rx_cur].des2 = 0;
- /* The DMA must observe des0/1/2 written before des3. */
- wmb();
- lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
- DWCEQOS_DMA_RDES3_OWN |
- DWCEQOS_DMA_RDES3_BUF1V;
-
- lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
- lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
- lp->rx_skb[lp->rx_cur].skb = new_skb;
-
- n_descs++;
- lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
- }
-
- /* Make sure any ownership update is written to the descriptors before
- * DMA wakeup.
- */
- wmb();
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
- /* Wake up RX by writing tail pointer */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
- lp->rx_descs_tail_addr);
-
- return n_descs;
-}
-
-static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
-{
- struct net_local *lp = container_of(napi, struct net_local, napi);
- int work_done = 0;
-
- work_done = dwceqos_rx(lp, budget - work_done);
-
- if (!dwceqos_packet_avail(lp) && work_done < budget) {
- napi_complete(napi);
- dwceqos_dma_enable_rxirq(lp);
- } else {
- work_done = budget;
- }
-
- return work_done;
-}
-
-/* Reinitialize the device if a TX timed out. */
-static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
-{
- struct net_local *lp = container_of(data, struct net_local,
- txtimeout_reinit);
-
- netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
- DWCEQOS_TX_TIMEOUT);
-
- if (netif_msg_hw(lp))
- print_status(lp);
-
- rtnl_lock();
- dwceqos_stop(lp->ndev);
- dwceqos_open(lp->ndev);
- rtnl_unlock();
-}
-
-/* DT probing function, called from the main probe. */
-static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct net_local *lp;
- const void *mac_address;
- struct dwceqos_bus_cfg *bus_cfg;
- struct device_node *np = pdev->dev.of_node;
-
- ndev = platform_get_drvdata(pdev);
- lp = netdev_priv(ndev);
- bus_cfg = &lp->bus_cfg;
-
- /* Set the MAC address. */
- mac_address = of_get_mac_address(pdev->dev.of_node);
- if (mac_address)
- ether_addr_copy(ndev->dev_addr, mac_address);
-
- /* These are all optional parameters */
- lp->en_tx_lpi_clockgating = of_property_read_bool(np,
- "snps,en-tx-lpi-clockgating");
- bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
- of_property_read_u32(np, "snps,write-requests",
- &bus_cfg->write_requests);
- of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
- of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
- of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
- of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
-
- netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
- bus_cfg->en_lpi,
- bus_cfg->write_requests,
- bus_cfg->read_requests,
- bus_cfg->burst_map,
- bus_cfg->rx_pbl,
- bus_cfg->tx_pbl);
-
- return 0;
-}
-
-static int dwceqos_open(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- int res;
-
- dwceqos_reset_state(lp);
- res = dwceqos_descriptor_init(lp);
- if (res) {
- netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
- return res;
- }
- netdev_reset_queue(ndev);
-
-	/* The dwceqos reset state machine requires all PHY clocks to be up,
-	 * hence the unusual init order with phy_start first.
-	 */
- lp->phy_defer = true;
- phy_start(ndev->phydev);
- dwceqos_init_hw(lp);
- napi_enable(&lp->napi);
-
- netif_start_queue(ndev);
- tasklet_enable(&lp->tx_bdreclaim_tasklet);
-
- /* Enable Interrupts -- do this only after we enable NAPI and the
- * tasklet.
- */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
- return 0;
-}
-
-static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
-{
- u32 reg;
-
- reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
- reg = DMA_GET_TX_STATE_CH0(reg);
-
- return reg == DMA_TX_CH_SUSPENDED;
-}
-
-static void dwceqos_drain_dma(struct net_local *lp)
-{
- /* Wait for all pending TX buffers to be sent. Upper limit based
- * on max frame size on a 10 Mbit link.
- */
- size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
-
- while (!dweqos_is_tx_dma_suspended(lp) && limit--)
- usleep_range(100, 200);
-}
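
The limit above is just the worst-case time to transmit a full ring at 10 Mbit/s divided by the minimum poll interval. A quick standalone check of the constants; the ring size is hypothetical and a maximum-size frame is assumed to take roughly 1.25 ms at 10 Mbit/s.

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_dcnt = 256;     /* hypothetical DWCEQOS_TX_DCNT */
        unsigned int frame_us = 1250;   /* ~1500-byte frame at 10 Mbit/s */
        unsigned int poll_us = 100;     /* minimum usleep_range() interval above */
        unsigned int limit = (tx_dcnt * frame_us) / poll_us;

        printf("polls to cover a full ring: %u (about %u ms)\n",
               limit, limit * poll_us / 1000);
        return 0;
    }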
-
-static int dwceqos_stop(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- tasklet_disable(&lp->tx_bdreclaim_tasklet);
- napi_disable(&lp->napi);
-
- /* Stop all tx before we drain the tx dma. */
- netif_tx_lock_bh(lp->ndev);
- netif_stop_queue(ndev);
- netif_tx_unlock_bh(lp->ndev);
-
- dwceqos_drain_dma(lp);
- dwceqos_reset_hw(lp);
- phy_stop(ndev->phydev);
-
- dwceqos_descriptor_free(lp);
-
- return 0;
-}
-
-static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
- unsigned short gso_size)
-{
- struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
-
- dd->des0 = 0;
- dd->des1 = 0;
- dd->des2 = gso_size;
- dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
-
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-}
-
-static void dwceqos_tx_poll_demand(struct net_local *lp)
-{
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
- lp->tx_descs_tail_addr);
-}
-
-struct dwceqos_tx {
- size_t nr_descriptors;
- size_t initial_descriptor;
- size_t last_descriptor;
- size_t prev_gso_size;
- size_t network_header_len;
-};
-
-static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- size_t n = 1;
- size_t i;
-
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
- ++n;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
- BYTES_PER_DMA_DESC;
- }
-
- tx->nr_descriptors = n;
- tx->initial_descriptor = lp->tx_next;
- tx->last_descriptor = lp->tx_next;
- tx->prev_gso_size = lp->gso_size;
-
- tx->network_header_len = skb_transport_offset(skb);
- if (skb_is_gso(skb))
- tx->network_header_len += tcp_hdrlen(skb);
-}
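
dwceqos_tx_prepare() reserves one descriptor for the linear part of the skb, an extra context descriptor when the GSO size changes, and ceil(frag_size / BYTES_PER_DMA_DESC) descriptors per page fragment. A standalone sketch of that estimate; BYTES_PER_DMA_DESC is not visible in this excerpt, so 16376 is assumed to match the per-descriptor cap used later in dwceqos_tx_frags().

    #include <stdio.h>

    #define BYTES_PER_DMA_DESC 16376    /* assumed; matches the cap in dwceqos_tx_frags() */

    /* Estimate descriptors for a packet with nr_frags fragments of frag_size
     * bytes each, mirroring the count in dwceqos_tx_prepare().
     */
    static unsigned int tx_desc_estimate(unsigned int nr_frags, unsigned int frag_size,
                                         int gso_size_changed)
    {
        unsigned int n = 1;                     /* linear part of the skb */

        if (gso_size_changed)
            n++;                                /* context descriptor with the new MSS */

        n += nr_frags * ((frag_size + BYTES_PER_DMA_DESC - 1) / BYTES_PER_DMA_DESC);
        return n;
    }

    int main(void)
    {
        /* e.g. a TSO skb with two 32 KiB fragments and a changed GSO size */
        printf("%u descriptors needed\n", tx_desc_estimate(2, 32768, 1));
        return 0;
    }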
-
-static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- struct ring_desc *rd;
- struct dwceqos_dma_desc *dd;
- size_t payload_len;
- dma_addr_t dma_handle;
-
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
- dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
- lp->gso_size = skb_shinfo(skb)->gso_size;
- }
-
- dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
-
- if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
- netdev_err(lp->ndev, "TX DMA Mapping error\n");
- return -ENOMEM;
- }
-
- rd = &lp->tx_skb[lp->tx_next];
- dd = &lp->tx_descs[lp->tx_next];
-
- rd->skb = NULL;
- rd->len = skb_headlen(skb);
- rd->mapping = dma_handle;
-
- /* Set up DMA Descriptor */
- dd->des0 = dma_handle;
-
- if (skb_is_gso(skb)) {
- payload_len = skb_headlen(skb) - tx->network_header_len;
-
- if (payload_len)
- dd->des1 = dma_handle + tx->network_header_len;
- dd->des2 = tx->network_header_len |
- DWCEQOS_DMA_DES2_B2L(payload_len);
- dd->des3 = DWCEQOS_DMA_TDES3_TSE |
- DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
- (skb->len - tx->network_header_len);
- } else {
- dd->des1 = 0;
- dd->des2 = skb_headlen(skb);
- dd->des3 = skb->len;
-
- switch (skb->ip_summed) {
- case CHECKSUM_PARTIAL:
- dd->des3 |= DWCEQOS_DMA_TDES3_CA;
- case CHECKSUM_NONE:
- case CHECKSUM_UNNECESSARY:
- case CHECKSUM_COMPLETE:
- default:
- break;
- }
- }
-
- dd->des3 |= DWCEQOS_DMA_TDES3_FD;
- if (lp->tx_next != tx->initial_descriptor)
- dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- tx->last_descriptor = lp->tx_next;
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-
- return 0;
-}
-
-static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- struct ring_desc *rd = NULL;
- struct dwceqos_dma_desc *dd;
- dma_addr_t dma_handle;
- size_t i;
-
-	/* Set up more ring and DMA descriptors if the packet is fragmented. */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- size_t frag_size;
- size_t consumed_size;
-
- /* Map DMA Area */
- dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
- netdev_err(lp->ndev, "DMA Mapping error\n");
- return -ENOMEM;
- }
-
- /* order-3 fragments span more than one descriptor. */
- frag_size = skb_frag_size(frag);
- consumed_size = 0;
- while (consumed_size < frag_size) {
- size_t dma_size = min_t(size_t, 16376,
- frag_size - consumed_size);
-
- rd = &lp->tx_skb[lp->tx_next];
- memset(rd, 0, sizeof(*rd));
-
- dd = &lp->tx_descs[lp->tx_next];
-
- /* Set DMA Descriptor fields */
- dd->des0 = dma_handle + consumed_size;
- dd->des1 = 0;
- dd->des2 = dma_size;
-
- if (skb_is_gso(skb))
- dd->des3 = (skb->len - tx->network_header_len);
- else
- dd->des3 = skb->len;
-
- dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- tx->last_descriptor = lp->tx_next;
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
- consumed_size += dma_size;
- }
-
- rd->len = skb_frag_size(frag);
- rd->mapping = dma_handle;
- }
-
- return 0;
-}
-
-static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
- lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
-
- lp->tx_skb[tx->last_descriptor].skb = skb;
-
- /* Make all descriptor updates visible to the DMA before setting the
- * owner bit.
- */
- wmb();
-
- lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- /* Make the owner bit visible before TX wakeup. */
- wmb();
-
- dwceqos_tx_poll_demand(lp);
-}
-
-static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
-{
- size_t i = tx->initial_descriptor;
-
- while (i != lp->tx_next) {
- if (lp->tx_skb[i].mapping)
- dma_unmap_single(lp->ndev->dev.parent,
- lp->tx_skb[i].mapping,
- lp->tx_skb[i].len,
- DMA_TO_DEVICE);
-
- lp->tx_skb[i].mapping = 0;
- lp->tx_skb[i].skb = NULL;
-
- memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
-
- i = (i + 1) % DWCEQOS_TX_DCNT;
- }
-
- lp->tx_next = tx->initial_descriptor;
- lp->gso_size = tx->prev_gso_size;
-}
-
-static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct dwceqos_tx trans;
- int err;
-
- dwceqos_tx_prepare(skb, lp, &trans);
- if (lp->tx_free < trans.nr_descriptors) {
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
-
- err = dwceqos_tx_linear(skb, lp, &trans);
- if (err)
- goto tx_error;
-
- err = dwceqos_tx_frags(skb, lp, &trans);
- if (err)
- goto tx_error;
-
- WARN_ON(lp->tx_next !=
- ((trans.initial_descriptor + trans.nr_descriptors) %
- DWCEQOS_TX_DCNT));
-
- spin_lock_bh(&lp->tx_lock);
- lp->tx_free -= trans.nr_descriptors;
- dwceqos_tx_finalize(skb, lp, &trans);
- netdev_sent_queue(ndev, skb->len);
- spin_unlock_bh(&lp->tx_lock);
-
- netif_trans_update(ndev);
- return 0;
-
-tx_error:
- dwceqos_tx_rollback(lp, &trans);
- dev_kfree_skb_any(skb);
- return 0;
-}
-
-/* Set MAC address and then update HW accordingly */
-static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct sockaddr *hwaddr = (struct sockaddr *)addr;
-
- if (netif_running(ndev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(hwaddr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
-
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
- return 0;
-}
-
-static void dwceqos_tx_timeout(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
-}
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
- unsigned int reg_n)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
- data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
-}
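
The address registers above pack the six MAC bytes little-end first: bytes 4-5 go into the high register (together with the enable bit), bytes 0-3 into the low register. A standalone sketch with an example locally administered address (the address itself is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint32_t hi, lo;

        hi = (addr[5] << 8) | addr[4];  /* the driver also ORs in the enable bit */
        lo = ((uint32_t)addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];

        printf("ADDR_HIGH=0x%04x ADDR_LOW=0x%08x\n", hi, lo);
        return 0;
    }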
-
-static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
-{
- /* Do not disable MAC address 0 */
- if (reg_n != 0)
- dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
-}
-
-static void dwceqos_set_rx_mode(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 regval = 0;
- u32 mc_filter[2];
- int reg = 1;
- struct netdev_hw_addr *ha;
- unsigned int max_mac_addr;
-
- max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
-
- if (ndev->flags & IFF_PROMISC) {
- regval = DWCEQOS_MAC_PKT_FILT_PR;
- } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
- (ndev->flags & IFF_ALLMULTI))) {
- regval = DWCEQOS_MAC_PKT_FILT_PM;
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
- } else if (!netdev_mc_empty(ndev)) {
- regval = DWCEQOS_MAC_PKT_FILT_HMC;
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, ndev) {
-			/* The upper 6 bits of the calculated CRC are used to
-			 * index the contents of the hash table.
-			 */
- int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
- /* The most significant bit determines the register
- * to use (H/L) while the other 5 bits determine
- * the bit within the register.
- */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
- }
- if (netdev_uc_count(ndev) > max_mac_addr) {
- regval |= DWCEQOS_MAC_PKT_FILT_PR;
- } else {
- netdev_for_each_uc_addr(ha, ndev) {
- dwceqos_set_umac_addr(lp, ha->addr, reg);
- reg++;
- }
- for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
- dwceqos_disable_umac_addr(lp, reg);
- }
- dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
-}
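
The hash-filter computation above can be exercised in isolation: the driver takes crc32_le(~0, addr, 6), complements it, bit-reverses the result, and uses the top six bits to pick one of 64 hash bits split across the two HASTABLE registers. A standalone sketch mirroring that expression; the crc32_le()/bitrev32() reimplementations and the example multicast address are mine, so treat the printed bit number as illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32 (poly 0xEDB88320), bit-by-bit, like the kernel's crc32_le(). */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
    {
        int i;

        while (len--) {
            crc ^= *p++;
            for (i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
    }

    static uint32_t bitrev32(uint32_t x)
    {
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
            r = (r << 1) | ((x >> i) & 1);
        return r;
    }

    int main(void)
    {
        const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };  /* example */
        uint32_t mc_filter[2] = { 0, 0 };
        int bit_nr = bitrev32(~crc32_le(~0u, addr, 6)) >> 26;

        mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
        printf("hash bit %d -> HASTABLE_%s bit %d\n",
               bit_nr, (bit_nr >> 5) ? "HI" : "LO", bit_nr & 31);
        return 0;
    }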
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void dwceqos_poll_controller(struct net_device *ndev)
-{
- disable_irq(ndev->irq);
- dwceqos_interrupt(ndev->irq, ndev);
- enable_irq(ndev->irq);
-}
-#endif
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
- u32 tx_mask)
-{
- if (tx_mask & BIT(27))
- lp->mmc_counters.txlpitranscntr +=
- dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
- if (tx_mask & BIT(26))
- lp->mmc_counters.txpiuscntr +=
- dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
- if (tx_mask & BIT(25))
- lp->mmc_counters.txoversize_g +=
- dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
- if (tx_mask & BIT(24))
- lp->mmc_counters.txvlanpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
- if (tx_mask & BIT(23))
- lp->mmc_counters.txpausepackets +=
- dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
- if (tx_mask & BIT(22))
- lp->mmc_counters.txexcessdef +=
- dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
- if (tx_mask & BIT(21))
- lp->mmc_counters.txpacketcount_g +=
- dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
- if (tx_mask & BIT(20))
- lp->mmc_counters.txoctetcount_g +=
- dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
- if (tx_mask & BIT(19))
- lp->mmc_counters.txcarriererror +=
- dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
- if (tx_mask & BIT(18))
- lp->mmc_counters.txexcesscol +=
- dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
- if (tx_mask & BIT(17))
- lp->mmc_counters.txlatecol +=
- dwceqos_read(lp, DWC_MMC_TXLATECOL);
- if (tx_mask & BIT(16))
- lp->mmc_counters.txdeferred +=
- dwceqos_read(lp, DWC_MMC_TXDEFERRED);
- if (tx_mask & BIT(15))
- lp->mmc_counters.txmulticol_g +=
- dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
- if (tx_mask & BIT(14))
- lp->mmc_counters.txsinglecol_g +=
- dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
- if (tx_mask & BIT(13))
- lp->mmc_counters.txunderflowerror +=
- dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
- if (tx_mask & BIT(12))
- lp->mmc_counters.txbroadcastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
- if (tx_mask & BIT(11))
- lp->mmc_counters.txmulticastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
- if (tx_mask & BIT(10))
- lp->mmc_counters.txunicastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
- if (tx_mask & BIT(9))
- lp->mmc_counters.tx1024tomaxoctets_gb +=
- dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
- if (tx_mask & BIT(8))
- lp->mmc_counters.tx512to1023octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
- if (tx_mask & BIT(7))
- lp->mmc_counters.tx256to511octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
- if (tx_mask & BIT(6))
- lp->mmc_counters.tx128to255octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
- if (tx_mask & BIT(5))
- lp->mmc_counters.tx65to127octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
- if (tx_mask & BIT(4))
- lp->mmc_counters.tx64octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
- if (tx_mask & BIT(3))
- lp->mmc_counters.txmulticastpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
- if (tx_mask & BIT(2))
- lp->mmc_counters.txbroadcastpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
- if (tx_mask & BIT(1))
- lp->mmc_counters.txpacketcount_gb +=
- dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
- if (tx_mask & BIT(0))
- lp->mmc_counters.txoctetcount_gb +=
- dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
-
- if (rx_mask & BIT(27))
- lp->mmc_counters.rxlpitranscntr +=
- dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
- if (rx_mask & BIT(26))
- lp->mmc_counters.rxlpiuscntr +=
- dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
- if (rx_mask & BIT(25))
- lp->mmc_counters.rxctrlpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
- if (rx_mask & BIT(24))
- lp->mmc_counters.rxrcverror +=
- dwceqos_read(lp, DWC_MMC_RXRCVERROR);
- if (rx_mask & BIT(23))
- lp->mmc_counters.rxwatchdog +=
- dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
- if (rx_mask & BIT(22))
- lp->mmc_counters.rxvlanpackets_gb +=
- dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
- if (rx_mask & BIT(21))
- lp->mmc_counters.rxfifooverflow +=
- dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
- if (rx_mask & BIT(20))
- lp->mmc_counters.rxpausepackets +=
- dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
- if (rx_mask & BIT(19))
- lp->mmc_counters.rxoutofrangetype +=
- dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
- if (rx_mask & BIT(18))
- lp->mmc_counters.rxlengtherror +=
- dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
- if (rx_mask & BIT(17))
- lp->mmc_counters.rxunicastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
- if (rx_mask & BIT(16))
- lp->mmc_counters.rx1024tomaxoctets_gb +=
- dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
- if (rx_mask & BIT(15))
- lp->mmc_counters.rx512to1023octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
- if (rx_mask & BIT(14))
- lp->mmc_counters.rx256to511octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
- if (rx_mask & BIT(13))
- lp->mmc_counters.rx128to255octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
- if (rx_mask & BIT(12))
- lp->mmc_counters.rx65to127octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
- if (rx_mask & BIT(11))
- lp->mmc_counters.rx64octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
- if (rx_mask & BIT(10))
- lp->mmc_counters.rxoversize_g +=
- dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
- if (rx_mask & BIT(9))
- lp->mmc_counters.rxundersize_g +=
- dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
- if (rx_mask & BIT(8))
- lp->mmc_counters.rxjabbererror +=
- dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
- if (rx_mask & BIT(7))
- lp->mmc_counters.rxrunterror +=
- dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
- if (rx_mask & BIT(6))
- lp->mmc_counters.rxalignmenterror +=
- dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
- if (rx_mask & BIT(5))
- lp->mmc_counters.rxcrcerror +=
- dwceqos_read(lp, DWC_MMC_RXCRCERROR);
- if (rx_mask & BIT(4))
- lp->mmc_counters.rxmulticastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
- if (rx_mask & BIT(3))
- lp->mmc_counters.rxbroadcastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
- if (rx_mask & BIT(2))
- lp->mmc_counters.rxoctetcount_g +=
- dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
- if (rx_mask & BIT(1))
- lp->mmc_counters.rxoctetcount_gb +=
- dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
- if (rx_mask & BIT(0))
- lp->mmc_counters.rxpacketcount_gb +=
- dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
-}
-
-static struct rtnl_link_stats64*
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
-{
- unsigned long flags;
- struct net_local *lp = netdev_priv(ndev);
- struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
- dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
- lp->mmc_tx_counters_mask);
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-
- s->rx_packets = hwstats->rxpacketcount_gb;
- s->rx_bytes = hwstats->rxoctetcount_gb;
- s->rx_errors = hwstats->rxpacketcount_gb -
- hwstats->rxbroadcastpackets_g -
- hwstats->rxmulticastpackets_g -
- hwstats->rxunicastpackets_g;
- s->multicast = hwstats->rxmulticastpackets_g;
- s->rx_length_errors = hwstats->rxlengtherror;
- s->rx_crc_errors = hwstats->rxcrcerror;
- s->rx_fifo_errors = hwstats->rxfifooverflow;
-
- s->tx_packets = hwstats->txpacketcount_gb;
- s->tx_bytes = hwstats->txoctetcount_gb;
-
- if (lp->mmc_tx_counters_mask & BIT(21))
- s->tx_errors = hwstats->txpacketcount_gb -
- hwstats->txpacketcount_g;
- else
- s->tx_errors = hwstats->txunderflowerror +
- hwstats->txcarriererror;
-
- return s;
-}
-
-static void
-dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- strcpy(ed->driver, lp->pdev->dev.driver->name);
- strcpy(ed->version, DRIVER_VERSION);
-}
-
-static void dwceqos_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pp)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- pp->autoneg = lp->flowcontrol.autoneg;
- pp->tx_pause = lp->flowcontrol.tx;
- pp->rx_pause = lp->flowcontrol.rx;
-}
-
-static int dwceqos_set_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pp)
-{
- struct net_local *lp = netdev_priv(ndev);
- int ret = 0;
-
- lp->flowcontrol.autoneg = pp->autoneg;
- if (pp->autoneg) {
- ndev->phydev->advertising |= ADVERTISED_Pause;
- ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
- } else {
- ndev->phydev->advertising &= ~ADVERTISED_Pause;
- ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
- lp->flowcontrol.rx = pp->rx_pause;
- lp->flowcontrol.tx = pp->tx_pause;
- }
-
- if (netif_running(ndev))
- ret = phy_start_aneg(ndev->phydev);
-
- return ret;
-}
-
-static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
- u8 *data)
-{
- size_t i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
- memcpy(data, dwceqos_ethtool_stats[i].stat_name,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
-}
-
-static void dwceqos_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct net_local *lp = netdev_priv(ndev);
- unsigned long flags;
- size_t i;
- u8 *mmcstat = (u8 *)&lp->mmc_counters;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
- dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
- lp->mmc_tx_counters_mask);
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-
- for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
- memcpy(data,
- mmcstat + dwceqos_ethtool_stats[i].offset,
- sizeof(u64));
- data++;
- }
-}
-
-static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(dwceqos_ethtool_stats);
-
- return -EOPNOTSUPP;
-}
-
-static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *space)
-{
- const struct net_local *lp = netdev_priv(dev);
- u32 *reg_space = (u32 *)space;
- int reg_offset;
- int reg_ix = 0;
-
- /* MAC registers */
- for (reg_offset = START_MAC_REG_OFFSET;
- reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
- /* MTL registers */
- for (reg_offset = START_MTL_REG_OFFSET;
- reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
-
- /* DMA registers */
- for (reg_offset = START_DMA_REG_OFFSET;
- reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
-
- BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
-}
-
-static int dwceqos_get_regs_len(struct net_device *dev)
-{
- return REG_SPACE_SIZE;
-}
-
-static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
-{
- return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
-}
-
-static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
-{
- return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
-}
-
-static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 lpi_status;
- u32 lpi_enabled;
-
- if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
- return -EOPNOTSUPP;
-
- edata->eee_active = lp->eee_active;
- edata->eee_enabled = lp->eee_enabled;
- edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
- lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
- edata->tx_lpi_enabled = lpi_enabled;
-
- if (netif_msg_hw(lp)) {
- u32 regval;
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-
- netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
- dwceqos_get_rx_lpi_state(regval),
- dwceqos_get_tx_lpi_state(regval));
- }
-
- return phy_ethtool_get_eee(ndev->phydev, edata);
-}
-
-static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 regval;
- unsigned long flags;
-
- if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
- return -EOPNOTSUPP;
-
- if (edata->eee_enabled && !lp->eee_active)
- return -EOPNOTSUPP;
-
- if (edata->tx_lpi_enabled) {
- if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
- edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
- return -EINVAL;
- }
-
- lp->eee_enabled = edata->eee_enabled;
-
- if (edata->eee_enabled && edata->tx_lpi_enabled) {
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
- edata->tx_lpi_timer);
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
- if (lp->en_tx_lpi_clockgating)
- regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- }
-
- return phy_ethtool_set_eee(ndev->phydev, edata);
-}
-
-static u32 dwceqos_get_msglevel(struct net_device *ndev)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- return lp->msg_enable;
-}
-
-static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- lp->msg_enable = msglevel;
-}
-
-static const struct ethtool_ops dwceqos_ethtool_ops = {
- .get_drvinfo = dwceqos_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = dwceqos_get_pauseparam,
- .set_pauseparam = dwceqos_set_pauseparam,
- .get_strings = dwceqos_get_strings,
- .get_ethtool_stats = dwceqos_get_ethtool_stats,
- .get_sset_count = dwceqos_get_sset_count,
- .get_regs = dwceqos_get_regs,
- .get_regs_len = dwceqos_get_regs_len,
- .get_eee = dwceqos_get_eee,
- .set_eee = dwceqos_set_eee,
- .get_msglevel = dwceqos_get_msglevel,
- .set_msglevel = dwceqos_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = dwceqos_open,
- .ndo_stop = dwceqos_stop,
- .ndo_start_xmit = dwceqos_start_xmit,
- .ndo_set_rx_mode = dwceqos_set_rx_mode,
- .ndo_set_mac_address = dwceqos_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = dwceqos_poll_controller,
-#endif
- .ndo_do_ioctl = dwceqos_ioctl,
- .ndo_tx_timeout = dwceqos_tx_timeout,
- .ndo_get_stats64 = dwceqos_get_stats64,
-};
-
-static const struct of_device_id dwceq_of_match[] = {
- { .compatible = "snps,dwc-qos-ethernet-4.10", },
- {}
-};
-MODULE_DEVICE_TABLE(of, dwceq_of_match);
-
-static int dwceqos_probe(struct platform_device *pdev)
-{
- struct resource *r_mem = NULL;
- struct net_device *ndev;
- struct net_local *lp;
- int ret = -ENXIO;
-
- r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r_mem) {
- dev_err(&pdev->dev, "no IO resource defined.\n");
- return -ENXIO;
- }
-
- ndev = alloc_etherdev(sizeof(*lp));
- if (!ndev) {
- dev_err(&pdev->dev, "etherdev allocation failed.\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- lp = netdev_priv(ndev);
- lp->ndev = ndev;
- lp->pdev = pdev;
- lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
-
- spin_lock_init(&lp->tx_lock);
- spin_lock_init(&lp->hw_lock);
- spin_lock_init(&lp->stats_lock);
-
- lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(lp->apb_pclk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- ret = PTR_ERR(lp->apb_pclk);
- goto err_out_free_netdev;
- }
-
- ret = clk_prepare_enable(lp->apb_pclk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable APER clock.\n");
- goto err_out_free_netdev;
- }
-
- lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
- if (IS_ERR(lp->baseaddr)) {
- dev_err(&pdev->dev, "failed to map baseaddress.\n");
- ret = PTR_ERR(lp->baseaddr);
- goto err_out_clk_dis_aper;
- }
-
- ndev->irq = platform_get_irq(pdev, 0);
- ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
- ndev->netdev_ops = &netdev_ops;
- ndev->ethtool_ops = &dwceqos_ethtool_ops;
- ndev->base_addr = r_mem->start;
-
- dwceqos_get_hwfeatures(lp);
- dwceqos_mdio_set_csr(lp);
-
- ndev->hw_features = NETIF_F_SG;
-
- if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
- ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-
- if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
- ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
- if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
- ndev->hw_features |= NETIF_F_RXCSUM;
-
- ndev->features = ndev->hw_features;
-
- lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(lp->phy_ref_clk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_clk_dis_aper;
- }
-
- ret = clk_prepare_enable(lp->phy_ref_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_clk_dis_aper;
- }
-
- lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
- "phy-handle", 0);
- if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
- ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_clk_dis_phy;
- }
-
- lp->phy_node = of_node_get(lp->pdev->dev.of_node);
- }
-
- ret = of_get_phy_mode(lp->pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_deregister_fixed_link;
- }
-
- lp->phy_interface = ret;
-
- ret = dwceqos_mii_init(lp);
- if (ret) {
- dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_deregister_fixed_link;
- }
-
- ret = dwceqos_mii_probe(ndev);
- if (ret != 0) {
- netdev_err(ndev, "mii_probe fail.\n");
- ret = -ENXIO;
- goto err_out_deregister_fixed_link;
- }
-
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
- tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
- (unsigned long)ndev);
- tasklet_disable(&lp->tx_bdreclaim_tasklet);
-
- lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
- WQ_MEM_RECLAIM, 0);
- INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
-
- platform_set_drvdata(pdev, ndev);
- ret = dwceqos_probe_config_dt(pdev);
- if (ret) {
- dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
- ret);
- goto err_out_deregister_fixed_link;
- }
- dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
- pdev->id, ndev->base_addr, ndev->irq);
-
- ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
- ndev->name, ndev);
- if (ret) {
- dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
- ndev->irq, ret);
- goto err_out_deregister_fixed_link;
- }
-
- if (netif_msg_probe(lp))
- netdev_dbg(ndev, "net_local@%p\n", lp);
-
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_deregister_fixed_link;
- }
-
- return 0;
-
-err_out_deregister_fixed_link:
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_out_clk_dis_phy:
- clk_disable_unprepare(lp->phy_ref_clk);
-err_out_clk_dis_aper:
- clk_disable_unprepare(lp->apb_pclk);
-err_out_free_netdev:
- of_node_put(lp->phy_node);
- free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
- return ret;
-}
-
-static int dwceqos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct net_local *lp;
-
- if (ndev) {
- lp = netdev_priv(ndev);
-
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
- }
- mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
-
- unregister_netdev(ndev);
-
- clk_disable_unprepare(lp->phy_ref_clk);
- clk_disable_unprepare(lp->apb_pclk);
-
- free_netdev(ndev);
- }
-
- return 0;
-}
-
-static struct platform_driver dwceqos_driver = {
- .probe = dwceqos_probe,
- .remove = dwceqos_remove,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = dwceq_of_match,
- },
-};
-
-module_platform_driver(dwceqos_driver);
-
-MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
-MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index baa3e4a5731c..f864fd0663db 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
* device lock and allow waiting tasks (eg rmmod) to advance) */
priv->napi_stop = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
bdx_enable_interrupts(priv);
}
return work_done;
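
[Editor's note, not part of the patch] This hunk, and the matching ones further down in cpsw, davinci_emac, netcp_core, tilegx, tilepro, ps3_gelic_net, spider_net, tc35815 and tsi108_eth, converts napi_complete() to napi_complete_done() so the NAPI core learns how much work the poll actually did and can apply adaptive interrupt moderation (gro_flush_timeout). A minimal sketch of the poll-handler shape these drivers share after the conversion; foo_priv, foo_rx and foo_irq_enable are illustrative placeholders, only napi_complete_done() is the real API:

    #include <linux/netdevice.h>

    struct foo_priv {
            struct napi_struct napi;
            /* ... device state ... */
    };

    /* placeholder helpers, assumed to exist elsewhere in the driver */
    int foo_rx(struct foo_priv *priv, int budget);
    void foo_irq_enable(struct foo_priv *priv);

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int work_done;

            /* Process up to 'budget' received packets. */
            work_done = foo_rx(priv, budget);

            /* Stop polling only when the budget was not exhausted, and
             * report the work actually done so the core can moderate
             * interrupts; then re-enable the device interrupt.
             */
            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    foo_irq_enable(priv);
            }

            return work_done;
    }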
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fcf2b86..9b8a30bf939b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
goto fail_alloc;
}
-#warning FIXME: unhardcode gpio&reset bits
+ /* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b203143647e6..9f3d9c67e3fe 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -145,6 +145,7 @@ do { \
cpsw->data.active_slave)
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
static int debug_level;
module_param(debug_level, int, 0);
@@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+
struct cpsw_wr_regs {
u32 id_ver;
u32 soft_reset;
@@ -352,7 +357,6 @@ struct cpsw_slave {
struct phy_device *phy;
struct net_device *ndev;
u32 port_vlan;
- u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -395,6 +399,7 @@ struct cpsw_common {
struct cpts *cpts;
int rx_ch_num, tx_ch_num;
int speed;
+ int usage_count;
};
struct cpsw_priv {
@@ -699,18 +704,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
- bool ndev_status = false;
- struct cpsw_slave *slave = cpsw->slaves;
- int n;
-
- if (cpsw->data.dual_emac) {
- /* In dual emac mode check for all interfaces */
- for (n = cpsw->data.slaves; n; n--, slave++)
- if (netif_running(slave->ndev))
- ndev_status = true;
- }
-
- if (ndev_status && (status >= 0)) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->data.dual_emac && cpsw->usage_count &&
+ (status >= 0)) {
/* The packet received is for the interface which
* is already down and the other interface is up
* and running, instead of freeing which results
@@ -934,7 +930,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
}
if (num_rx < budget) {
- napi_complete(napi_rx);
+ napi_complete_done(napi_rx, num_rx);
writel(0xff, &cpsw->wr_regs->rx_en);
if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
cpsw->rx_irq_disabled = false;
@@ -1230,21 +1226,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
}
}
-static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
-{
- u32 i;
- u32 usage_count = 0;
-
- if (!cpsw->data.dual_emac)
- return 0;
-
- for (i = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].open_stat)
- usage_count++;
-
- return usage_count;
-}
-
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
struct sk_buff *skb,
struct cpdma_chan *txch)
@@ -1478,8 +1459,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
return ret;
}
- if (!cpsw_common_res_usage_state(cpsw))
- cpsw_intr_disable(cpsw);
netif_carrier_off(ndev);
/* Notify the stack of the actual queue counts. */
@@ -1501,8 +1480,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
CPSW_RTL_VERSION(reg));
- /* initialize host and slave ports */
- if (!cpsw_common_res_usage_state(cpsw))
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
@@ -1513,7 +1492,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
- if (!cpsw_common_res_usage_state(cpsw)) {
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
/* disable priority elevation */
__raw_writel(0, &cpsw->regs->ptype);
@@ -1555,9 +1535,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpdma_ctlr_start(cpsw->dma);
cpsw_intr_enable(cpsw);
-
- if (cpsw->data.dual_emac)
- cpsw->slaves[priv->emac_port].open_stat = true;
+ cpsw->usage_count++;
return 0;
@@ -1578,7 +1556,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
- if (cpsw_common_res_usage_state(cpsw) <= 1) {
+ if (cpsw->usage_count <= 1) {
napi_disable(&cpsw->napi_rx);
napi_disable(&cpsw->napi_tx);
cpts_unregister(cpsw->cpts);
@@ -1591,9 +1569,8 @@ static int cpsw_ndo_stop(struct net_device *ndev)
if (cpsw_need_resplit(cpsw))
cpsw_split_res(ndev);
+ cpsw->usage_count--;
pm_runtime_put_sync(cpsw->dev);
- if (cpsw->data.dual_emac)
- cpsw->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -1606,12 +1583,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpdma_chan *txch;
int ret, q_idx;
- netif_trans_update(ndev);
-
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
- return NETDEV_TX_OK;
+ return NET_XMIT_DROP;
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
@@ -2363,17 +2338,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv,
return 0;
}
-static int cpsw_set_channels(struct net_device *ndev,
- struct ethtool_channels *chs)
+static void cpsw_suspend_data_pass(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct cpsw_slave *slave;
- int i, ret;
-
- ret = cpsw_check_ch_settings(cpsw, chs);
- if (ret < 0)
- return ret;
+ int i;
/* Disable NAPI scheduling */
cpsw_intr_disable(cpsw);
@@ -2391,6 +2360,51 @@ static int cpsw_set_channels(struct net_device *ndev,
/* Handle rest of tx packets and stop cpdma channels */
cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* Allow rx packets handling */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_dormant_off(slave->ndev);
+
+ /* After this receive is started */
+ if (cpsw->usage_count) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ return ret;
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_tx_start_all_queues(slave->ndev);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ cpsw_suspend_data_pass(ndev);
ret = cpsw_update_channels(priv, chs);
if (ret)
goto err;
@@ -2413,30 +2427,14 @@ static int cpsw_set_channels(struct net_device *ndev,
dev_err(priv->dev, "cannot set real number of rx queues\n");
goto err;
}
-
- /* Enable rx packets handling */
- netif_dormant_off(slave->ndev);
}
- if (cpsw_common_res_usage_state(cpsw)) {
- ret = cpsw_fill_rx_channels(priv);
- if (ret)
- goto err;
-
+ if (cpsw->usage_count)
cpsw_split_res(ndev);
- /* After this receive is started */
- cpdma_ctlr_start(cpsw->dma);
- cpsw_intr_enable(cpsw);
- }
-
- /* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
- netif_tx_start_all_queues(slave->ndev);
- }
- return 0;
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
err:
dev_err(priv->dev, "cannot update channels number, closing device\n");
dev_close(ndev);
@@ -2479,6 +2477,52 @@ static int cpsw_nway_reset(struct net_device *ndev)
return -EOPNOTSUPP;
}
+static void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* not supported */
+ ering->tx_max_pending = 0;
+ ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+ ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
+ ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+static int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ /* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+ ering->rx_pending < CPSW_MAX_QUEUES ||
+ ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
+ return -EINVAL;
+
+ if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ return 0;
+
+ cpsw_suspend_data_pass(ndev);
+
+ cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+ if (cpsw->usage_count)
+ cpdma_chan_split_pool(cpsw->dma);
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+
+ dev_err(&ndev->dev, "cannot set ring params, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -2505,6 +2549,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_eee = cpsw_get_eee,
.set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
@@ -2969,6 +3015,7 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.has_ext_regs = true;
dma_params.desc_hw_addr = dma_params.desc_mem_phys;
dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
+ dma_params.descs_pool_size = descs_pool_size;
cpsw->dma = cpdma_ctlr_create(&dma_params);
if (!cpsw->dma) {
@@ -2985,7 +3032,7 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- ale_params.dev = &ndev->dev;
+ ale_params.dev = &pdev->dev;
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = data->slaves;
@@ -3072,9 +3119,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
- &ss_res->start, ndev->irq);
-
+ cpsw_notice(priv, probe,
+ "initialized device (regs %pa, irq %d, pool size %d)\n",
+ &ss_res->start, ndev->irq, dma_params.descs_pool_size);
if (cpsw->data.dual_emac) {
ret = cpsw_probe_dual_emac(priv);
if (ret) {
@@ -3160,7 +3207,7 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_common *cpsw = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 43b061bd8e07..ddd43e09111e 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1,5 +1,5 @@
/*
- * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine
*
* Copyright (C) 2012 Texas Instruments
*
@@ -27,11 +27,14 @@
#define BITMASK(bits) (BIT(bits) - 1)
-#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff)
+#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
#define ALE_VERSION_MINOR(rev) (rev & 0xff)
+#define ALE_VERSION_1R3 0x0103
+#define ALE_VERSION_1R4 0x0104
/* ALE Registers */
#define ALE_IDVER 0x00
+#define ALE_STATUS 0x04
#define ALE_CONTROL 0x08
#define ALE_PRESCALE 0x10
#define ALE_UNKNOWNVLAN 0x18
@@ -39,6 +42,13 @@
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40
+/* ALE NetCP NU switch specific Registers */
+#define ALE_UNKNOWNVLAN_MEMBER 0x90
+#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94
+#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98
+#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
+#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@@ -51,6 +61,10 @@
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3
+#define ALE_TABLE_SIZE_MULTIPLIER 1024
+#define ALE_STATUS_SIZE_MASK 0x1f
+#define ALE_TABLE_SIZE_DEFAULT 64
+
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
int idx;
@@ -84,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
+#define DEFINE_ALE_FIELD1(name, start) \
+static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
+ u32 bits) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
-DEFINE_ALE_FIELD(port_mask, 66, 3)
+DEFINE_ALE_FIELD1(port_mask, 66)
DEFINE_ALE_FIELD(super, 65, 1)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
-DEFINE_ALE_FIELD(port_num, 66, 2)
+DEFINE_ALE_FIELD1(port_num, 66)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
-DEFINE_ALE_FIELD(vlan_untag_force, 24, 3)
-DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3)
-DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3)
-DEFINE_ALE_FIELD(vlan_member_list, 0, 3)
+DEFINE_ALE_FIELD1(vlan_untag_force, 24)
+DEFINE_ALE_FIELD1(vlan_reg_mcast, 16)
+DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8)
+DEFINE_ALE_FIELD1(vlan_member_list, 0)
DEFINE_ALE_FIELD(mcast, 40, 1)
+/* ALE NetCP nu switch specific */
+DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
+DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
@@ -223,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
{
int mask;
- mask = cpsw_ale_get_port_mask(ale_entry);
+ mask = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
if ((mask & port_mask) == 0)
return; /* ports dont intersect, not interested */
mask &= ~port_mask;
/* free if only remaining port is host port */
if (mask)
- cpsw_ale_set_port_mask(ale_entry, mask);
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
@@ -291,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
- cpsw_ale_set_port_num(ale_entry, port);
+ cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
@@ -338,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
- mask = cpsw_ale_get_port_mask(ale_entry);
+ mask = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
port_mask |= mask;
- cpsw_ale_set_port_mask(ale_entry, port_mask);
+ cpsw_ale_set_port_mask(ale_entry, port_mask,
+ ale->port_mask_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -367,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_read(ale, idx, ale_entry);
if (port_mask)
- cpsw_ale_set_port_mask(ale_entry, port_mask);
+ cpsw_ale_set_port_mask(ale_entry, port_mask,
+ ale->port_mask_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
@@ -376,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
}
EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
+/* ALE NetCP NU switch specific vlan functions */
+static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ int reg_mcast, int unreg_mcast)
+{
+ int idx;
+
+ /* Set VLAN registered multicast flood mask */
+ idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
+ writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+
+ /* Set VLAN unregistered multicast flood mask */
+ idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+ writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+}
+
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast)
{
@@ -389,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
cpsw_ale_set_vlan_id(ale_entry, vid);
- cpsw_ale_set_vlan_untag_force(ale_entry, untag);
- cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
- cpsw_ale_set_vlan_member_list(ale_entry, port);
+ cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+ ale->vlan_field_bits);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+ ale->vlan_field_bits);
+ } else {
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
+ }
+ cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -418,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
if (port_mask)
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+ ale->vlan_field_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
@@ -446,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
if (type != ALE_TYPE_VLAN)
continue;
- unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
+ unreg_mcast =
+ cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+ ale->vlan_field_bits);
if (allmulti)
unreg_mcast |= 1;
else
unreg_mcast &= ~1;
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+ ale->vlan_field_bits);
cpsw_ale_write(ale, idx, ale_entry);
}
}
@@ -464,7 +522,7 @@ struct ale_control_info {
int bits;
};
-static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
+static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
[ALE_ENABLE] = {
.name = "enable",
.offset = ALE_CONTROL,
@@ -721,11 +779,83 @@ static void cpsw_ale_timer(unsigned long arg)
void cpsw_ale_start(struct cpsw_ale *ale)
{
- u32 rev;
+ u32 rev, ale_entries;
rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
- dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
- ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev));
+ if (!ale->params.major_ver_mask)
+ ale->params.major_ver_mask = 0xff;
+ ale->version =
+ (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
+ ALE_VERSION_MINOR(rev);
+ dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
+ ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
+ ALE_VERSION_MINOR(rev));
+
+ if (!ale->params.ale_entries) {
+ ale_entries =
+ __raw_readl(ale->params.ale_regs + ALE_STATUS) &
+ ALE_STATUS_SIZE_MASK;
+ /* ALE available on newer NetCP switches has introduced
+ * a register, ALE_STATUS, to indicate the size of ALE
+ * table which shows the size as a multiple of 1024 entries.
+ * For these, params.ale_entries will be set to zero. So
+ * read the register and update the value of ale_entries.
+ * ALE table on NetCP lite, is much smaller and is indicated
+ * by a value of zero in ALE_STATUS. So use a default value
+ * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected
+ * to set the value of ale_entries for all other versions
+ * of ALE.
+ */
+ if (!ale_entries)
+ ale_entries = ALE_TABLE_SIZE_DEFAULT;
+ else
+ ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+ ale->params.ale_entries = ale_entries;
+ }
+ dev_info(ale->params.dev,
+ "ALE Table size %ld\n", ale->params.ale_entries);
+
+ /* set default bits for existing h/w */
+ ale->port_mask_bits = 3;
+ ale->port_num_bits = 2;
+ ale->vlan_field_bits = 3;
+
+ /* Set defaults override for ALE on NetCP NU switch and for version
+ * 1R3
+ */
+ if (ale->params.nu_switch_ale) {
+ /* Separate registers for unknown vlan configuration.
+ * Also there are N bits, where N is number of ale
+ * ports and shift value should be 0
+ */
+ ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset =
+ ALE_UNKNOWNVLAN_MEMBER;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset =
+ ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset =
+ ALE_UNKNOWNVLAN_REG_MCAST_FLOOD;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
+ ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = ale->params.ale_ports - 1;
+ ale->vlan_field_bits = ale->params.ale_ports;
+ } else if (ale->version == ALE_VERSION_1R3) {
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = 3;
+ ale->vlan_field_bits = ale->params.ale_ports;
+ }
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
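
[Editor's note, not part of the patch] The DEFINE_ALE_FIELD1() macro added above generates accessors whose field width is passed in by the caller, so one set of helpers serves both the 3-bit fields of the classic 3-port ALE and the wider fields of the NetCP NU switch discovered at cpsw_ale_start() time. Written out by hand from the macro in the hunk, DEFINE_ALE_FIELD1(port_mask, 66) expands to roughly:

    static inline int cpsw_ale_get_port_mask(u32 *ale_entry, u32 bits)
    {
            return cpsw_ale_get_field(ale_entry, 66, bits);
    }

    static inline void cpsw_ale_set_port_mask(u32 *ale_entry, u32 value,
                                              u32 bits)
    {
            cpsw_ale_set_field(ale_entry, 66, bits, value);
    }

    /* Callers now supply the width, e.g.:
     *   mask = cpsw_ale_get_port_mask(ale_entry, ale->port_mask_bits);
     */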
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a7001894f3da..25d24e8d0904 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -1,5 +1,5 @@
/*
- * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
*
* Copyright (C) 2012 Texas Instruments
*
@@ -21,6 +21,16 @@ struct cpsw_ale_params {
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
unsigned long ale_ports;
+ /* NU Switch has specific handling as number of bits in ALE entries
+ * are different than other versions of ALE. Also there are specific
+ * registers for unknown vlan specific fields. So use nu_switch_ale
+ * to identify this hardware.
+ */
+ bool nu_switch_ale;
+ /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
+ * pass it from caller.
+ */
+ u32 major_ver_mask;
};
struct cpsw_ale {
@@ -28,6 +38,11 @@ struct cpsw_ale {
struct timer_list timer;
unsigned long ageout;
int allmulti;
+ u32 version;
+ /* These bits are different on NetCP NU Switch ALE */
+ u32 port_mask_bits;
+ u32 port_num_bits;
+ u32 vlan_field_bits;
};
enum cpsw_ale_control {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 36518fc5c7cc..7ecc6b70e7e8 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -108,6 +108,8 @@ struct cpdma_ctlr {
spinlock_t lock;
struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
int chan_num;
+ int num_rx_desc; /* RX descriptors number */
+ int num_tx_desc; /* TX descriptors number */
};
struct cpdma_chan {
@@ -166,12 +168,12 @@ static struct cpdma_control_info controls[] = {
#define num_chan params.num_chan
/* various accessors */
-#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
-#define chan_read(chan, fld) __raw_readl((chan)->fld)
-#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
-#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
-#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
-#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) readl((chan)->fld)
+#define desc_read(desc, fld) readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed) \
do { \
@@ -181,8 +183,10 @@ static struct cpdma_control_info controls[] = {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
+ struct cpdma_desc_pool *pool = ctlr->pool;
+
if (!pool)
return;
@@ -191,10 +195,8 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
gen_pool_size(pool->gen_pool),
gen_pool_avail(pool->gen_pool));
if (pool->cpumap)
- dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
pool->phys);
- else
- iounmap(pool->iomap);
}
/*
@@ -203,37 +205,50 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
* devices (e.g. cpsw switches) use plain old memory. Descriptor pools
* abstract out these details
*/
-static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
- int size, int align)
+int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
+ struct cpdma_params *cpdma_params = &ctlr->params;
struct cpdma_desc_pool *pool;
- int ret;
+ int ret = -ENOMEM;
- pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
goto gen_pool_create_fail;
+ ctlr->pool = pool;
+
+ pool->mem_size = cpdma_params->desc_mem_size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
+ cpdma_params->desc_align);
+ pool->num_desc = pool->mem_size / pool->desc_size;
+
+ if (cpdma_params->descs_pool_size) {
+ /* recalculate memory size required cpdma descriptor pool
+ * basing on number of descriptors specified by user and
+ * if memory size > CPPI internal RAM size (desc_mem_size)
+ * then switch to use DDR
+ */
+ pool->num_desc = cpdma_params->descs_pool_size;
+ pool->mem_size = pool->desc_size * pool->num_desc;
+ if (pool->mem_size > cpdma_params->desc_mem_size)
+ cpdma_params->desc_mem_phys = 0;
+ }
- pool->dev = dev;
- pool->mem_size = size;
- pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
- pool->num_desc = size / pool->desc_size;
-
- pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
- "cpdma");
+ pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
+ -1, "cpdma");
if (IS_ERR(pool->gen_pool)) {
- dev_err(dev, "pool create failed %ld\n",
- PTR_ERR(pool->gen_pool));
+ ret = PTR_ERR(pool->gen_pool);
+ dev_err(ctlr->dev, "pool create failed %d\n", ret);
goto gen_pool_create_fail;
}
- if (phys) {
- pool->phys = phys;
- pool->iomap = ioremap(phys, size); /* should be memremap? */
- pool->hw_addr = hw_addr;
+ if (cpdma_params->desc_mem_phys) {
+ pool->phys = cpdma_params->desc_mem_phys;
+ pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
+ pool->mem_size);
+ pool->hw_addr = cpdma_params->desc_hw_addr;
} else {
- pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
- GFP_KERNEL);
+ pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
+ &pool->hw_addr, GFP_KERNEL);
pool->iomap = (void __iomem __force *)pool->cpumap;
pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
}
@@ -244,16 +259,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
pool->phys, pool->mem_size, -1);
if (ret < 0) {
- dev_err(dev, "pool add failed %d\n", ret);
+ dev_err(ctlr->dev, "pool add failed %d\n", ret);
goto gen_pool_add_virt_fail;
}
- return pool;
+ return 0;
gen_pool_add_virt_fail:
- cpdma_desc_pool_destroy(pool);
+ cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
- return NULL;
+ ctlr->pool = NULL;
+ return ret;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -502,13 +518,11 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->chan_num = 0;
spin_lock_init(&ctlr->lock);
- ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
- ctlr->params.desc_mem_phys,
- ctlr->params.desc_hw_addr,
- ctlr->params.desc_mem_size,
- ctlr->params.desc_align);
- if (!ctlr->pool)
+ if (cpdma_desc_pool_create(ctlr))
return NULL;
+ /* split pool equally between RX/TX by default */
+ ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
+ ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -542,10 +556,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
}
for (i = 0; i < ctlr->num_chan; i++) {
- __raw_writel(0, ctlr->params.txhdp + 4 * i);
- __raw_writel(0, ctlr->params.rxhdp + 4 * i);
- __raw_writel(0, ctlr->params.txcp + 4 * i);
- __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ writel(0, ctlr->params.txhdp + 4 * i);
+ writel(0, ctlr->params.rxhdp + 4 * i);
+ writel(0, ctlr->params.txcp + 4 * i);
+ writel(0, ctlr->params.rxcp + 4 * i);
}
dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
@@ -623,7 +637,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
cpdma_chan_destroy(ctlr->channels[i]);
- cpdma_desc_pool_destroy(ctlr->pool);
+ cpdma_desc_pool_destroy(ctlr);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -708,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
}
}
/* use remains */
- most_chan->desc_num += desc_cnt;
+ if (most_chan)
+ most_chan->desc_num += desc_cnt;
}
/**
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
-static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
- struct cpdma_desc_pool *pool = ctlr->pool;
int free_rx_num = 0, free_tx_num = 0;
int rx_weight = 0, tx_weight = 0;
int tx_desc_num, rx_desc_num;
struct cpdma_chan *chan;
- int i, tx_num = 0;
+ int i;
if (!ctlr->chan_num)
return 0;
@@ -741,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
if (!chan->weight)
free_tx_num++;
tx_weight += chan->weight;
- tx_num++;
}
}
if (rx_weight > 100 || tx_weight > 100)
return -EINVAL;
- tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
- rx_desc_num = pool->num_desc - tx_desc_num;
+ tx_desc_num = ctlr->num_tx_desc;
+ rx_desc_num = ctlr->num_rx_desc;
if (free_tx_num) {
tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
@@ -765,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
+
/* cpdma_chan_set_weight - set weight of a channel in percentage.
* Tx and Rx channels have separate weights. That is 100% for RX
@@ -820,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
*/
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
- struct cpdma_ctlr *ctlr = ch->ctlr;
unsigned long flags, ch_flags;
+ struct cpdma_ctlr *ctlr;
int ret, prio_mode;
u32 rmask;
@@ -831,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
if (ch->rate == rate)
return rate;
+ ctlr = ch->ctlr;
spin_lock_irqsave(&ctlr->lock, flags);
spin_lock_irqsave(&ch->lock, ch_flags);
@@ -898,7 +914,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->chan_num = chan_num;
chan->handler = handler;
chan->rate = 0;
- chan->desc_num = ctlr->pool->num_desc / 2;
chan->weight = 0;
if (is_rx_chan(chan)) {
@@ -1061,13 +1076,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, directed);
- desc_write(desc, hw_next, 0);
- desc_write(desc, hw_buffer, buffer);
- desc_write(desc, hw_len, len);
- desc_write(desc, hw_mode, mode | len);
- desc_write(desc, sw_token, token);
- desc_write(desc, sw_buffer, buffer);
- desc_write(desc, sw_len, len);
+ /* Relaxed IO accessors can be used here as there is read barrier
+ * at the end of write sequence.
+ */
+ writel_relaxed(0, &desc->hw_next);
+ writel_relaxed(buffer, &desc->hw_buffer);
+ writel_relaxed(len, &desc->hw_len);
+ writel_relaxed(mode | len, &desc->hw_mode);
+ writel_relaxed(token, &desc->sw_token);
+ writel_relaxed(buffer, &desc->sw_buffer);
+ writel_relaxed(len, &desc->sw_len);
+ desc_read(desc, sw_len);
__cpdma_chan_submit(chan, desc);
@@ -1136,7 +1155,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
}
desc_dma = desc_phys(pool, desc);
- status = __raw_readl(&desc->hw_mode);
+ status = desc_read(desc, hw_mode);
outlen = status & 0x7ff;
if (status & CPDMA_DESC_OWNER) {
chan->stats.busy_dequeue++;
@@ -1155,7 +1174,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
chan->count--;
chan->stats.good_dequeue++;
- if (status & CPDMA_DESC_EOQ) {
+ if ((status & CPDMA_DESC_EOQ) && chan->head) {
chan->stats.requeue++;
chan_write(chan, hdp, desc_phys(pool, chan->head));
}
@@ -1316,4 +1335,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
+
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->num_tx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
+
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+{
+ ctlr->num_rx_desc = num_rx_desc;
+ ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
+
MODULE_LICENSE("GPL");
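
[Editor's note, not part of the patch] cpdma now records the RX/TX split of the descriptor pool in ctlr->num_rx_desc/num_tx_desc and exports cpdma_set_num_rx_descs(), so cpsw_set_ringparam() can move descriptors between directions while the total stays at pool->num_desc. A standalone plain-C demo of that bookkeeping, mirroring the arithmetic in the hunks above (256 is the CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT from the cpsw.c hunk; everything else is illustrative):

    #include <stdio.h>

    struct pool_split { int num_desc, num_rx, num_tx; };

    static void split_default(struct pool_split *p, int num_desc)
    {
            p->num_desc = num_desc;
            p->num_tx = num_desc / 2;               /* default: half for TX */
            p->num_rx = num_desc - p->num_tx;       /* remainder for RX */
    }

    static void set_num_rx(struct pool_split *p, int num_rx)
    {
            p->num_rx = num_rx;
            p->num_tx = p->num_desc - num_rx;       /* keep the total constant */
    }

    int main(void)
    {
            struct pool_split p;

            split_default(&p, 256);                 /* default pool size */
            set_num_rx(&p, 192);                    /* e.g. ethtool -G eth0 rx 192 */
            printf("rx=%d tx=%d total=%d\n", p.num_rx, p.num_tx, p.num_desc);
            return 0;
    }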
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 4a167db2abab..fd65ce2b83de 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -37,6 +37,7 @@ struct cpdma_params {
int desc_mem_size;
int desc_align;
u32 bus_freq_mhz;
+ u32 descs_pool_size;
/*
* Some instances of embedded cpdma controllers have extra control and
@@ -113,5 +114,9 @@ enum cpdma_control {
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 481c7bf0395b..64d5527feb2a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
&emac_rxhost_errcodes[cause][0], ch);
}
} else if (num_rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, num_rx_pkts);
emac_int_enable(priv);
}
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 0f58c584ae09..8900a6fad318 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -23,6 +23,7 @@
#include <linux/netdevice.h>
#include <linux/soc/ti/knav_dma.h>
+#include <linux/u64_stats_sync.h>
/* Maximum Ethernet frame size supported by Keystone switch */
#define NETCP_MAX_FRAME_SIZE 9504
@@ -68,6 +69,20 @@ struct netcp_addr {
struct list_head node;
};
+struct netcp_stats {
+ struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u32 rx_errors;
+ u32 rx_dropped;
+
+ struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u32 tx_errors;
+ u32 tx_dropped;
+};
+
struct netcp_intf {
struct device *dev;
struct device *ndev_dev;
@@ -87,6 +102,11 @@ struct netcp_intf {
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
+#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0)
+ u32 hw_cap;
+
+ /* 64-bit netcp stats */
+ struct netcp_stats stats;
void *rx_channel;
const char *dma_chan_name;
@@ -115,6 +135,7 @@ struct netcp_packet {
struct sk_buff *skb;
__le32 *epib;
u32 *psdata;
+ u32 eflags;
unsigned int psdata_len;
struct netcp_intf *netcp;
struct netcp_tx_pipe *tx_pipe;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c243335ed649..7c7ae0890e90 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
*ndesc = le32_to_cpu(desc->next_desc);
}
+static void get_desc_info(u32 *desc_info, u32 *pkt_info,
+ struct knav_dma_desc *desc)
+{
+ *desc_info = le32_to_cpu(desc->desc_info);
+ *pkt_info = le32_to_cpu(desc->packet_info);
+}
+
static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
/* No Endian conversion needed as this data is untouched by hw */
@@ -622,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
+ struct netcp_stats *rx_stats = &netcp->stats;
struct knav_dma_desc *desc;
unsigned int dma_sz;
dma_addr_t dma;
@@ -635,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp)
if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
__func__);
- netcp->ndev->stats.rx_errors++;
+ rx_stats->rx_errors++;
continue;
}
netcp_free_rx_desc_chain(netcp, desc);
- netcp->ndev->stats.rx_dropped++;
+ rx_stats->rx_dropped++;
}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
+ struct netcp_stats *rx_stats = &netcp->stats;
unsigned int dma_sz, buf_len, org_buf_len;
struct knav_dma_desc *desc, *ndesc;
unsigned int pkt_sz = 0, accum_sz;
@@ -653,6 +662,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
struct netcp_packet p_info;
struct sk_buff *skb;
void *org_buf_ptr;
+ u32 tmp;
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
if (!dma_desc)
@@ -724,21 +734,27 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
knav_pool_desc_put(netcp->rx_pool, ndesc);
}
- /* Free the primary descriptor */
- knav_pool_desc_put(netcp->rx_pool, desc);
-
/* check for packet len and warn */
if (unlikely(pkt_sz != accum_sz))
dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
pkt_sz, accum_sz);
- /* Remove ethernet FCS from the packet */
- __pskb_trim(skb, skb->len - ETH_FCS_LEN);
+ /* Newer version of the Ethernet switch can trim the Ethernet FCS
+ * from the packet and is indicated in hw_cap. So trim it only for
+ * older h/w
+ */
+ if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
+ __pskb_trim(skb, skb->len - ETH_FCS_LEN);
/* Call each of the RX hooks */
p_info.skb = skb;
skb->dev = netcp->ndev;
p_info.rxtstamp_complete = false;
+ get_desc_info(&tmp, &p_info.eflags, desc);
+ p_info.epib = desc->epib;
+ p_info.psdata = (u32 __force *)desc->psdata;
+ p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
+ KNAV_DMA_DESC_EFLAGS_MASK);
list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
int ret;
@@ -747,14 +763,20 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
if (unlikely(ret)) {
dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
rx_hook->order, ret);
- netcp->ndev->stats.rx_errors++;
+ /* Free the primary descriptor */
+ rx_stats->rx_dropped++;
+ knav_pool_desc_put(netcp->rx_pool, desc);
dev_kfree_skb(skb);
return 0;
}
}
+ /* Free the primary descriptor */
+ knav_pool_desc_put(netcp->rx_pool, desc);
- netcp->ndev->stats.rx_packets++;
- netcp->ndev->stats.rx_bytes += skb->len;
+ u64_stats_update_begin(&rx_stats->syncp_rx);
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp_rx);
/* push skb up the stack */
skb->protocol = eth_type_trans(skb, netcp->ndev);
@@ -763,7 +785,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
free_desc:
netcp_free_rx_desc_chain(netcp, desc);
- netcp->ndev->stats.rx_errors++;
+ rx_stats->rx_errors++;
return 0;
}
@@ -947,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
netcp_rxpool_refill(netcp);
if (packets < budget) {
- napi_complete(&netcp->rx_napi);
+ napi_complete_done(&netcp->rx_napi, packets);
knav_queue_enable_notify(netcp->rx_queue);
}
@@ -994,6 +1016,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
unsigned int budget)
{
+ struct netcp_stats *tx_stats = &netcp->stats;
struct knav_dma_desc *desc;
struct netcp_tx_cb *tx_cb;
struct sk_buff *skb;
@@ -1008,7 +1031,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
- netcp->ndev->stats.tx_errors++;
+ tx_stats->tx_errors++;
continue;
}
@@ -1019,7 +1042,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
netcp_free_tx_desc_chain(netcp, desc, dma_sz);
if (!skb) {
dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
- netcp->ndev->stats.tx_errors++;
+ tx_stats->tx_errors++;
continue;
}
@@ -1036,8 +1059,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
netif_wake_subqueue(netcp->ndev, subqueue);
}
- netcp->ndev->stats.tx_packets++;
- netcp->ndev->stats.tx_bytes += skb->len;
+ u64_stats_update_begin(&tx_stats->syncp_tx);
+ tx_stats->tx_packets++;
+ tx_stats->tx_bytes += skb->len;
+ u64_stats_update_end(&tx_stats->syncp_tx);
dev_kfree_skb(skb);
pkts++;
}
@@ -1212,9 +1237,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
/* psdata points to both native-endian and device-endian data */
__le32 *psdata = (void __force *)p_info.psdata;
- memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
- p_info.psdata_len);
- set_words(p_info.psdata, p_info.psdata_len, psdata);
+ set_words((u32 *)psdata +
+ (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
+ p_info.psdata_len, psdata);
tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
KNAV_DMA_DESC_PSLEN_SHIFT;
}
@@ -1258,6 +1283,7 @@ out:
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *tx_stats = &netcp->stats;
int subqueue = skb_get_queue_mapping(skb);
struct knav_dma_desc *desc;
int desc_count, ret = 0;
@@ -1273,7 +1299,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If we get here, the skb has already been dropped */
dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
ret);
- ndev->stats.tx_dropped++;
+ tx_stats->tx_dropped++;
return ret;
}
skb->len = NETCP_MIN_PACKET_SIZE;
@@ -1290,8 +1316,6 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ret)
goto drop;
- netif_trans_update(ndev);
-
/* Check Tx pool count & stop subqueue if needed */
desc_count = knav_pool_count(netcp->tx_pool);
if (desc_count < netcp->tx_pause_threshold) {
@@ -1301,7 +1325,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
drop:
- ndev->stats.tx_dropped++;
+ tx_stats->tx_dropped++;
if (desc)
netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
dev_kfree_skb(skb);
@@ -1883,12 +1907,44 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
+static void
+netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *p = &netcp->stats;
+ u64 rxpackets, rxbytes, txpackets, txbytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+
+ stats->rx_packets = rxpackets;
+ stats->rx_bytes = rxbytes;
+ stats->tx_packets = txpackets;
+ stats->tx_bytes = txbytes;
+
+ /* The following are stored as 32 bit */
+ stats->rx_errors = p->rx_errors;
+ stats->rx_dropped = p->rx_dropped;
+ stats->tx_dropped = p->tx_dropped;
+}
+
static const struct net_device_ops netcp_netdev_ops = {
.ndo_open = netcp_ndo_open,
.ndo_stop = netcp_ndo_stop,
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
.ndo_do_ioctl = netcp_ndo_ioctl,
+ .ndo_get_stats64 = netcp_get_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
@@ -1935,6 +1991,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
INIT_LIST_HEAD(&netcp->txhook_list_head);
INIT_LIST_HEAD(&netcp->rxhook_list_head);
INIT_LIST_HEAD(&netcp->addr_list);
+ u64_stats_init(&netcp->stats.syncp_rx);
+ u64_stats_init(&netcp->stats.syncp_tx);
netcp->netcp_device = netcp_device;
netcp->dev = netcp_device->device;
netcp->ndev = ndev;
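
[Editor's note, not part of the patch] The netcp changes move the packet and byte counters into a per-interface netcp_stats structure protected by u64_stats_sync, so 64-bit counters read consistently on 32-bit SMP. A minimal kernel-style sketch of the writer/reader pairing the new code uses; only the u64_stats_* helpers are real API, the foo_* names are illustrative:

    #include <linux/u64_stats_sync.h>

    struct foo_stats {
            struct u64_stats_sync syncp;    /* initialise with u64_stats_init() */
            u64 packets;
            u64 bytes;
    };

    /* Writer side, e.g. the RX/TX completion path. */
    static void foo_account(struct foo_stats *s, unsigned int len)
    {
            u64_stats_update_begin(&s->syncp);
            s->packets++;
            s->bytes += len;
            u64_stats_update_end(&s->syncp);
    }

    /* Reader side, e.g. .ndo_get_stats64: retry if a writer raced with us. */
    static void foo_read(struct foo_stats *s, u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *packets = s->packets;
                    *bytes = s->bytes;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }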
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 7d9e36f66735..eece3e2eec14 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -81,7 +81,6 @@
#define GBENU_CPTS_OFFSET 0x1d000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
-#define GBENU_NUM_ALE_ENTRIES 1024
#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
@@ -103,7 +102,7 @@
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
-#define XGBE10_NUM_ALE_ENTRIES 1024
+#define XGBE10_NUM_ALE_ENTRIES 2048
#define GBE_TIMER_INTERVAL (HZ / 2)
@@ -122,6 +121,7 @@
#define MACSL_FULLDUPLEX BIT(0)
#define GBE_CTL_P0_ENABLE BIT(2)
+#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13)
#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)
@@ -2313,7 +2313,6 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
phydev_name(slave->phy));
phy_start(slave->phy);
- phy_read_status(slave->phy);
}
return 0;
}
@@ -2821,7 +2820,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
struct netcp_intf *netcp = netdev_priv(ndev);
struct gbe_slave *slave = gbe_intf->slave;
int port_num = slave->port_num;
- u32 reg;
+ u32 reg, val;
int ret;
reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
@@ -2851,7 +2850,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
/* Control register */
- writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+ val = GBE_CTL_P0_ENABLE;
+ if (IS_SS_ID_MU(gbe_dev)) {
+ val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
+ netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
+ }
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
/* All statistics enabled and STAT AB visible by default */
writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
@@ -2930,7 +2934,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
}
slave->open = false;
- slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == XGMII_LINK_MAC_PHY))
+ slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
if (slave->link_interface >= XGMII_LINK_MAC_PHY)
@@ -3112,7 +3118,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
dev_dbg(dev, "phy found: id is: 0x%s\n",
phydev_name(slave->phy));
phy_start(slave->phy);
- phy_read_status(slave->phy);
}
}
}
@@ -3433,7 +3438,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
- gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -3601,7 +3605,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
ale_params.ale_entries = gbe_dev->ale_entries;
ale_params.ale_ports = gbe_dev->ale_ports;
-
+ if (IS_SS_ID_MU(gbe_dev)) {
+ ale_params.major_ver_mask = 0x7;
+ ale_params.nu_switch_ale = true;
+ }
gbe_dev->ale = cpsw_ale_create(&ale_params);
if (!gbe_dev->ale) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 2255f9a6f3bc..7c634bc75615 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
/* There are no packets left. */
- napi_complete(&info_mpipe->napi);
+ napi_complete_done(&info_mpipe->napi, work);
md = &mpipe_data[instance];
/* Re-enable hypervisor interrupts. */
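
The napi_complete() -> napi_complete_done() conversions in this and the following drivers all have the same shape: the poll handler reports how much work it actually did, which lets the core make better busy-poll and interrupt re-enable decisions. A minimal sketch of the resulting pattern, with hypothetical driver names:

	struct example_priv {
		struct napi_struct napi;
		/* ... device state ... */
	};

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv = container_of(napi, struct example_priv, napi);
		int work = example_rx(priv, budget);	/* hypothetical RX processing */

		if (work < budget) {
			napi_complete_done(napi, work);	/* report the real work count */
			example_enable_rx_irq(priv);	/* hypothetical: re-arm device IRQs */
		}
		return work;
	}
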
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 0a3b7dafa3ba..49ccee4b9aec 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete(&info->napi);
+ napi_complete_done(&info->napi, work);
if (!priv->active)
goto done;
@@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
* Returns the address of the device statistics structure.
*/
-static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void tile_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tile_net_priv *priv = netdev_priv(dev);
u64 rx_packets = 0, tx_packets = 0;
@@ -2090,12 +2090,8 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
stats->tx_bytes = tx_bytes;
stats->rx_errors = rx_errors;
stats->rx_dropped = rx_dropped;
-
- return stats;
}
-
-
/*
* Change the Ethernet Address of the NIC.
*
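
tile_net_get_stats64() above, and the via-rhine change further down, both track the tree-wide switch of .ndo_get_stats64 from returning a struct rtnl_link_stats64 pointer to returning void: the core always supplies the storage, and every driver simply returned the storage it was handed, so the return value carried no information. A minimal sketch of the new callback shape (the priv counters are placeholders):

	static void example_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *stats)
	{
		struct example_priv *priv = netdev_priv(dev);

		stats->rx_packets = priv->rx_packets;	/* hypothetical counters */
		stats->tx_packets = priv->tx_packets;
		/* fill in the remaining fields; nothing is returned any more */
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_get_stats64	= example_get_stats64,
		/* ... */
	};
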
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 345316c749e7..72013314bba8 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
}
if (packets_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, packets_done);
gelic_card_rx_irq_on(card);
}
return packets_done;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index cb341dfe65ad..cec9e70ab995 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
if (packets_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, packets_done);
spider_net_rx_irq_on(card);
card->ignore_rx_ramfull = 0;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3be61ed28741..a45f98fa4aa7 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
spin_unlock(&lp->rx_lock);
if (received < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, received);
/* enable interrupts */
tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index f153ad729ce5..c5583991da4a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
if (num_received < budget) {
data->rxpending = 0;
- napi_complete(napi);
+ napi_complete_done(napi, num_received);
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ(TSI108_EC_INTMASK)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0a6c4e804eed..c068c58428f7 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
-static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static void rhine_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
@@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
iowrite16(enable_mask, ioaddr + IntrEnable);
mmiowb();
}
@@ -2221,7 +2221,7 @@ out_unlock:
mutex_unlock(&rp->task_lock);
}
-static struct rtnl_link_stats64 *
+static void
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -2244,8 +2244,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = rp->tx_stats.packets;
stats->tx_bytes = rp->tx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
-
- return stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
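
rhine_get_stats64() keeps its u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop; only the trailing return disappears. For context, a hedged sketch of both sides of that stats-seqcount pattern (names are illustrative, not taken from the driver):

	struct example_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	/* writer side, e.g. in the completion path */
	static void example_count(struct example_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side, e.g. from .ndo_get_stats64 */
	static void example_read(struct example_stats *s, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*packets = s->packets;
			*bytes   = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}
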
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4716e60e2ccb..d088788b27a7 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget)
velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
mac_enable_int(vptr->mac_regs);
}
spin_unlock_irqrestore(&vptr->lock, flags);
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index e1296ef2cf66..f90267f0519f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
w5100_enable_intr(priv);
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 724fabd38a23..56ae573001e8 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
w5300_write(priv, W5300_IMR, IR_S0);
mmiowb();
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b10c09..69e31ceccfae 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl ioread32be
+#define xemaclite_writel iowrite32be
+#else
+#define xemaclite_readl ioread32
+#define xemaclite_writel iowrite32
+#endif
+
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+ drvdata->base_addr + XEL_RSR_OFFSET);
}
/**
@@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
- __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
- addr + XEL_TPLR_OFFSET);
+ xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+ addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
- __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
*
* Return: Total number of bytes received
*/
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
return 0; /* No data was available */
/* Verify that buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
- length = ((ntohl(__raw_readl(addr +
+ length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
+ length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ if (WARN_ON(length > maxlen))
+ length = maxlen;
+
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
- __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
- __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+ xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
- while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data);
+ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
u32 tx_status;
/* Check if there is Rx Data available */
- if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
- (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+ (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET);
tx_complete = true;
}
@@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
- while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(XEL_MDIOADDR_OP_MASK |
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
- rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+ rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(~XEL_MDIOADDR_OP_MASK &
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
- __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
@@ -1029,20 +1041,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
}
/**
- * xemaclite_remove_ndev - Free the network device
- * @ndev: Pointer to the network device to be freed
- *
- * This function un maps the IO region of the Emaclite device and frees the net
- * device.
- */
-static void xemaclite_remove_ndev(struct net_device *ndev)
-{
- if (ndev) {
- free_netdev(ndev);
- }
-}
-
-/**
* get_bool - Get a parameter from the OF device
* @ofdev: Pointer to OF device structure
* @s: Property to be retrieved
@@ -1065,7 +1063,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
}
}
-static struct net_device_ops xemaclite_netdev_ops;
+static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
@@ -1140,8 +1138,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
/* Clear the Tx CSR's in case this is a restart */
- __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
- __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
@@ -1172,7 +1170,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
return 0;
error:
- xemaclite_remove_ndev(ndev);
+ free_netdev(ndev);
return rc;
}
@@ -1204,7 +1202,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- xemaclite_remove_ndev(ndev);
+ free_netdev(ndev);
return 0;
}
@@ -1219,7 +1217,7 @@ xemaclite_poll_controller(struct net_device *ndev)
}
#endif
-static struct net_device_ops xemaclite_netdev_ops = {
+static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
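
Two independent changes run through the emaclite diff above: the __raw_readl()/__raw_writel() accessors become endianness-aware wrappers (so the core works on little-endian MicroBlaze/Zynq as well as big-endian PowerPC), and xemaclite_recv_data() now receives the destination size so a bogus length field read from the RX buffer can no longer overrun the skb. A condensed sketch of the two ideas; the clamp helper below is illustrative, not part of the driver:

	#ifdef __BIG_ENDIAN
	#define xemaclite_readl		ioread32be
	#define xemaclite_writel	iowrite32be
	#else
	#define xemaclite_readl		ioread32
	#define xemaclite_writel	iowrite32
	#endif

	/* clamp a length parsed out of the RX buffer (IP-frame branch) */
	static u16 example_clamp_rx_len(u16 hw_len, u16 maxlen)
	{
		u16 len = min_t(u16, hw_len, ETH_DATA_LEN);	/* distrust the header field */

		len += ETH_HLEN + ETH_FCS_LEN;
		if (WARN_ON(len > maxlen))	/* maxlen: bytes actually available in the skb */
			len = maxlen;
		return len;
	}
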
diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c
index e395ace3120b..648ff9fdb909 100644
--- a/drivers/net/fddi/skfp/cfm.c
+++ b/drivers/net/fddi/skfp/cfm.c
@@ -52,7 +52,6 @@ static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ;
#define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
-#ifdef DEBUG
/*
* symbolic state names
*/
@@ -68,7 +67,6 @@ static const char * const cfm_states[] = {
static const char * const cfm_events[] = {
"NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B"
} ;
-#endif
/*
* map from state to downstream port type
@@ -230,10 +228,10 @@ void cfm(struct s_smc *smc, int event)
oldstate = smc->mib.fddiSMTCF_State ;
do {
- DB_CFM("CFM : state %s%s",
- (smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "",
- cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ;
- DB_CFM(" event %s\n",cfm_events[event],0) ;
+ DB_CFM("CFM : state %s%s event %s",
+ smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
+ cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG],
+ cfm_events[event]);
state = smc->mib.fddiSMTCF_State ;
cfm_fsm(smc,event) ;
event = 0 ;
@@ -297,7 +295,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
/* Don't do the WC-Flag changing here */
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break;
case SC0_ISOLATED :
/*SC07*/
@@ -338,7 +336,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC9_C_WRAP_A :
/*SC10*/
@@ -403,7 +401,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC10_C_WRAP_B :
/*SC20*/
@@ -448,7 +446,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC4_THRU_A :
/*SC41*/
@@ -481,7 +479,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC5_THRU_B :
/*SC51*/
@@ -519,7 +517,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
- DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC11_C_WRAP_S :
/*SC70*/
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index 07da97c303d6..fed3a92d3df4 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -343,8 +343,8 @@ void init_board(struct s_smc *smc, u_char *mac_addr)
*/
void sm_pm_bypass_req(struct s_smc *smc, int mode)
{
- DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ?
- "BP_INSERT" : "BP_DEINSERT",0) ;
+ DB_ECMN(1, "ECM : sm_pm_bypass_req(%s)",
+ mode == BP_INSERT ? "BP_INSERT" : "BP_DEINSERT");
if (smc->s.sas != SMT_DAS)
return ;
diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index 47d922cb3c08..eee9ba91346a 100644
--- a/drivers/net/fddi/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
@@ -66,7 +66,6 @@ static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ;
#define EC6_CHECK 6 /* checking bypass */
#define EC7_DEINSERT 7 /* bypass being turned off */
-#ifdef DEBUG
/*
* symbolic state names
*/
@@ -83,7 +82,6 @@ static const char * const ecm_events[] = {
"EC_TIMEOUT_TD","EC_TIMEOUT_TMAX",
"EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE"
} ;
-#endif
/*
* all Globals are defined in smc.h
@@ -126,10 +124,10 @@ void ecm(struct s_smc *smc, int event)
int state ;
do {
- DB_ECM("ECM : state %s%s",
- (smc->mib.fddiSMTECMState & AFLAG) ? "ACTIONS " : "",
- ecm_states[smc->mib.fddiSMTECMState & ~AFLAG]) ;
- DB_ECM(" event %s\n",ecm_events[event],0) ;
+ DB_ECM("ECM : state %s%s event %s",
+ smc->mib.fddiSMTECMState & AFLAG ? "ACTIONS " : "",
+ ecm_states[smc->mib.fddiSMTECMState & ~AFLAG],
+ ecm_events[event]);
state = smc->mib.fddiSMTECMState ;
ecm_fsm(smc,event) ;
event = 0 ;
@@ -379,7 +377,7 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
(((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
smc->e.sb_flag = TRUE ;
- DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ;
+ DB_ECMN(1, "ECM : EC6_CHECK - stuck bypass");
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
smt_get_error_word(smc));
@@ -443,29 +441,29 @@ static void prop_actions(struct s_smc *smc)
return ;
}
- DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ;
- DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ;
+ DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop);
+ DB_ECM("ECM : prop_actions - in %d out %d", port_in, port_out);
if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
/* trace initiator */
- DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ;
+ DB_ECM("ECM : initiate TRACE on PHY %c", 'A' + port_in - PA);
queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ;
}
else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) &&
port_out != PA) {
/* trace propagate upstream */
- DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ;
+ DB_ECM("ECM : propagate TRACE on PHY B");
queue_event(smc,EVENT_PCMB,PC_TRACE) ;
}
else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) &&
port_out != PB) {
/* trace propagate upstream */
- DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ;
+ DB_ECM("ECM : propagate TRACE on PHY A");
queue_event(smc,EVENT_PCMA,PC_TRACE) ;
}
else {
/* signal trace termination */
- DB_ECM("ECM : TRACE terminated\n",0,0) ;
+ DB_ECM("ECM : TRACE terminated");
smc->e.path_test = PT_PENDING ;
}
smc->e.trace_prop = 0 ;
@@ -482,13 +480,13 @@ static void prop_actions(struct s_smc *smc)
RS_SET(smc,RS_EVENT) ;
while (smc->e.trace_prop) {
- DB_ECM("ECM : prop_actions - trace_prop %d\n",
- smc->e.trace_prop,0) ;
+ DB_ECM("ECM : prop_actions - trace_prop %d",
+ smc->e.trace_prop);
if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
initiator = ENTITY_MAC ;
smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ;
- DB_ECM("ECM: MAC initiates trace\n",0,0) ;
+ DB_ECM("ECM: MAC initiates trace");
}
else {
for (p = NUMPHYS-1 ; p >= 0 ; p--) {
@@ -503,12 +501,12 @@ static void prop_actions(struct s_smc *smc)
if (upstream == ENTITY_MAC) {
/* signal trace termination */
- DB_ECM("ECM : TRACE terminated\n",0,0) ;
+ DB_ECM("ECM : TRACE terminated");
smc->e.path_test = PT_PENDING ;
}
else {
/* trace propagate upstream */
- DB_ECM("ECM : propagate TRACE on PHY %d\n",upstream,0) ;
+ DB_ECM("ECM : propagate TRACE on PHY %d", upstream);
queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ;
}
}
diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index 2fc5987b41dc..325e2c525e35 100644
--- a/drivers/net/fddi/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
@@ -134,7 +134,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* get the resource type
*/
if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
- DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
+ DB_ESS("ESS: RAF frame error, parameter type not found");
return fs;
}
msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
@@ -146,16 +146,16 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
/*
* error in frame: para ESS command was not found
*/
- DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
+ DB_ESS("ESS: RAF frame error, parameter command not found");
return fs;
}
- DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ;
- DB_ESSN(2,"ver %x tran %lx\n",sm->smt_version,sm->smt_tid) ;
- DB_ESSN(2,"stn_id %s\n",addr_to_string(&sm->smt_source),0) ;
+ DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type);
+ DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid);
+ DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source));
- DB_ESSN(2,"infolen %x res %x\n",sm->smt_len, msg_res_type) ;
- DB_ESSN(2,"sbacmd %x\n",cmd->sba_cmd,0) ;
+ DB_ESSN(2, "infolen %x res %lx", sm->smt_len, msg_res_type);
+ DB_ESSN(2, "sbacmd %x", cmd->sba_cmd);
/*
* evaluate the ESS command
@@ -189,7 +189,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* The ESS do not send the Frame to the network!
*/
smc->ess.alloc_trans_id = sm->smt_tid ;
- DB_ESS("ESS: save Alloc Req Trans ID %lx\n",sm->smt_tid,0);
+ DB_ESS("ESS: save Alloc Req Trans ID %x", sm->smt_tid);
p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
((struct smt_p_320f *)p)->mib_payload =
smc->mib.a[PATH0].fddiPATHSbaPayload ;
@@ -220,7 +220,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* check the parameters
*/
if (smt_check_para(smc,sm,plist_raf_alc_res)) {
- DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+ DB_ESS("ESS: RAF with para problem, ignoring");
return fs;
}
@@ -241,7 +241,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
!= SMT_RDF_SUCCESS) ||
(sm->smt_tid != smc->ess.alloc_trans_id)) {
- DB_ESS("ESS: Allocation Response not accepted\n",0,0) ;
+ DB_ESS("ESS: Allocation Response not accepted");
return fs;
}
@@ -261,7 +261,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
}
overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
- DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+ DB_ESSN(2, "payload= %lx overhead= %lx",
+ payload, overhead);
/*
* process the bandwidth allocation
@@ -279,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* except only replies
*/
if (sm->smt_type != SMT_REQUEST) {
- DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
+ DB_ESS("ESS: Do not process Change Responses");
return fs;
}
@@ -287,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* check the para for the Change Request
*/
if (smt_check_para(smc,sm,plist_raf_chg_req)) {
- DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+ DB_ESS("ESS: RAF with para problem, ignoring");
return fs;
}
@@ -299,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
*/
if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
!= PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
- DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
+ DB_ESS("ESS: RAF frame with para problem, ignoring");
return fs;
}
@@ -311,9 +312,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
- DB_ESSN(2,"ESS: Change Request from %s\n",
- addr_to_string(&sm->smt_source),0) ;
- DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+ DB_ESSN(2, "ESS: Change Request from %s",
+ addr_to_string(&sm->smt_source));
+ DB_ESSN(2, "payload= %lx overhead= %lx",
+ payload, overhead);
/*
* process the bandwidth allocation
@@ -337,18 +339,18 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
* except only requests
*/
if (sm->smt_type != SMT_REQUEST) {
- DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
+ DB_ESS("ESS: Do not process a Report Reply");
return fs;
}
- DB_ESSN(2,"ESS: Report Request from %s\n",
- addr_to_string(&(sm->smt_source)),0) ;
+ DB_ESSN(2, "ESS: Report Request from %s",
+ addr_to_string(&sm->smt_source));
/*
* verify that the resource type is sync bw only
*/
if (msg_res_type != SYNC_BW) {
- DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
+ DB_ESS("ESS: ignoring RAF with para problem");
return fs;
}
@@ -364,7 +366,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
/*
* error in frame
*/
- DB_ESS("ESS: ignoring RAF with bad sba_cmd\n",0,0) ;
+ DB_ESS("ESS: ignoring RAF with bad sba_cmd");
break ;
}
@@ -417,17 +419,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
* set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload
*/
/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
- DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
+ DB_ESS("ESS: SMT does not accept the payload value");
return FALSE;
}
if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
- DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
+ DB_ESS("ESS: SMT does not accept the overhead value");
return FALSE;
} */
/* preliminary */
if (payload > MAX_PAYLOAD || overhead > 5000) {
- DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
+ DB_ESS("ESS: payload / overhead not accepted");
return FALSE;
}
@@ -446,7 +448,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
* evaluate the Payload
*/
if (payload) {
- DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit on\n",0,0) ;
+ DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit on");
smc->ess.sync_bw_available = TRUE ;
smc->ess.sync_bw = overhead -
@@ -454,7 +456,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
payload / 1562 ;
}
else {
- DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit off\n",0,0) ;
+ DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit off");
smc->ess.sync_bw_available = FALSE ;
smc->ess.sync_bw = 0 ;
overhead = 0 ;
@@ -464,7 +466,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ;
- DB_ESSN(2,"tsync = %lx\n",smc->ess.sync_bw,0) ;
+ DB_ESSN(2, "tsync = %lx", smc->ess.sync_bw);
ess_config_fifo(smc) ;
set_formac_tsync(smc,smc->ess.sync_bw) ;
@@ -541,7 +543,7 @@ void ess_timer_poll(struct s_smc *smc)
if (!smc->ess.raf_act_timer_poll)
return ;
- DB_ESSN(2,"ESS: timer_poll\n",0,0) ;
+ DB_ESSN(2, "ESS: timer_poll");
smc->ess.timer_count++ ;
if (smc->ess.timer_count == 10) {
@@ -667,11 +669,11 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
/*
* Send the Change Reply to the local SBA
*/
- DB_ESS("ESS:Send to the local SBA\n",0,0) ;
+ DB_ESS("ESS:Send to the local SBA");
if (!smc->ess.sba_reply_pend)
smc->ess.sba_reply_pend = mb ;
else {
- DB_ESS("Frame is lost - another frame was pending\n",0,0);
+ DB_ESS("Frame is lost - another frame was pending");
smt_free_mbuf(smc,mb) ;
}
}
@@ -679,7 +681,7 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
/*
* Send the SBA RAF Change Reply to the network
*/
- DB_ESS("ESS:Send to the network\n",0,0) ;
+ DB_ESS("ESS:Send to the network");
smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
}
}
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index 7d3779ae7377..24aed28b982c 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -726,7 +726,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
if (code_s2u & FM_SMYBEC)
queue_event(smc,EVENT_RMT,RM_MY_BEACON) ;
if (change_s2u & code_s2u & FM_SLOCLM) {
- DB_RMTN(2,"RMT : lower claim received\n",0,0) ;
+ DB_RMTN(2, "RMT : lower claim received");
}
if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) {
/*
@@ -746,7 +746,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ;
}
if (change_s2u & code_s2u & FM_SHICLM) {
- DB_RMTN(2,"RMT : higher claim received\n",0,0) ;
+ DB_RMTN(2, "RMT : higher claim received");
}
if ( (code_s2l & FM_STRTEXP) ||
(code_s2l & FM_STRTEXR) )
@@ -1334,7 +1334,7 @@ void rtm_irq(struct s_smc *smc)
outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; /* clear IRQ */
if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
outpw(FM_A(FM_CMDREG1),FM_ICL) ; /* force claim */
- DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ;
+ DB_RMT("RMT: fddiPATHT_Rmode expired");
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
(u_long) FDDI_SMT_EVENT,
(u_long) FDDI_RTT, smt_get_event_word(smc));
@@ -1353,8 +1353,8 @@ void rtm_set_timer(struct s_smc *smc)
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
- DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
- (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns",
+ (int)smc->mib.a[PATH0].fddiPATHT_Rmode);
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
@@ -1469,13 +1469,13 @@ static void smt_split_up_fifo(struct s_smc *smc)
smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
smc->hw.fp.fifo.tx_a0_size ;
- DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ;
- DB_SMT("rbc_ram_start = %x rbc_ram_end = %x\n",
- smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ;
- DB_SMT("rx1_fifo_start = %x tx_s_start = %x\n",
- smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ;
- DB_SMT("tx_a0_start = %x rx2_fifo_start = %x\n",
- smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ;
+ DB_SMT("FIFO split: mode = %x", smc->hw.fp.fifo.fifo_config_mode);
+ DB_SMT("rbc_ram_start = %x rbc_ram_end = %x",
+ smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end);
+ DB_SMT("rx1_fifo_start = %x tx_s_start = %x",
+ smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start);
+ DB_SMT("tx_a0_start = %x rx2_fifo_start = %x",
+ smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start);
}
void formac_reinit_tx(struct s_smc *smc)
diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index f5bc90ff2a2a..5d6891154367 100644
--- a/drivers/net/fddi/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -54,43 +54,48 @@
#endif
#ifdef DEBUG
-#define DB_PR(flag,a,b,c) { if (flag) printf(a,b,c) ; }
+#define DB_PR(flag, fmt, ...) \
+ do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0)
#else
-#define DB_PR(flag,a,b,c)
+#define DB_PR(flag, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
+
#endif
#ifdef DEBUG_BRD
-#define DB_ECM(a,b,c) DB_PR((smc->debug.d_smt&1),a,b,c)
-#define DB_ECMN(n,a,b,c) DB_PR((smc->debug.d_ecm >=(n)),a,b,c)
-#define DB_RMT(a,b,c) DB_PR((smc->debug.d_smt&2),a,b,c)
-#define DB_RMTN(n,a,b,c) DB_PR((smc->debug.d_rmt >=(n)),a,b,c)
-#define DB_CFM(a,b,c) DB_PR((smc->debug.d_smt&4),a,b,c)
-#define DB_CFMN(n,a,b,c) DB_PR((smc->debug.d_cfm >=(n)),a,b,c)
-#define DB_PCM(a,b,c) DB_PR((smc->debug.d_smt&8),a,b,c)
-#define DB_PCMN(n,a,b,c) DB_PR((smc->debug.d_pcm >=(n)),a,b,c)
-#define DB_SMT(a,b,c) DB_PR((smc->debug.d_smtf),a,b,c)
-#define DB_SMTN(n,a,b,c) DB_PR((smc->debug.d_smtf >=(n)),a,b,c)
-#define DB_SBA(a,b,c) DB_PR((smc->debug.d_sba),a,b,c)
-#define DB_SBAN(n,a,b,c) DB_PR((smc->debug.d_sba >=(n)),a,b,c)
-#define DB_ESS(a,b,c) DB_PR((smc->debug.d_ess),a,b,c)
-#define DB_ESSN(n,a,b,c) DB_PR((smc->debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (smc->debug)
#else
-#define DB_ECM(a,b,c) DB_PR((debug.d_smt&1),a,b,c)
-#define DB_ECMN(n,a,b,c) DB_PR((debug.d_ecm >=(n)),a,b,c)
-#define DB_RMT(a,b,c) DB_PR((debug.d_smt&2),a,b,c)
-#define DB_RMTN(n,a,b,c) DB_PR((debug.d_rmt >=(n)),a,b,c)
-#define DB_CFM(a,b,c) DB_PR((debug.d_smt&4),a,b,c)
-#define DB_CFMN(n,a,b,c) DB_PR((debug.d_cfm >=(n)),a,b,c)
-#define DB_PCM(a,b,c) DB_PR((debug.d_smt&8),a,b,c)
-#define DB_PCMN(n,a,b,c) DB_PR((debug.d_pcm >=(n)),a,b,c)
-#define DB_SMT(a,b,c) DB_PR((debug.d_smtf),a,b,c)
-#define DB_SMTN(n,a,b,c) DB_PR((debug.d_smtf >=(n)),a,b,c)
-#define DB_SBA(a,b,c) DB_PR((debug.d_sba),a,b,c)
-#define DB_SBAN(n,a,b,c) DB_PR((debug.d_sba >=(n)),a,b,c)
-#define DB_ESS(a,b,c) DB_PR((debug.d_ess),a,b,c)
-#define DB_ESSN(n,a,b,c) DB_PR((debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (debug)
#endif
+#define DB_ECM(fmt, ...) \
+ DB_PR((DB_TEST).d_smt & 1, fmt, ##__VA_ARGS__)
+#define DB_ECMN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_ecm >= (n), fmt, ##__VA_ARGS__)
+#define DB_RMT(fmt, ...) \
+ DB_PR((DB_TEST).d_smt & 2, fmt, ##__VA_ARGS__)
+#define DB_RMTN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_rmt >= (n), fmt, ##__VA_ARGS__)
+#define DB_CFM(fmt, ...) \
+ DB_PR((DB_TEST).d_smt & 4, fmt, ##__VA_ARGS__)
+#define DB_CFMN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_cfm >= (n), fmt, ##__VA_ARGS__)
+#define DB_PCM(fmt, ...) \
+ DB_PR((DB_TEST).d_smt & 8, fmt, ##__VA_ARGS__)
+#define DB_PCMN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_pcm >= (n), fmt, ##__VA_ARGS__)
+#define DB_SMT(fmt, ...) \
+ DB_PR((DB_TEST).d_smtf, fmt, ##__VA_ARGS__)
+#define DB_SMTN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_smtf >= (n), fmt, ##__VA_ARGS__)
+#define DB_SBA(fmt, ...) \
+ DB_PR((DB_TEST).d_sba, fmt, ##__VA_ARGS__)
+#define DB_SBAN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_sba >= (n), fmt, ##__VA_ARGS__)
+#define DB_ESS(fmt, ...) \
+ DB_PR((DB_TEST).d_ess, fmt, ##__VA_ARGS__)
+#define DB_ESSN(n, fmt, ...) \
+ DB_PR((DB_TEST).d_ess >= (n), fmt, ##__VA_ARGS__)
+
#ifndef SS_NOT_DS
#define SK_LOC_DECL(type,var) type var
#else
@@ -640,8 +645,8 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text);
#define dump_smt(smc,sm,text)
#endif
-#ifdef DEBUG
char* addr_to_string(struct fddi_addr *addr);
+#ifdef DEBUG
void dump_hex(char *p, int len);
#endif
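
With the consolidated DB_PR() above, callers drop the dummy padding arguments and the trailing newline, and in non-DEBUG builds the no_printk() expansion still lets the compiler type-check the format string; that is what drives the %x/%lx specifier fixes throughout the .c files in this patch. A brief before/after, using the DB_ECM flag defined in this header:

	/* old three-argument form, padded with unused zeros */
	DB_ECM("ECM : TRACE terminated\n", 0, 0);

	/* new variadic form: newline supplied by DB_PR() */
	DB_ECM("ECM : TRACE terminated");
	DB_ECM("ECM : propagate TRACE on PHY %d", upstream);
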
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 4ca2341d7f06..123cfa09c354 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -168,13 +168,25 @@ struct os_debug {
#define DB_P debug
#endif
-#define DB_RX(a,b,c,lev) if (DB_P.d_os.hwm_rx >= (lev)) printf(a,b,c)
-#define DB_TX(a,b,c,lev) if (DB_P.d_os.hwm_tx >= (lev)) printf(a,b,c)
-#define DB_GEN(a,b,c,lev) if (DB_P.d_os.hwm_gen >= (lev)) printf(a,b,c)
+#define DB_RX(lev, fmt, ...) \
+do { \
+ if (DB_P.d_os.hwm_rx >= (lev)) \
+ printf(fmt "\n", ##__VA_ARGS__); \
+} while (0)
+#define DB_TX(lev, fmt, ...) \
+do { \
+ if (DB_P.d_os.hwm_tx >= (lev)) \
+ printf(fmt "\n", ##__VA_ARGS__); \
+} while (0)
+#define DB_GEN(lev, fmt, ...) \
+do { \
+ if (DB_P.d_os.hwm_gen >= (lev)) \
+ printf(fmt "\n", ##__VA_ARGS__); \
+} while (0)
#else /* DEBUG */
-#define DB_RX(a,b,c,lev)
-#define DB_TX(a,b,c,lev)
-#define DB_GEN(a,b,c,lev)
+#define DB_RX(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_TX(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_GEN(lev, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
#endif /* DEBUG */
#ifndef SK_BREAK
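
The hwmtm.h macros additionally move the debug level to the front, so calls read "level, format, arguments" instead of the old "format, arg, arg, level". A brief before/after matching the hwmtm.c conversions that follow:

	/* old: level last, unused slots padded with 0 */
	DB_RX("frame length = %d", len, 0, 4);
	DB_GEN("descr ring starts at = %x ", (void *)start, 0, 3);

	/* new: level first, genuine varargs, %p for pointers */
	DB_RX(4, "frame length = %d", len);
	DB_GEN(3, "descr ring starts at = %p", start);
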
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index d0a68bdd5f63..abbe309051d9 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -158,7 +158,7 @@ u_int mac_drv_check_space(void);
SMbuf* smt_get_mbuf(struct s_smc *smc);
#ifdef DEBUG
- void mac_drv_debug_lev(void);
+ void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif
/*
@@ -330,7 +330,7 @@ static u_long init_descr_ring(struct s_smc *smc,
union s_fp_descr volatile *d2 ;
u_long phys ;
- DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
+ DB_GEN(3, "descr ring starts at = %p", start);
for (i=count-1, d1=start; i ; i--) {
d2 = d1 ;
d1++ ; /* descr is owned by the host */
@@ -339,7 +339,7 @@ static u_long init_descr_ring(struct s_smc *smc,
phys = mac_drv_virt2phys(smc,(void *)d1) ;
d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
}
- DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
+ DB_GEN(3, "descr ring ends at = %p", d1);
d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
d1->r.rxd_next = &start->r ;
phys = mac_drv_virt2phys(smc,(void *)start) ;
@@ -364,7 +364,7 @@ static void init_txd_ring(struct s_smc *smc)
ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
queue = smc->hw.fp.tx[QUEUE_A0] ;
- DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
+ DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_ASYNC_TXD_COUNT) ;
phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -378,7 +378,7 @@ static void init_txd_ring(struct s_smc *smc)
ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
queue = smc->hw.fp.tx[QUEUE_S] ;
- DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
+ DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_SYNC_TXD_COUNT) ;
phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -400,7 +400,7 @@ static void init_rxd_ring(struct s_smc *smc)
*/
ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
queue = smc->hw.fp.rx[QUEUE_R1] ;
- DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
+ DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
SMT_R1_RXD_COUNT) ;
phys = le32_to_cpu(ds->rxd_nrdadr) ;
@@ -469,11 +469,11 @@ void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
*/
i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
if (i != 16) {
- DB_GEN("i = %d",i,0,3) ;
+ DB_GEN(3, "i = %d", i);
smc->os.hwm.descr_p = (union s_fp_descr volatile *)
((char *)smc->os.hwm.descr_p+i) ;
}
- DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;
+ DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);
init_txd_ring(smc) ;
init_rxd_ring(smc) ;
@@ -501,7 +501,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc)
mb->sm_off = 8 ;
mb->sm_use_count = 1 ;
}
- DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
+ DB_GEN(3, "get SMbuf: mb = %p", mb);
return mb; /* May be NULL */
}
@@ -510,14 +510,14 @@ void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
if (mb) {
mb->sm_use_count-- ;
- DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;
+ DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
/*
* If the use_count is != zero the MBuf is queued
* more than once and must not queued into the
* free MBuf queue
*/
if (!mb->sm_use_count) {
- DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
+ DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef COMMON_MB_POOL
mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
smc->os.hwm.mbuf_pool.mb_free = mb ;
@@ -741,7 +741,7 @@ void fddi_isr(struct s_smc *smc)
while ((is = GET_ISR() & ISR_MASK)) {
NDD_TRACE("CH0B",is,0,0) ;
- DB_GEN("ISA = 0x%x",is,0,7) ;
+ DB_GEN(7, "ISA = 0x%lx", is);
if (is & IMASK_SLOW) {
NDD_TRACE("CH1b",is,0,0) ;
@@ -754,20 +754,20 @@ void fddi_isr(struct s_smc *smc)
if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */
stu = inpw(FM_A(FM_ST1U)) ;
stl = inpw(FM_A(FM_ST1L)) ;
- DB_GEN("Slow transmit complete",0,0,6) ;
+ DB_GEN(6, "Slow transmit complete");
mac1_irq(smc,stu,stl) ;
}
if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */
stu= inpw(FM_A(FM_ST2U)) ;
stl= inpw(FM_A(FM_ST2L)) ;
- DB_GEN("Slow receive complete",0,0,6) ;
- DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
+ DB_GEN(6, "Slow receive complete");
+ DB_GEN(7, "stl = %x : stu = %x", stl, stu);
mac2_irq(smc,stu,stl) ;
}
if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */
stu= inpw(FM_A(FM_ST3U)) ;
stl= inpw(FM_A(FM_ST3L)) ;
- DB_GEN("FORMAC Mode Register 3",0,0,6) ;
+ DB_GEN(6, "FORMAC Mode Register 3");
mac3_irq(smc,stu,stl) ;
}
if (is & IS_TIMINT) { /* Timer 82C54-2 */
@@ -814,7 +814,7 @@ void fddi_isr(struct s_smc *smc)
* Fast Tx complete Async/Sync Queue (BMU service)
*/
if (is & (IS_XS_F|IS_XA_F)) {
- DB_GEN("Fast tx complete queue",0,0,6) ;
+ DB_GEN(6, "Fast tx complete queue");
/*
* clear IRQ, Note: no IRQ is lost, because
* we always service both queues
@@ -829,7 +829,7 @@ void fddi_isr(struct s_smc *smc)
* Fast Rx Complete (BMU service)
*/
if (is & IS_R1_F) {
- DB_GEN("Fast receive complete",0,0,6) ;
+ DB_GEN(6, "Fast receive complete");
/* clear IRQ */
#ifndef USE_BREAK_ISR
outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
@@ -1083,13 +1083,13 @@ void process_receive(struct s_smc *smc)
#endif
n = 0 ;
do {
- DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
+ DB_RX(5, "Check RxD %p for OWN and EOF", r);
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
if (rbctrl & BMU_OWN) {
NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
- DB_RX("End of RxDs",0,0,4) ;
+ DB_RX(4, "End of RxDs");
goto rx_end ;
}
/*
@@ -1136,19 +1136,19 @@ void process_receive(struct s_smc *smc)
rx_used-- ;
} while (!(rbctrl & BMU_EOF)) ;
used_frags = frag_count ;
- DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;
+ DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);
/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
/* BMU_ST_BUF will not be changed by the ASIC */
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
- DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+ DB_RX(5, "Check STF bit in %p", r);
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
frag_count++ ;
rx_used-- ;
}
- DB_RX("STF bit found",0,0,5) ;
+ DB_RX(5, "STF bit found");
/*
* The received frame is finished for the process receive
@@ -1164,7 +1164,7 @@ void process_receive(struct s_smc *smc)
rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
- DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+ DB_RX(5, "dma_complete for RxD %p", r);
dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
}
smc->hw.fp.err_stats.err_valid++ ;
@@ -1173,34 +1173,34 @@ void process_receive(struct s_smc *smc)
/* the length of the data including the FC */
len = (rfsw & RD_LENGTH) - 4 ;
- DB_RX("frame length = %d",len,0,4) ;
+ DB_RX(4, "frame length = %d", len);
/*
* check the frame_length and all error flags
*/
if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
if (rfsw & RD_S_MSRABT) {
- DB_RX("Frame aborted by the FORMAC",0,0,2) ;
+ DB_RX(2, "Frame aborted by the FORMAC");
smc->hw.fp.err_stats.err_abort++ ;
}
/*
* check frame status
*/
if (rfsw & RD_S_SEAC2) {
- DB_RX("E-Indicator set",0,0,2) ;
+ DB_RX(2, "E-Indicator set");
smc->hw.fp.err_stats.err_e_indicator++ ;
}
if (rfsw & RD_S_SFRMERR) {
- DB_RX("CRC error",0,0,2) ;
+ DB_RX(2, "CRC error");
smc->hw.fp.err_stats.err_crc++ ;
}
if (rfsw & RX_FS_IMPL) {
- DB_RX("Implementer frame",0,0,2) ;
+ DB_RX(2, "Implementer frame");
smc->hw.fp.err_stats.err_imp_frame++ ;
}
goto abort_frame ;
}
if (len > FDDI_RAW_MTU-4) {
- DB_RX("Frame too long error",0,0,2) ;
+ DB_RX(2, "Frame too long error");
smc->hw.fp.err_stats.err_too_long++ ;
goto abort_frame ;
}
@@ -1209,12 +1209,12 @@ void process_receive(struct s_smc *smc)
* of aborted frames to the BMU
*/
if (len <= 4) {
- DB_RX("Frame length = 0",0,0,2) ;
+ DB_RX(2, "Frame length = 0");
goto abort_frame ;
}
if (len != (n-4)) {
- DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
+ DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
smc->os.hwm.rx_len_error++ ;
goto abort_frame ;
}
@@ -1223,7 +1223,7 @@ void process_receive(struct s_smc *smc)
* Check SA == MA
*/
virt = (u_char far *) rxd->rxd_virt ;
- DB_RX("FC = %x",*virt,0,2) ;
+ DB_RX(2, "FC = %x", *virt);
if (virt[12] == MA[5] &&
virt[11] == MA[4] &&
virt[10] == MA[3] &&
@@ -1250,7 +1250,7 @@ void process_receive(struct s_smc *smc)
virt[3] != MA[2] ||
virt[2] != MA[1] ||
virt[1] != MA[0]) {
- DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
+ DB_RX(2, "DA != MA and not multi- or broadcast");
goto abort_frame ;
}
}
@@ -1259,13 +1259,13 @@ void process_receive(struct s_smc *smc)
/*
* LLC frame received
*/
- DB_RX("LLC - receive",0,0,4) ;
+ DB_RX(4, "LLC - receive");
mac_drv_rx_complete(smc,rxd,frag_count,len) ;
}
else {
if (!(mb = smt_get_mbuf(smc))) {
smc->hw.fp.err_stats.err_no_buf++ ;
- DB_RX("No SMbuf; receive terminated",0,0,4) ;
+ DB_RX(4, "No SMbuf; receive terminated");
goto abort_frame ;
}
data = smtod(mb,char *) - 1 ;
@@ -1278,7 +1278,7 @@ void process_receive(struct s_smc *smc)
#else
for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
- DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
+ DB_RX(6, "cp SMT frame to mb: len = %d", n);
memcpy(data,r->rxd_virt,n) ;
data += n ;
}
@@ -1294,15 +1294,15 @@ void process_receive(struct s_smc *smc)
switch(fc) {
case FC_SMT_INFO :
smc->hw.fp.err_stats.err_smt_frame++ ;
- DB_RX("SMT frame received ",0,0,5) ;
+ DB_RX(5, "SMT frame received");
if (smc->os.hwm.pass_SMT) {
- DB_RX("pass SMT frame ",0,0,5) ;
+ DB_RX(5, "pass SMT frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
- DB_RX("requeue RxD",0,0,5) ;
+ DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
@@ -1310,7 +1310,7 @@ void process_receive(struct s_smc *smc)
break ;
case FC_SMT_NSA :
smc->hw.fp.err_stats.err_smt_frame++ ;
- DB_RX("SMT frame received ",0,0,5) ;
+ DB_RX(5, "SMT frame received");
/* if pass_NSA set pass the NSA frame or */
/* pass_SMT set and the A-Indicator */
@@ -1318,12 +1318,12 @@ void process_receive(struct s_smc *smc)
if (smc->os.hwm.pass_NSA ||
(smc->os.hwm.pass_SMT &&
!(rfsw & A_INDIC))) {
- DB_RX("pass SMT frame ",0,0,5) ;
+ DB_RX(5, "pass SMT frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
- DB_RX("requeue RxD",0,0,5) ;
+ DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
@@ -1331,12 +1331,12 @@ void process_receive(struct s_smc *smc)
break ;
case FC_BEACON :
if (smc->os.hwm.pass_DB) {
- DB_RX("pass DB frame ",0,0,5) ;
+ DB_RX(5, "pass DB frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
- DB_RX("requeue RxD",0,0,5) ;
+ DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_free_mbuf(smc,mb) ;
@@ -1345,9 +1345,9 @@ void process_receive(struct s_smc *smc)
/*
* unknown FC, abort the frame
*/
- DB_RX("unknown FC error",0,0,2) ;
+ DB_RX(2, "unknown FC error");
smt_free_mbuf(smc,mb) ;
- DB_RX("requeue RxD",0,0,5) ;
+ DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
if ((fc & 0xf0) == FC_MAC)
smc->hw.fp.err_stats.err_mac_frame++ ;
@@ -1358,16 +1358,16 @@ void process_receive(struct s_smc *smc)
}
}
- DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+ DB_RX(3, "next RxD is %p", queue->rx_curr_get);
NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
continue ;
/*--------------------------------------------------------------------*/
abort_frame:
- DB_RX("requeue RxD",0,0,5) ;
+ DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
- DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+ DB_RX(3, "next RxD is %p", queue->rx_curr_get);
NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
}
rx_end:
@@ -1381,7 +1381,7 @@ static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
u_char fc ;
- DB_RX("send a queued frame to the llc layer",0,0,4) ;
+ DB_RX(4, "send a queued frame to the llc layer");
smc->os.hwm.r.len = mb->sm_len ;
smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
fc = *smc->os.hwm.r.mb_pos ;
@@ -1419,7 +1419,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
__le32 rbctrl;
NDD_TRACE("RHfB",virt,len,frame_status) ;
- DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
+ DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
r->rxd_virt = virt ;
r->rxd_rbadr = cpu_to_le32(phys) ;
@@ -1475,7 +1475,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
}
queue = smc->hw.fp.rx[QUEUE_R1] ;
- DB_RX("clear_rx_queue",0,0,5) ;
+ DB_RX(5, "clear_rx_queue");
/*
* dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
@@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
r = queue->rx_curr_get ;
while (queue->rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ;
+ DB_RX(5, "switch OWN bit of RxD 0x%p", r);
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
frag_count = 1 ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
@@ -1491,23 +1491,23 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (r != queue->rx_curr_put &&
!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
- DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+ DB_RX(5, "Check STF bit in %p", r);
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
frag_count++ ;
}
- DB_RX("STF bit found",0,0,5) ;
+ DB_RX(5, "STF bit found");
next_rxd = r ;
for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
- DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+ DB_RX(5, "dma_complete for RxD %p", r);
dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
}
- DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
- (void *)queue->rx_curr_get,frag_count,5) ;
+ DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
+ queue->rx_curr_get, frag_count);
mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
queue->rx_curr_get = next_rxd ;
@@ -1554,7 +1554,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
smc->os.hwm.tx_len = frame_len ;
- DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
+ DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
frame_status |= LAN_TX ;
}
@@ -1577,23 +1577,23 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
if (!smc->hw.mac_ring_is_up) {
frame_status &= ~LAN_TX ;
frame_status |= RING_DOWN ;
- DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+ DB_TX(2, "Ring is down: terminate LAN_TX");
}
if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef NDIS_OS2
mac_drv_clear_txd(smc) ;
if (frag_count > smc->os.hwm.tx_p->tx_free) {
- DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+ DB_TX(2, "Out of TxDs, terminate LAN_TX");
frame_status &= ~LAN_TX ;
frame_status |= OUT_OF_TXD ;
}
#else
- DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+ DB_TX(2, "Out of TxDs, terminate LAN_TX");
frame_status &= ~LAN_TX ;
frame_status |= OUT_OF_TXD ;
#endif
}
- DB_TX("frame_status = %x",frame_status,0,3) ;
+ DB_TX(3, "frame_status = %x", frame_status);
NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
return frame_status;
}
@@ -1642,10 +1642,10 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
*/
t = queue->tx_curr_put ;
- DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
+ DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
if (frame_status & LAN_TX) {
/* '*t' is already defined */
- DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ;
+ DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
t->txd_virt = virt ;
t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
t->txd_tbadr = cpu_to_le32(phys) ;
@@ -1674,11 +1674,11 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
}
}
if (frame_status & LOC_TX) {
- DB_TX("LOC_TX: ",0,0,3) ;
+ DB_TX(3, "LOC_TX:");
if (frame_status & FIRST_FRAG) {
if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
smc->hw.fp.err_stats.err_no_buf++ ;
- DB_TX("No SMbuf; transmit terminated",0,0,4) ;
+ DB_TX(4, "No SMbuf; transmit terminated");
}
else {
smc->os.hwm.tx_data =
@@ -1693,7 +1693,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
}
if (smc->os.hwm.tx_mb) {
#ifndef USE_OS_CPY
- DB_TX("copy fragment into MBuf ",0,0,3) ;
+ DB_TX(3, "copy fragment into MBuf");
memcpy(smc->os.hwm.tx_data,virt,len) ;
smc->os.hwm.tx_data += len ;
#endif
@@ -1718,7 +1718,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
smc->os.hwm.tx_data++ ;
smc->os.hwm.tx_mb->sm_len =
smc->os.hwm.tx_len - 1 ;
- DB_TX("pass LLC frame to SMT ",0,0,3) ;
+ DB_TX(3, "pass LLC frame to SMT");
smt_received_pack(smc,smc->os.hwm.tx_mb,
RD_FS_LOCAL) ;
}
@@ -1733,7 +1733,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
*/
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
- DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
+ DB_GEN(4, "queue_llc_rx: mb = %p", mb);
smc->os.hwm.queued_rx_frames++ ;
mb->sm_next = (SMbuf *)NULL ;
if (smc->os.hwm.llc_rx_pipe == NULL) {
@@ -1763,7 +1763,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
smc->os.hwm.queued_rx_frames-- ;
smc->os.hwm.llc_rx_pipe = mb->sm_next ;
}
- DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
+ DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
return mb;
}
@@ -1773,7 +1773,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
*/
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
- DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
+ DB_GEN(4, "_rx: queue_txd_mb = %p", mb);
smc->os.hwm.queued_txd_mb++ ;
mb->sm_next = (SMbuf *)NULL ;
if (smc->os.hwm.txd_tx_pipe == NULL) {
@@ -1796,7 +1796,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc)
smc->os.hwm.queued_txd_mb-- ;
smc->os.hwm.txd_tx_pipe = mb->sm_next ;
}
- DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
+ DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
return mb;
}
@@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
__le32 tbctrl;
NDD_TRACE("THSB",mb,fc,0) ;
- DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ;
+ DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);
mb->sm_off-- ; /* set to fc */
mb->sm_len++ ; /* + fc */
@@ -1838,7 +1838,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
if (n >= len) {
n = len ;
}
- DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
+ DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
virt[frag_count] = data ;
frag_len[frag_count] = n ;
frag_count++ ;
@@ -1863,15 +1863,15 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
frame_status &= ~LAN_TX;
if (frame_status) {
- DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+ DB_TX(2, "Ring is down: terminate LAN_TX");
}
else {
- DB_TX("Ring is down: terminate transmission",0,0,2) ;
+ DB_TX(2, "Ring is down: terminate transmission");
smt_free_mbuf(smc,mb) ;
return ;
}
}
- DB_TX("frame_status = 0x%x ",frame_status,0,5) ;
+ DB_TX(5, "frame_status = 0x%x", frame_status);
if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
mb->sm_use_count = 2 ;
@@ -1881,7 +1881,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
t = queue->tx_curr_put ;
frame_status |= FIRST_FRAG ;
for (i = 0; i < frag_count; i++) {
- DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
+ DB_TX(5, "init TxD = 0x%p", t);
if (i == frag_count-1) {
frame_status |= LAST_FRAG ;
t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
@@ -1912,7 +1912,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
}
if (frame_status & LOC_TX) {
- DB_TX("pass Mbuf to LLC queue",0,0,5) ;
+ DB_TX(5, "pass Mbuf to LLC queue");
queue_llc_rx(smc,mb) ;
}
@@ -1953,18 +1953,18 @@ static void mac_drv_clear_txd(struct s_smc *smc)
for (i = QUEUE_S; i <= QUEUE_A0; i++) {
queue = smc->hw.fp.tx[i] ;
t1 = queue->tx_curr_get ;
- DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+ DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);
for ( ; ; ) {
frag_count = 0 ;
do {
DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ;
+ DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
if (tbctrl & BMU_OWN || !queue->tx_used){
- DB_TX("End of TxDs queue %d",i,0,4) ;
+ DB_TX(4, "End of TxDs queue %d", i);
goto free_next_queue ; /* next queue */
}
t1 = t1->txd_next ;
@@ -1988,11 +1988,11 @@ static void mac_drv_clear_txd(struct s_smc *smc)
}
else {
#ifndef PASS_1ST_TXD_2_TX_COMP
- DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ;
+ DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
mac_drv_tx_complete(smc,t2) ;
#else
- DB_TX("mac_drv_tx_comp for TxD 0x%x",
- queue->tx_curr_get,0,4) ;
+ DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
+ queue->tx_curr_get);
mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
}
@@ -2043,7 +2043,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
for (i = QUEUE_S; i <= QUEUE_A0; i++) {
queue = smc->hw.fp.tx[i] ;
- DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+ DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);
/*
* switch the OWN bit of all pending frames to the host
@@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
tx_used = queue->tx_used ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ;
+ DB_TX(5, "switch OWN bit of TxD 0x%p", t);
t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
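[Annotation] The hwmtm.c hunks above convert every DB_RX/DB_TX/DB_GEN call site from the old fixed four-argument form (message string, two values, debug level) to a printf-style form with the level first and a real format string, which is also what allows pointers to be printed with %p instead of being squeezed through %x. A minimal sketch of the macro shape these new call sites assume is below; the DB_PR name and the hwm_rx/hwm_tx/hwm_gen flag fields are illustrative assumptions, not necessarily the driver's exact header, and it assumes <linux/printk.h>:

	#ifdef DEBUG
	#define DB_PR(flag, fmt, ...)						\
		do {								\
			if (flag)						\
				printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);	\
		} while (0)
	#else
	#define DB_PR(flag, fmt, ...)	no_printk(fmt "\n", ##__VA_ARGS__)
	#endif

	/* level-gated wrappers; field names are assumed for illustration */
	#define DB_RX(lev, fmt, ...)	DB_PR(debug.d_os.hwm_rx  >= (lev), fmt, ##__VA_ARGS__)
	#define DB_TX(lev, fmt, ...)	DB_PR(debug.d_os.hwm_tx  >= (lev), fmt, ##__VA_ARGS__)
	#define DB_GEN(lev, fmt, ...)	DB_PR(debug.d_os.hwm_gen >= (lev), fmt, ##__VA_ARGS__)

With this shape, a call such as DB_RX(3, "next RxD is %p", queue->rx_curr_get) compiles away to no_printk() in non-debug builds while still type-checking the format arguments.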
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 88d02d0a42c4..a9ecf923f63d 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -91,7 +91,6 @@ int p
#define PC8_ACTIVE 8
#define PC9_MAINT 9
-#ifdef DEBUG
/*
* symbolic state names
*/
@@ -113,7 +112,6 @@ static const char * const pcm_events[] = {
"PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
"PC_NSE","PC_LEM"
} ;
-#endif
#ifdef MOT_ELM
/*
@@ -610,12 +608,11 @@ void pcm(struct s_smc *smc, const int np, int event)
mib = phy->mib ;
oldstate = mib->fddiPORTPCMState ;
do {
- DB_PCM("PCM %c: state %s",
- phy->phy_name,
- (mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ;
- DB_PCM("%s, event %s\n",
- pcm_states[mib->fddiPORTPCMState & ~AFLAG],
- pcm_events[event]) ;
+ DB_PCM("PCM %c: state %s%s, event %s",
+ phy->phy_name,
+ mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "",
+ pcm_states[mib->fddiPORTPCMState & ~AFLAG],
+ pcm_events[event]);
state = mib->fddiPORTPCMState ;
pcm_fsm(smc,phy,event) ;
event = 0 ;
@@ -1017,7 +1014,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
ACTIONS_DONE() ;
break ;
case PC9_MAINT :
- DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
+ DB_PCMN(1, "PCM %c : MAINT", phy->phy_name);
/*PC90*/
if (cmd == PC_ENABLE) {
GO_STATE(PC0_OFF) ;
@@ -1126,13 +1123,12 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
}
if (lem->lem_errors) {
- DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ;
- DB_PCMN(1,"errors : %ld\n",lem->lem_errors,0) ;
- DB_PCMN(1,"sum_errors : %ld\n",mib->fddiPORTLem_Ct,0) ;
- DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ;
- DB_PCMN(1,"float BER : 10E-(%d/100)\n",lem->lem_float_ber,0) ;
- DB_PCMN(1,"avg. BER : 10E-%d\n",
- mib->fddiPORTLer_Estimate,0) ;
+ DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A');
+ DB_PCMN(1, "errors : %ld", lem->lem_errors);
+ DB_PCMN(1, "sum_errors : %ld", mib->fddiPORTLem_Ct);
+ DB_PCMN(1, "current BER : 10E-%d", ber / 100);
+ DB_PCMN(1, "float BER : 10E-(%d/100)", lem->lem_float_ber);
+ DB_PCMN(1, "avg. BER : 10E-%d", mib->fddiPORTLer_Estimate);
}
lem->lem_errors = 0L ;
@@ -1160,8 +1156,8 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
/*PC81b*/
#ifdef CONCENTRATOR
- DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n",
- phy->np, mib->fddiPORTLer_Cutoff) ;
+ DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d",
+ phy->np, mib->fddiPORTLer_Cutoff);
#endif
#ifdef SMT_EXT_CUTOFF
smt_port_off_event(smc,phy->np);
@@ -1213,7 +1209,7 @@ static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
phy->pc_lem_fail = TRUE ;
break ;
}
- DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ;
+ DB_PCMN(1, " >>errors : %lu", lem->lem_errors);
}
if (phy->pc_lem_fail) {
mib->fddiPORTLCTFail_Ct++ ;
@@ -1277,7 +1273,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
mib = phy->mib ;
- DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
+ DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]);
bit++ ;
switch(bit) {
@@ -1298,8 +1294,8 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
case 4:
if (mib->fddiPORTMy_Type == TM &&
mib->fddiPORTNeighborType == TM) {
- DB_PCMN(1,"PCM %c : E100 withhold M-M\n",
- phy->phy_name,0) ;
+ DB_PCMN(1, "PCM %c : E100 withhold M-M",
+ phy->phy_name);
mib->fddiPORTPC_Withhold = PC_WH_M_M ;
RS_SET(smc,RS_EVENT) ;
}
@@ -1321,16 +1317,16 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
else {
mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
RS_SET(smc,RS_EVENT) ;
- DB_PCMN(1,"PCM %c : E101 withhold other\n",
- phy->phy_name,0) ;
+ DB_PCMN(1, "PCM %c : E101 withhold other",
+ phy->phy_name);
}
phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
(mib->fddiPORTMy_Type != TM) &&
(mib->fddiPORTNeighborType ==
mib->fddiPORTMy_Type)) ;
if (phy->twisted) {
- DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n",
- phy->phy_name,0) ;
+ DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!",
+ phy->phy_name);
}
break ;
case 5 :
@@ -1368,7 +1364,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
if (phy->t_next[7] > smc->s.pcm_lc_medium) {
start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
}
- DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ;
+ DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]);
phy->t_next[9] = smc->s.pcm_t_next_9 ;
break ;
case 7:
@@ -1379,8 +1375,9 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
break ;
case 8:
if (phy->t_val[7] || phy->r_val[7]) {
- DB_PCMN(1,"PCM %c : E103 LCT fail %s\n",
- phy->phy_name,phy->t_val[7]? "local":"remote") ;
+ DB_PCMN(1, "PCM %c : E103 LCT fail %s",
+ phy->phy_name,
+ phy->t_val[7] ? "local" : "remote");
queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
}
break ;
@@ -1529,8 +1526,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
phy->cf_loop = FALSE ;
lem_check_lct(smc,phy) ;
if (phy->pc_lem_fail) {
- DB_PCMN(1,"PCM %c : E104 LCT failed\n",
- phy->phy_name,0) ;
+ DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name);
phy->t_val[7] = 1 ;
}
else
@@ -1580,7 +1576,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
- DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
+ DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]);
}
/*
@@ -1783,13 +1779,14 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
}
/*jd 05-Aug-1999 changed: Bug #10419 */
- DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag);
+ DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag);
if (smc->e.DisconnectFlag == FALSE) {
- DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason);
+ DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason);
queue_event(smc,EVENT_PCM+np,PC_START) ;
}
else {
- DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason);
+ DB_PCMN(1, "PLC %d: NO!! restart (reason %x)",
+ np, reason);
}
return ;
}
@@ -1810,8 +1807,8 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */
/*PC22b*/
if (!phy->tr_flag) {
- DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n",
- np,smc->mib.fddiSMTECMState) ;
+ DB_PCMN(1, "PCM : irq TRACE_PROP %d %d",
+ np, smc->mib.fddiSMTECMState);
phy->tr_flag = TRUE ;
smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
@@ -1824,8 +1821,9 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
/*PC22a*/
if (smc->e.path_test == PT_PASSED) {
- DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np),
- phy->mib->fddiPORTPCMState) ;
+ DB_PCMN(1, "PCM : state = %s %d",
+ get_pcmstate(smc, np),
+ phy->mib->fddiPORTPCMState);
smc->e.path_test = PT_PENDING ;
queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
@@ -1835,9 +1833,10 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
/* break_required (TNE > NS_Max) */
if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
if (!phy->tr_flag) {
- DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE");
- queue_event(smc,EVENT_PCM+np,PC_START) ;
- return ;
+ DB_PCMN(1, "PCM %c : PC81 %s",
+ phy->phy_name, "NSE");
+ queue_event(smc, EVENT_PCM + np, PC_START);
+ return;
}
}
}
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 52fa162a31e0..eee447315e32 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
SMbuf *reply ;
sm = smtod(mb,struct smt_header *) ;
- DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ;
+ DB_SMT("SMT: processing PMF frame at %p len %d", sm, mb->sm_len);
#ifdef DEBUG
dump_smt(smc,sm,"PMF Received") ;
#endif
@@ -1585,7 +1585,7 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text)
dump_hex((char *) &sm->smt_source,6) ;
printf(" Class %x Type %x Version %x\n",
sm->smt_class,sm->smt_type,sm->smt_version) ;
- printf("TID %lx\t\tSID ",sm->smt_tid) ;
+ printf("TID %x\t\tSID ", sm->smt_tid);
dump_hex((char *) &sm->smt_sid,8) ;
printf(" LEN %x\n",sm->smt_len) ;
diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index ef8d5672d9e8..52b22095273a 100644
--- a/drivers/net/fddi/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
@@ -70,7 +70,6 @@ static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ;
#define RM6_DIRECTED 6 /* sending directed beacons */
#define RM7_TRACE 7 /* trace initiated */
-#ifdef DEBUG
/*
* symbolic state names
*/
@@ -91,7 +90,6 @@ static const char * const rmt_events[] = {
"RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT",
"RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE"
} ;
-#endif
/*
* Globals
@@ -149,10 +147,10 @@ void rmt(struct s_smc *smc, int event)
int state ;
do {
- DB_RMT("RMT : state %s%s",
- (smc->mib.m[MAC0].fddiMACRMTState & AFLAG) ? "ACTIONS " : "",
- rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG]) ;
- DB_RMT(" event %s\n",rmt_events[event],0) ;
+ DB_RMT("RMT : state %s%s event %s",
+ smc->mib.m[MAC0].fddiMACRMTState & AFLAG ? "ACTIONS " : "",
+ rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG],
+ rmt_events[event]);
state = smc->mib.m[MAC0].fddiMACRMTState ;
rmt_fsm(smc,event) ;
event = 0 ;
@@ -191,7 +189,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
smc->r.loop_avail = FALSE ;
smc->r.sm_ma_avail = FALSE ;
smc->r.no_flag = TRUE ;
- DB_RMTN(1,"RMT : ISOLATED\n",0,0) ;
+ DB_RMTN(1, "RMT : ISOLATED");
ACTIONS_DONE() ;
break ;
case RM0_ISOLATED :
@@ -213,7 +211,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
sm_ma_control(smc,MA_BEACON) ;
- DB_RMTN(1,"RMT : RING DOWN\n",0,0) ;
+ DB_RMTN(1, "RMT : RING DOWN");
RS_SET(smc,RS_NORINGOP) ;
smc->r.sm_ma_avail = FALSE ;
rmt_indication(smc,0) ;
@@ -248,7 +246,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
else
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
}
- DB_RMTN(1,"RMT : RING UP\n",0,0) ;
+ DB_RMTN(1, "RMT : RING UP");
RS_CLEAR(smc,RS_NORINGOP) ;
RS_SET(smc,RS_RINGOPCHANGE) ;
rmt_indication(smc,1) ;
@@ -285,7 +283,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_mac_check_beacon_claim(smc) ;
- DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ;
+ DB_RMTN(1, "RMT : RM3_DETECT");
ACTIONS_DONE() ;
break ;
case RM3_DETECT :
@@ -327,7 +325,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
* trace !
*/
if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
- DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0);
+ DB_RMTN(2, "RMT : DETECT && TRT_EXPIRED && T4/T5");
smc->r.bn_flag = TRUE ;
/*
* If one of the upstream stations beaconed
@@ -344,9 +342,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
* must be cleared in order to get in this condition.
*/
- DB_RMTN(2,
- "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
- tx,smc->r.bn_flag) ;
+ DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
+ tx, smc->r.bn_flag);
}
/*RM34a*/
else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) {
@@ -378,7 +375,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_mac_check_beacon_claim(smc) ;
- DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ;
+ DB_RMTN(1, "RMT : RM4_NON_OP_DUP");
ACTIONS_DONE() ;
break ;
case RM4_NON_OP_DUP :
@@ -406,7 +403,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
* trace !
*/
if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
- DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0);
+ DB_RMTN(2, "RMT : NOPDUP && TRT_EXPIRED && T4/T5");
smc->r.bn_flag = TRUE ;
/*
* If one of the upstream stations beaconed
@@ -423,9 +420,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
* must be cleared in order to get in this condition.
*/
- DB_RMTN(2,
- "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
- tx,smc->r.bn_flag) ;
+ DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
+ tx, smc->r.bn_flag);
}
/*RM44c*/
else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) {
@@ -448,7 +444,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
stop_rmt_timer0(smc) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
- DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ;
+ DB_RMTN(1, "RMT : RM5_RING_OP_DUP");
ACTIONS_DONE() ;
break;
case RM5_RING_OP_DUP :
@@ -472,7 +468,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_ma_control(smc,MA_DIRECTED) ;
RS_SET(smc,RS_BEACON) ;
- DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ;
+ DB_RMTN(1, "RMT : RM6_DIRECTED");
ACTIONS_DONE() ;
break ;
case RM6_DIRECTED :
@@ -515,7 +511,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
stop_rmt_timer2(smc) ;
smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
- DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ;
+ DB_RMTN(1, "RMT : RM7_TRACE");
ACTIONS_DONE() ;
break ;
case RM7_TRACE :
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index e80a08903fcf..ab939ae7e5b5 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -35,7 +35,6 @@ static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
#define SMT_TID_MAGIC 0x1f0a7b3c
-#ifdef DEBUG
static const char *const smt_type_name[] = {
"SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??",
"SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??",
@@ -47,7 +46,7 @@ static const char *const smt_class_name[] = {
"UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF",
"SRF","PMF_GET","PMF_SET","ESF"
} ;
-#endif
+
#define LAST_CLASS (SMT_PMF_SET)
static const struct fddi_addr SMT_Unknown = {
@@ -203,7 +202,7 @@ void smt_agent_task(struct s_smc *smc)
{
smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
- DB_SMT("SMT agent task\n",0,0) ;
+ DB_SMT("SMT agent task");
}
#ifndef SMT_REAL_TOKEN_CT
@@ -396,7 +395,7 @@ void smt_event(struct s_smc *smc, int event)
*/
if (smc->sm.smt_tvu &&
time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) {
- DB_SMT("SMT : UNA expired\n",0,0) ;
+ DB_SMT("SMT : UNA expired");
smc->sm.smt_tvu = 0 ;
if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr,
@@ -419,7 +418,7 @@ void smt_event(struct s_smc *smc, int event)
}
if (smc->sm.smt_tvd &&
time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) {
- DB_SMT("SMT : DNA expired\n",0,0) ;
+ DB_SMT("SMT : DNA expired");
smc->sm.smt_tvd = 0 ;
if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr,
&SMT_Unknown)){
@@ -504,10 +503,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#endif
smt_swap_para(sm,(int) mb->sm_len,1) ;
- DB_SMT("SMT : received packet [%s] at 0x%p\n",
- smt_type_name[m_fc(mb) & 0xf],sm) ;
- DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
- smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
+ DB_SMT("SMT : received packet [%s] at 0x%p",
+ smt_type_name[m_fc(mb) & 0xf], sm);
+ DB_SMT("SMT : version %d, class %s",
+ sm->smt_version,
+ smt_class_name[sm->smt_class > LAST_CLASS ? 0 : sm->smt_class]);
#ifdef SBA
/*
@@ -524,8 +524,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
* ignore any packet with NSA and A-indicator set
*/
if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
- DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : ignoring NSA with A-indicator set from %s",
+ addr_to_string(&sm->smt_source));
smt_free_mbuf(smc,mb) ;
return ;
}
@@ -556,15 +556,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
break ;
}
if (illegal) {
- DB_SMT("SMT : version = %d, dest = %s\n",
- sm->smt_version,addr_to_string(&sm->smt_source)) ;
+ DB_SMT("SMT : version = %d, dest = %s",
+ sm->smt_version, addr_to_string(&sm->smt_source));
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
smt_free_mbuf(smc,mb) ;
return ;
}
if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) ||
((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {
- DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ;
+ DB_SMT("SMT: info length error, len = %d", sm->smt_len);
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ;
smt_free_mbuf(smc,mb) ;
return ;
@@ -572,7 +572,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
switch (sm->smt_class) {
case SMT_NIF :
if (smt_check_para(smc,sm,plist_nif)) {
- DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
+ DB_SMT("SMT: NIF with para problem, ignoring");
break ;
}
switch (sm->smt_type) {
@@ -586,8 +586,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
if (!is_equal(
&smc->mib.m[MAC0].fddiMACUpstreamNbr,
&sm->smt_source)) {
- DB_SMT("SMT : updated my UNA = %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : updated my UNA = %s",
+ addr_to_string(&sm->smt_source));
if (!is_equal(&smc->mib.m[MAC0].
fddiMACUpstreamNbr,&SMT_Unknown)){
/* Do not update unknown address */
@@ -616,8 +616,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
is_individual(&sm->smt_source) &&
((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
(m_fc(mb) != FC_SMT_NSA))) {
- DB_SMT("SMT : replying to NIF request %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : replying to NIF request %s",
+ addr_to_string(&sm->smt_source));
smt_send_nif(smc,&sm->smt_source,
FC_SMT_INFO,
sm->smt_tid,
@@ -625,11 +625,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
}
break ;
case SMT_REPLY :
- DB_SMT("SMT : received NIF response from %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : received NIF response from %s",
+ addr_to_string(&sm->smt_source));
if (fs & A_INDICATOR) {
smc->sm.pend[SMT_TID_NIF] = 0 ;
- DB_SMT("SMT : duplicate address\n",0,0) ;
+ DB_SMT("SMT : duplicate address");
smc->mib.m[MAC0].fddiMACDupAddressTest =
DA_FAILED ;
smc->r.dup_addr_test = DA_FAILED ;
@@ -644,7 +644,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
if (!is_equal(
&smc->mib.m[MAC0].fddiMACDownstreamNbr,
&sm->smt_source)) {
- DB_SMT("SMT : updated my DNA\n",0,0) ;
+ DB_SMT("SMT : updated my DNA");
if (!is_equal(&smc->mib.m[MAC0].
fddiMACDownstreamNbr, &SMT_Unknown)){
/* Do not update unknown address */
@@ -671,11 +671,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
}
else if (sm->smt_tid ==
smc->sm.pend[SMT_TID_NIF_TEST]) {
- DB_SMT("SMT : NIF test TID ok\n",0,0) ;
+ DB_SMT("SMT : NIF test TID ok");
}
else {
- DB_SMT("SMT : expected TID %lx, got %lx\n",
- smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ;
+ DB_SMT("SMT : expected TID %lx, got %x",
+ smc->sm.pend[SMT_TID_NIF], sm->smt_tid);
}
break ;
default :
@@ -686,53 +686,53 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
case SMT_SIF_CONFIG : /* station information */
if (sm->smt_type != SMT_REQUEST)
break ;
- DB_SMT("SMT : replying to SIF Config request from %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : replying to SIF Config request from %s",
+ addr_to_string(&sm->smt_source));
smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
break ;
case SMT_SIF_OPER : /* station information */
if (sm->smt_type != SMT_REQUEST)
break ;
- DB_SMT("SMT : replying to SIF Operation request from %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT : replying to SIF Operation request from %s",
+ addr_to_string(&sm->smt_source));
smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
break ;
case SMT_ECF : /* echo frame */
switch (sm->smt_type) {
case SMT_REPLY :
smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
- DB_SMT("SMT: received ECF reply from %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT: received ECF reply from %s",
+ addr_to_string(&sm->smt_source));
if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) {
- DB_SMT("SMT: ECHODATA missing\n",0,0) ;
+ DB_SMT("SMT: ECHODATA missing");
break ;
}
if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {
- DB_SMT("SMT : ECF test TID ok\n",0,0) ;
+ DB_SMT("SMT : ECF test TID ok");
}
else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {
- DB_SMT("SMT : ECF test UNA ok\n",0,0) ;
+ DB_SMT("SMT : ECF test UNA ok");
}
else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {
- DB_SMT("SMT : ECF test DNA ok\n",0,0) ;
+ DB_SMT("SMT : ECF test DNA ok");
}
else {
- DB_SMT("SMT : expected TID %lx, got %lx\n",
- smc->sm.pend[SMT_TID_ECF],
- sm->smt_tid) ;
+ DB_SMT("SMT : expected TID %lx, got %x",
+ smc->sm.pend[SMT_TID_ECF],
+ sm->smt_tid);
}
break ;
case SMT_REQUEST :
smc->mib.priv.fddiPRIVECF_Req_Rx++ ;
{
if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {
- DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ;
+ DB_SMT("SMT: ECF with para problem,sending RDF");
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,
local) ;
break ;
}
- DB_SMT("SMT - sending ECF reply to %s\n",
- addr_to_string(&sm->smt_source),0) ;
+ DB_SMT("SMT - sending ECF reply to %s",
+ addr_to_string(&sm->smt_source));
/* set destination addr. & reply */
sm->smt_dest = sm->smt_source ;
@@ -750,7 +750,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#ifndef BOOT
case SMT_RAF : /* resource allocation */
#ifdef ESS
- DB_ESSN(2,"ESS: RAF frame received\n",0,0) ;
+ DB_ESSN(2, "ESS: RAF frame received");
fs = ess_raf_received_pack(smc,mb,sm,fs) ;
#endif
@@ -764,7 +764,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
break ;
case SMT_ESF : /* extended service - not supported */
if (sm->smt_type == SMT_REQUEST) {
- DB_SMT("SMT - received ESF, sending RDF\n",0,0) ;
+ DB_SMT("SMT - received ESF, sending RDF");
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
}
break ;
@@ -782,7 +782,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
*/
if ((sm->smt_class == SMT_PMF_SET) &&
!is_individual(&sm->smt_dest)) {
- DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ;
+ DB_SMT("SMT: ignoring PMF-SET with I/G set");
break ;
}
smt_pmf_received_pack(smc,mb, local) ;
@@ -798,16 +798,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
* we need to send a RDF frame according to 8.1.3.1.1,
* only if it is a REQUEST.
*/
- DB_SMT("SMT : class = %d, send RDF to %s\n",
- sm->smt_class, addr_to_string(&sm->smt_source)) ;
+ DB_SMT("SMT : class = %d, send RDF to %s",
+ sm->smt_class, addr_to_string(&sm->smt_source));
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
break ;
#endif
}
if (illegal) {
- DB_SMT("SMT: discarding invalid frame, reason = %d\n",
- illegal,0) ;
+ DB_SMT("SMT: discarding invalid frame, reason = %d", illegal);
}
smt_free_mbuf(smc,mb) ;
}
@@ -869,8 +868,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
if (sm->smt_type != SMT_REQUEST)
return ;
- DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n",
- addr_to_string(&sm->smt_source),reason) ;
+ DB_SMT("SMT: sending RDF to %s,reason = 0x%x",
+ addr_to_string(&sm->smt_source), reason);
/*
@@ -1653,7 +1652,7 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm,
const u_short *p = list ;
while (*p) {
if (!sm_to_para(smc,sm,(int) *p)) {
- DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
+ DB_SMT("SMT: smt_check_para - missing para %hx", *p);
return -1;
}
p++ ;
@@ -1679,11 +1678,11 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
p += plen ;
len -= plen ;
if (len < 0) {
- DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
+ DB_SMT("SMT : sm_to_para - length error %d", plen);
return NULL;
}
if ((plen & 3) && (para != SMT_P_ECHODATA)) {
- DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
+ DB_SMT("SMT : sm_to_para - odd length %d", plen);
return NULL;
}
if (found)
@@ -1937,7 +1936,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
{
int event ;
int port ;
- DB_SMT("SMT: action %d code %d\n",class,code) ;
+ DB_SMT("SMT: action %d code %d", class, code);
switch(class) {
case SMT_STATION_ACTION :
switch(code) {
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index 9956680402de..4e286c1ba9cd 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -173,7 +173,6 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
#define THRESHOLD_2 (2*TICKS_PER_SECOND)
#define THRESHOLD_32 (32*TICKS_PER_SECOND)
-#ifdef DEBUG
static const char * const srf_names[] = {
"None","MACPathChangeEvent", "MACNeighborChangeEvent",
"PORTPathChangeEvent", "PORTUndesiredConnectionAttemptEvent",
@@ -182,7 +181,6 @@ static const char * const srf_names[] = {
"MACNotCopiedCondition", "PORTEBErrorCondition",
"PORTLerCondition"
} ;
-#endif
void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
{
@@ -198,10 +196,10 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
}
if (code) {
- DB_SMT("SRF: %s index %d\n",srf_names[code],index) ;
+ DB_SMT("SRF: %s index %d", srf_names[code], index);
if (!(evc = smt_get_evc(smc,code,index))) {
- DB_SMT("SRF : smt_get_evc() failed\n",0,0) ;
+ DB_SMT("SRF : smt_get_evc() failed");
return ;
}
/*
@@ -217,7 +215,7 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
*/
smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ;
if (SMT_IS_CONDITION(code)) {
- DB_SMT("SRF: condition is %s\n",cond ? "ON":"OFF",0) ;
+ DB_SMT("SRF: condition is %s", cond ? "ON" : "OFF");
if (cond) {
*evc->evc_cond_state = TRUE ;
evc->evc_rep_required = TRUE ;
@@ -414,9 +412,9 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
- DB_SMT("SRF: state SR%d Threshold %d\n",
- smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
+ DB_SMT("SRF: sending SRF at %p, len %d", smt, mb->sm_len);
+ DB_SMT("SRF: state SR%d Threshold %lu",
+ smc->srf.sr_state, smc->srf.SRThreshold / TICKS_PER_SECOND);
#ifdef DEBUG
dump_smt(smc,smt,"SRF Send") ;
#endif
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b77e4ecf3cf2..b75d9cdcfb0c 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -57,8 +57,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void*);
-static struct rtnl_link_stats64 *
-fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
@@ -782,14 +781,12 @@ static void fjes_tx_retry(struct net_device *netdev)
netif_tx_wake_queue(queue);
}
-static struct rtnl_link_stats64 *
+static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
-
- return stats;
}
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
@@ -1158,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->unset_rx_last) {
adapter->rx_last_jiffies = jiffies;
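[Annotation] The fjes hunks above track two generic net core API changes rather than driver-specific logic: .ndo_get_stats64 now returns void and simply fills the caller-supplied structure, and NAPI poll routines report how much work was actually completed. A hedged sketch of the resulting poll pattern follows; example_clean_rx() and example_reenable_irq() are stand-in names, not fjes functions:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		/* stand-in for the driver's RX cleanup loop */
		int work_done = example_clean_rx(napi, budget);

		if (work_done < budget) {
			/* report the work actually done so the core can decide
			 * when to stop polling and re-arm interrupts */
			napi_complete_done(napi, work_done);
			example_reenable_irq(napi);
		}
		return work_done;
	}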
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810bad54b..bda0c6413450 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
struct socket *sock0;
struct socket *sock1u;
- struct net *net;
struct net_device *dev;
unsigned int hash_size;
@@ -184,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
struct pdp_ctx *pctx;
- int ret = 0;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -197,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
if (gtp0->type != GTP_TPDU)
return 1;
- rcu_read_lock();
pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- ret = -1;
- goto out_rcu;
+ return 1;
}
if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- ret = -1;
- goto out_rcu;
+ return 1;
}
- rcu_read_unlock();
/* Get rid of the GTP + UDP headers. */
return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
- rcu_read_unlock();
- return ret;
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
@@ -226,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
struct pdp_ctx *pctx;
- int ret = 0;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -254,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- rcu_read_lock();
pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- ret = -1;
- goto out_rcu;
+ return 1;
}
if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- ret = -1;
- goto out_rcu;
+ return 1;
}
- rcu_read_unlock();
/* Get rid of the GTP + UDP headers. */
return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
- rcu_read_unlock();
- return ret;
}
static void gtp_encap_disable(struct gtp_dev *gtp)
@@ -316,7 +299,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+ xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
@@ -612,7 +595,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
pktinfo.iph->tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
- htons(IP_DF),
+ 0,
pktinfo.gtph_port, pktinfo.gtph_port,
true, false);
break;
@@ -658,7 +641,7 @@ static void gtp_link_setup(struct net_device *dev)
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net);
+ int fd_gtp0, int fd_gtp1);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +658,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
- err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+ err = gtp_encap_enable(dev, gtp, fd0, fd1);
if (err < 0)
goto out_err;
@@ -821,7 +804,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
}
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net)
+ int fd_gtp0, int fd_gtp1)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct socket *sock0, *sock1u;
@@ -858,7 +841,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
gtp->sock0 = sock0;
gtp->sock1u = sock1u;
- gtp->net = src_net;
tuncfg.sk_user_data = gtp;
tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1358,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index ece59c54a653..4a40a3d825b4 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 3958adade7eb..d3e73ac158ae 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -34,6 +34,7 @@
#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
+#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7
#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
@@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
/* Fwd declaration */
struct ndis_tcp_ip_checksum_info;
+struct ndis_pkt_8021q_info;
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -135,8 +137,10 @@ struct hv_netvsc_packet {
u8 page_buf_cnt;
u16 q_idx;
- u32 send_buf_index;
+ u16 total_packets;
+ u32 total_bytes;
+ u32 send_buf_index;
u32 total_data_buflen;
};
@@ -155,6 +159,8 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
+#define NETVSC_HASH_KEYLEN 40
+
struct rndis_device {
struct net_device *ndev;
@@ -165,14 +171,17 @@ struct rndis_device {
spinlock_t request_lock;
struct list_head req_list;
- unsigned char hw_mac_adr[ETH_ALEN];
+ u8 hw_mac_adr[ETH_ALEN];
+ u8 rss_key[NETVSC_HASH_KEYLEN];
+ u16 ind_table[ITAB_NUM];
};
/* Interface */
struct rndis_message;
struct netvsc_device;
-int netvsc_device_add(struct hv_device *device, void *additional_info);
+int netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *info);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device,
struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
-int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet,
- void **data,
- struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci);
+int netvsc_recv_callback(struct net_device *net,
+ struct vmbus_channel *channel,
+ void *data, u32 len,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
- void *additional_info);
-void rndis_filter_device_remove(struct hv_device *dev);
-int rndis_filter_receive(struct hv_device *dev,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel);
+ struct netvsc_device_info *info);
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *nvdev);
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *key, int num_queue);
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct hv_device *dev,
+ struct vmbus_channel *channel,
+ void *data, u32 buflen);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
@@ -622,6 +634,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16
#define VRSS_CHANNEL_MAX 64
+#define VRSS_CHANNEL_DEFAULT 8
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -685,8 +698,7 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
- struct netvsc_stats __percpu *tx_stats;
- struct netvsc_stats __percpu *rx_stats;
+ u32 tx_checksum_mask;
/* Ethtool settings */
u8 duplex;
@@ -705,11 +717,21 @@ struct net_device_context {
u32 vf_serial;
};
+/* Per channel data */
+struct netvsc_channel {
+ struct vmbus_channel *channel;
+ struct multi_send_data msd;
+ struct multi_recv_comp mrc;
+ atomic_t queue_sends;
+
+ struct netvsc_stats tx_stats;
+ struct netvsc_stats rx_stats;
+};
+
/* Per netvsc device */
struct netvsc_device {
u32 nvsp_version;
- atomic_t num_outstanding_sends;
wait_queue_head_t wait_drain;
bool destroy;
@@ -735,32 +757,25 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
- struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
u32 num_sc_offered;
- atomic_t queue_sends[VRSS_CHANNEL_MAX];
/* Holds rndis device info */
void *extension;
int ring_size;
- /* The primary channel callback buffer */
- unsigned char *cb_buffer;
- /* The sub channel callback buffer */
- unsigned char *sub_cb_buf;
-
- struct multi_send_data msd[VRSS_CHANNEL_MAX];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
atomic_t num_outstanding_recvs;
atomic_t open_cnt;
+
+ struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
};
static inline struct netvsc_device *
@@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info {
};
};
-struct ndis_oject_header {
+struct ndis_object_header {
u8 type;
u8 revision;
u16 size;
@@ -947,6 +962,9 @@ struct ndis_oject_header {
#define NDIS_OBJECT_TYPE_DEFAULT 0x80
#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1
+
#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
@@ -973,8 +991,135 @@ struct ndis_oject_header {
#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ u32 ip4_txenc;
+ u32 ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+
+#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+
+ u32 ip4_rxenc;
+ u32 ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ u32 ip6_txenc;
+ u32 ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ u32 ip6_rxenc;
+ u32 ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+
+#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \
+ NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+};
+
+struct ndis_lsov1_offload {
+ u32 encap;
+ u32 maxsize;
+ u32 minsegs;
+ u32 opts;
+};
+
+struct ndis_ipsecv1_offload {
+ u32 encap;
+ u32 ah_esp;
+ u32 xport_tun;
+ u32 ip4_opts;
+ u32 flags;
+ u32 ip4_ah;
+ u32 ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ u32 ip4_encap;
+ u32 ip4_maxsz;
+ u32 ip4_minsg;
+ u32 ip6_encap;
+ u32 ip6_maxsz;
+ u32 ip6_minsg;
+ u32 ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+
+#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \
+ NDIS_LSOV2_CAP_TCP6OPT)
+};
+
+struct ndis_ipsecv2_offload {
+ u32 encap;
+ u16 ip6;
+ u16 ip4opt;
+ u16 ip6ext;
+ u16 ah;
+ u16 esp;
+ u16 ah_esp;
+ u16 xport;
+ u16 tun;
+ u16 xport_tun;
+ u16 lso;
+ u16 extseq;
+ u32 udp_esp;
+ u32 auth;
+ u32 crypto;
+ u32 sa_caps;
+};
+
+struct ndis_rsc_offload {
+ u16 ip4;
+ u16 ip6;
+};
+
+struct ndis_encap_offload {
+ u32 flags;
+ u32 maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_header header;
+ struct ndis_csum_offload csum;
+ struct ndis_lsov1_offload lsov1;
+ struct ndis_ipsecv1_offload ipsecv1;
+ struct ndis_lsov2_offload lsov2;
+ u32 flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload rsc;
+ struct ndis_encap_offload encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc)
+
struct ndis_offload_params {
- struct ndis_oject_header header;
+ struct ndis_object_header header;
u8 ip_v4_csum;
u8 tcp_ip_v4_csum;
u8 udp_ip_v4_csum;
@@ -1301,15 +1446,10 @@ struct rndis_message {
#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
-#define INFO_IPV4 2
-#define INFO_IPV6 4
-#define INFO_TCP 2
-#define INFO_UDP 4
-
#define TRANSPORT_INFO_NOT_IP 0
-#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
-#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV4_TCP 0x01
+#define TRANSPORT_INFO_IPV4_UDP 0x02
+#define TRANSPORT_INFO_IPV6_TCP 0x10
+#define TRANSPORT_INFO_IPV6_UDP 0x20
#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..fd6ebbefd919 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
if (!net_device)
return NULL;
- net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
- if (!net_device->cb_buffer) {
- kfree(net_device);
- return NULL;
- }
-
- net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
- sizeof(struct recv_comp_data));
+ net_device->chan_table[0].mrc.buf
+ = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
@@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
int i;
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- vfree(nvdev->mrc[i].buf);
+ vfree(nvdev->chan_table[i].mrc.buf);
- kfree(nvdev->cb_buffer);
kfree(nvdev);
}
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
- struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
- if (net_device && net_device->destroy)
- net_device = NULL;
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+ u16 q_idx)
+{
+ const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
- return net_device;
+ return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+ atomic_read(&nvchan->queue_sends) == 0;
}
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
- if (!net_device)
- goto get_in_err;
-
- if (net_device->destroy &&
- atomic_read(&net_device->num_outstanding_sends) == 0 &&
- atomic_read(&net_device->num_outstanding_recvs) == 0)
+ if (net_device && net_device->destroy)
net_device = NULL;
-get_in_err:
return net_device;
}
@@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
}
@@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
- int num_outstanding_sends;
u16 q_idx = 0;
int queue_sends;
/* Notify the layer above us */
if (likely(skb)) {
- struct hv_netvsc_packet *nvsc_packet
+ const struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
- u32 send_index = nvsc_packet->send_buf_index;
+ u32 send_index = packet->send_buf_index;
+ struct netvsc_stats *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
- q_idx = nvsc_packet->q_idx;
+ q_idx = packet->q_idx;
channel = incoming_channel;
+ tx_stats = &net_device->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets += packet->total_packets;
+ tx_stats->bytes += packet->total_bytes;
+ u64_stats_update_end(&tx_stats->syncp);
+
dev_consume_skb_any(skb);
}
- num_outstanding_sends =
- atomic_dec_return(&net_device->num_outstanding_sends);
- queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
+ queue_sends =
+ atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
- if (net_device->destroy && num_outstanding_sends == 0)
+ if (net_device->destroy && queue_sends == 0)
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
- unsigned long index;
- u32 max_words = net_device->map_words;
- unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
- u32 section_cnt = net_device->send_section_cnt;
- int ret_val = NETVSC_INVALID_INDEX;
- int i;
- int prev_val;
-
- for (i = 0; i < max_words; i++) {
- if (!~(map_addr[i]))
- continue;
- index = ffz(map_addr[i]);
- prev_val = sync_test_and_set_bit(index, &map_addr[i]);
- if (prev_val)
- continue;
- if ((index + (i * BITS_PER_LONG)) >= section_cnt)
- break;
- ret_val = (index + (i * BITS_PER_LONG));
- break;
+ unsigned long *map_addr = net_device->send_section_map;
+ unsigned int i;
+
+ for_each_clear_bit(i, map_addr, net_device->map_words) {
+ if (sync_test_and_set_bit(i, map_addr) == 0)
+ return i;
}
- return ret_val;
+
+ return NETVSC_INVALID_INDEX;
}
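[Annotation] The rewrite of netvsc_get_next_send_section() above collapses the hand-rolled word-by-word scan into for_each_clear_bit() plus an atomic claim, naturally retrying when another CPU wins the race for a slot. The same pattern works for any shared slot bitmap; a generic sketch (not netvsc code) assuming <linux/bitops.h>:

	/* Claim the first free slot in a shared bitmap, or return -1 if full. */
	static int claim_free_slot(unsigned long *bitmap, unsigned int nbits)
	{
		unsigned int i;

		for_each_clear_bit(i, bitmap, nbits) {
			/* test_and_set_bit() returns the previous bit value:
			 * 0 means this CPU claimed the slot first. */
			if (!test_and_set_bit(i, bitmap))
				return i;
		}
		return -1;
	}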
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
@@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
- u16 q_idx = packet->q_idx;
- struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
+ struct netvsc_channel *nvchan
+ = &net_device->chan_table[packet->q_idx];
+ struct vmbus_channel *out_channel = nvchan->channel;
struct net_device *ndev = hv_get_drvdata(device);
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
@@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
}
if (ret == 0) {
- atomic_inc(&net_device->num_outstanding_sends);
- atomic_inc(&net_device->queue_sends[q_idx]);
-
- if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
- netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
+ atomic_inc_return(&nvchan->queue_sends);
- if (atomic_read(&net_device->
- queue_sends[q_idx]) < 1)
- netif_tx_wake_queue(netdev_get_tx_queue(
- ndev, q_idx));
- }
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
+ netif_tx_stop_queue(txq);
} else if (ret == -EAGAIN) {
- netif_tx_stop_queue(netdev_get_tx_queue(
- ndev, q_idx));
- if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
- netif_tx_wake_queue(netdev_get_tx_queue(
- ndev, q_idx));
+ netif_tx_stop_queue(txq);
+ if (atomic_read(&nvchan->queue_sends) < 1) {
+ netif_tx_wake_queue(txq);
ret = -ENOSPC;
}
} else {
@@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
{
struct netvsc_device *net_device;
int ret = 0;
- struct vmbus_channel *out_channel;
- u16 q_idx = packet->q_idx;
+ struct netvsc_channel *nvchan;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp;
@@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
if (!net_device->send_section_map)
return -EAGAIN;
- out_channel = net_device->chn_table[q_idx];
-
+ nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
@@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
goto send_now;
}
- msdp = &net_device->msd[q_idx];
-
/* batch packets in send buffer if possible */
+ msdp = &nvchan->msd;
if (msdp->pkt)
msd_len = msdp->pkt->total_data_buflen;
@@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
packet->total_data_buflen += msd_len;
}
+ if (msdp->pkt) {
+ packet->total_packets += msdp->pkt->total_packets;
+ packet->total_bytes += msdp->pkt->total_bytes;
+ }
+
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
@@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
u32 *filled, u32 *avail)
{
- u32 first = nvdev->mrc[q_idx].first;
- u32 next = nvdev->mrc[q_idx].next;
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
+ u32 first = mrc->first;
+ u32 next = mrc->next;
*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
next - first;
@@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
*nvdev, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail;
- if (!nvdev->mrc[q_idx].buf)
+ if (unlikely(!mrc->buf))
return NULL;
count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
if (!filled)
return NULL;
- return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
- sizeof(struct recv_comp_data);
+ return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}
/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
int num_recv;
- nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
- NETVSC_RECVSLOT_MAX;
+ mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
@@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
static inline struct recv_comp_data *get_recv_comp_slot(
struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail, next;
struct recv_comp_data *rcd;
- if (!nvdev->recv_section)
+ if (unlikely(!nvdev->recv_section))
return NULL;
- if (!nvdev->mrc[q_idx].buf)
+ if (unlikely(!mrc->buf))
return NULL;
if (atomic_read(&nvdev->num_outstanding_recvs) >
@@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot(
if (!avail)
return NULL;
- next = nvdev->mrc[q_idx].next;
- rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
- nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
+ next = mrc->next;
+ rcd = mrc->buf + next * sizeof(struct recv_comp_data);
+ mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
atomic_inc(&nvdev->num_outstanding_recvs);
return rcd;
}
-static void netvsc_receive(struct netvsc_device *net_device,
- struct vmbus_channel *channel,
- struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct net_device_context *net_device_ctx,
+ struct hv_device *device,
+ struct vmbus_channel *channel,
+ struct vmtransfer_page_packet_header *vmxferpage_packet,
+ struct nvsp_message *nvsp)
{
- struct vmtransfer_page_packet_header *vmxferpage_packet;
- struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet nv_pkt;
- struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+ char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- struct net_device *ndev = hv_get_drvdata(device);
- void *data;
int ret;
struct recv_comp_data *rcd;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- /*
- * All inbound packets other than send completion should be xfer page
- * packet
- */
- if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
- netdev_err(ndev, "Unknown packet type received - %d\n",
- packet->type);
- return;
- }
-
- nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
- (packet->offset8 << 3));
-
/* Make sure this is a valid nvsp packet */
- if (nvsp_packet->hdr.msg_type !=
- NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
- netdev_err(ndev, "Unknown nvsp packet type received-"
- " %d\n", nvsp_packet->hdr.msg_type);
+ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Unknown nvsp packet type received %u\n",
+ nvsp->hdr.msg_type);
return;
}
- vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
-
- if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
- netdev_err(ndev, "Invalid xfer page set id - "
- "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
- vmxferpage_packet->xfer_pageset_id);
+ if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page set id - expecting %x got %x\n",
+ NETVSC_RECEIVE_BUFFER_ID,
+ vmxferpage_packet->xfer_pageset_id);
return;
}
@@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
- /* Initialize the netvsc packet */
- data = (void *)((unsigned long)net_device->
- recv_buf + vmxferpage_packet->ranges[i].byte_offset);
- netvsc_packet->total_data_buflen =
- vmxferpage_packet->ranges[i].byte_count;
+ void *data = recv_buf
+ + vmxferpage_packet->ranges[i].byte_offset;
+ u32 buflen = vmxferpage_packet->ranges[i].byte_count;
/* Pass it to the upper layer */
- status = rndis_filter_receive(device, netvsc_packet, &data,
- channel);
+ status = rndis_filter_receive(ndev, net_device, device,
+ channel, data, buflen);
}
- if (!net_device->mrc[q_idx].buf) {
+ if (!net_device->chan_table[q_idx].mrc.buf) {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
@@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
u64 request_id,
struct vmpacket_descriptor *desc)
{
- struct nvsp_message *nvmsg;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
-
- nvmsg = (struct nvsp_message *)((unsigned long)
- desc + (desc->offset8 << 3));
+ struct nvsp_message *nvmsg
+ = (struct nvsp_message *)((unsigned long)desc
+ + (desc->offset8 << 3));
switch (desc->type) {
case VM_PKT_COMP:
@@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device, channel, device, desc);
+ netvsc_receive(ndev, net_device, net_device_ctx,
+ device, channel,
+ (struct vmtransfer_page_packet_header *)desc,
+ nvmsg);
break;
case VM_PKT_DATA_INBAND:
@@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
void netvsc_channel_cb(void *context)
{
- int ret;
- struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct vmbus_channel *channel = context;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct hv_device *device;
struct netvsc_device *net_device;
- u32 bytes_recvd;
- u64 request_id;
struct vmpacket_descriptor *desc;
- unsigned char *buffer;
- int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
bool need_to_commit = false;
@@ -1289,68 +1245,28 @@ void netvsc_channel_cb(void *context)
else
device = channel->device_obj;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = hv_get_drvdata(device);
- buffer = get_per_channel_state(channel);
-
- do {
- desc = get_next_pkt_raw(channel);
- if (desc != NULL) {
- netvsc_process_raw_pkt(device,
- channel,
- net_device,
- ndev,
- desc->trans_id,
- desc);
-
- put_pkt_raw(channel, desc);
- need_to_commit = true;
- continue;
- }
- if (need_to_commit) {
- need_to_commit = false;
- commit_rd_index(channel);
- }
+ if (unlikely(!ndev))
+ return;
- ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
- &bytes_recvd, &request_id);
- if (ret == 0) {
- if (bytes_recvd > 0) {
- desc = (struct vmpacket_descriptor *)buffer;
- netvsc_process_raw_pkt(device,
- channel,
- net_device,
- ndev,
- request_id,
- desc);
- } else {
- /*
- * We are done for this pass.
- */
- break;
- }
-
- } else if (ret == -ENOBUFS) {
- if (bufferlen > NETVSC_PACKET_SIZE)
- kfree(buffer);
- /* Handle large packet */
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (buffer == NULL) {
- /* Try again next time around */
- netdev_err(ndev,
- "unable to allocate buffer of size "
- "(%d)!!\n", bytes_recvd);
- break;
- }
-
- bufferlen = bytes_recvd;
- }
- } while (1);
+ net_device = net_device_to_netvsc_device(ndev);
+ if (unlikely(net_device->destroy) &&
+ netvsc_channel_idle(net_device, q_idx))
+ return;
+
+ /* commit_rd_index() -> hv_signal_on_read() needs this. */
+ init_cached_read_index(channel);
+
+ while ((desc = get_next_pkt_raw(channel)) != NULL) {
+ netvsc_process_raw_pkt(device, channel, net_device,
+ ndev, desc->trans_id, desc);
- if (bufferlen > NETVSC_PACKET_SIZE)
- kfree(buffer);
+ put_pkt_raw(channel, desc);
+ need_to_commit = true;
+ }
+
+ if (need_to_commit)
+ commit_rd_index(channel);
netvsc_chk_recv_comp(net_device, channel, q_idx);
}
@@ -1359,11 +1275,11 @@ void netvsc_channel_cb(void *context)
* netvsc_device_add - Callback when the device belonging to this
* driver is added
*/
-int netvsc_device_add(struct hv_device *device, void *additional_info)
+int netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *device_info)
{
int i, ret = 0;
- int ring_size =
- ((struct netvsc_device_info *)additional_info)->ring_size;
+ int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1374,8 +1290,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->ring_size = ring_size;
- set_per_channel_state(device->channel, net_device->cb_buffer);
-
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
@@ -1394,7 +1308,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
* opened.
*/
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- net_device->chn_table[i] = device->channel;
+ net_device->chan_table[i].channel = device->channel;
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9414c054852..2d3cdb026a99 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,21 +42,11 @@
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
-#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_TSO6 | \
- NETIF_F_HW_CSUM)
-
-/* Restrict GSO size to account for NVGRE */
-#define NETVSC_GSO_MAX_SIZE 62768
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-static int max_num_vrss_chns = 8;
-
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net)
while (true) {
aread = 0;
for (i = 0; i < nvdev->num_chn; i++) {
- chn = nvdev->chn_table[i];
+ chn = nvdev->chan_table[i].channel;
if (!chn)
continue;
@@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
+/*
+ * Select queue for transmit.
+ *
+ * If a valid queue has already been assigned, then use that.
+ * Otherwise compute tx queue based on hash and the send table.
+ *
+ * This is basically similar to default (__netdev_pick_tx) with the added step
+ * of using the host send_table when no other queue has been assigned.
+ *
+ * TODO support XPS - but get_xps_queue not exported
+ */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
- u32 hash;
- u16 q_idx = 0;
+ struct sock *sk = skb->sk;
+ int q_idx = sk_tx_queue_get(sk);
- if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
- return 0;
+ if (q_idx < 0 || skb->ooo_okay ||
+ q_idx >= ndev->real_num_tx_queues) {
+ u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
+ int new_idx;
+
+ new_idx = nvsc_dev->send_table[hash]
+ % nvsc_dev->num_chn;
+
+ if (q_idx != new_idx && sk &&
+ sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, new_idx);
- hash = skb_get_hash(skb);
- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
- ndev->real_num_tx_queues;
+ q_idx = new_idx;
+ }
- if (!nvsc_dev->chn_table[q_idx])
+ if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
q_idx = 0;
return q_idx;
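The rewritten netvsc_select_queue() above reuses any queue already recorded on the socket and otherwise hashes the skb into the host-provided send_table, wrapping the result into the number of open channels and caching it on the socket when possible. A minimal sketch of just the table lookup; SEND_TAB_SIZE stands in for VRSS_SEND_TAB_SIZE and the table contents are an arbitrary example:

#include <stdint.h>
#include <stdio.h>

#define SEND_TAB_SIZE 16	/* stand-in for VRSS_SEND_TAB_SIZE */

/* Two-step indexing as in the patch: hash -> send_table entry -> open channel. */
static unsigned int pick_queue(const uint32_t *send_table, uint32_t hash,
			       unsigned int num_chn)
{
	return send_table[hash % SEND_TAB_SIZE] % num_chn;
}

int main(void)
{
	const uint32_t send_table[SEND_TAB_SIZE] = {
		0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
	};

	printf("hash 0x12345 -> queue %u\n", pick_queue(send_table, 0x12345, 4));
	return 0;
}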
@@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb)
return slots + frag_slots;
}
-static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+static u32 net_checksum_info(struct sk_buff *skb)
{
- u32 ret_val = TRANSPORT_INFO_NOT_IP;
-
- if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
- (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
- goto not_ip;
- }
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip = ip_hdr(skb);
- *trans_off = skb_transport_offset(skb);
-
- if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
- struct iphdr *iphdr = ip_hdr(skb);
-
- if (iphdr->protocol == IPPROTO_TCP)
- ret_val = TRANSPORT_INFO_IPV4_TCP;
- else if (iphdr->protocol == IPPROTO_UDP)
- ret_val = TRANSPORT_INFO_IPV4_UDP;
+ if (ip->protocol == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV4_TCP;
+ else if (ip->protocol == IPPROTO_UDP)
+ return TRANSPORT_INFO_IPV4_UDP;
} else {
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- ret_val = TRANSPORT_INFO_IPV6_TCP;
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ if (ip6->nexthdr == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV6_TCP;
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
- ret_val = TRANSPORT_INFO_IPV6_UDP;
+ return TRANSPORT_INFO_IPV6_UDP;
}
-not_ip:
- return ret_val;
+ return TRANSPORT_INFO_NOT_IP;
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
- struct ndis_tcp_ip_checksum_info *csum_info;
- int hdr_offset;
- u32 net_trans_info;
u32 hash;
- u32 skb_length;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
@@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* more pages we try linearizing it.
*/
- skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
@@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
+ packet->total_bytes = skb->len;
+ packet->total_packets = 1;
rndis_msg = (struct rndis_message *)skb->head;
@@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
VLAN_PRIO_SHIFT;
}
- net_trans_info = get_net_transport_info(skb, &hdr_offset);
-
- /*
- * Setup the sendside checksum offload only if this is not a
- * GSO packet.
- */
- if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
+ if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
- if (net_trans_info & (INFO_IPV4 << 16)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
@@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
- lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+ lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (net_trans_info & INFO_TCP) {
+ if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
+ struct ndis_tcp_ip_checksum_info *csum_info;
+
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
@@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
- if (net_trans_info & (INFO_IPV4 << 16))
+ csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
csum_info->transmit.is_ipv4 = 1;
- else
+
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ } else {
csum_info->transmit.is_ipv6 = 1;
- csum_info->transmit.tcp_checksum = 1;
- csum_info->transmit.tcp_header_offset = hdr_offset;
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ }
} else {
- /* UDP checksum (and other) offload is not supported. */
+ /* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
goto drop;
}
@@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
- if (likely(ret == 0)) {
- struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += skb_length;
- u64_stats_update_end(&tx_stats->syncp);
+ if (likely(ret == 0))
return NETDEV_TX_OK;
- }
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
@@ -541,7 +538,6 @@ no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}
-
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct hv_netvsc_packet *packet,
- struct ndis_tcp_ip_checksum_info *csum_info,
- void *data, u16 vlan_tci)
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan,
+ void *data, u32 buflen)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
+ skb = netdev_alloc_skb_ip_align(net, buflen);
if (!skb)
return skb;
@@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- memcpy(skb_put(skb, packet->total_data_buflen), data,
- packet->total_data_buflen);
+ memcpy(skb_put(skb, buflen), data, buflen);
skb->protocol = eth_type_trans(skb, net);
@@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (vlan_tci & VLAN_TAG_PRESENT)
+ if (vlan) {
+ u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
+ }
return skb;
}
@@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* netvsc_recv_callback - Callback when we receive a packet from the
* "wire" on the specified device.
*/
-int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet,
- void **data,
- struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci)
+int netvsc_recv_callback(struct net_device *net,
+ struct vmbus_channel *channel,
+ void *data, u32 len,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan)
{
- struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
struct net_device *vf_netdev;
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
@@ -659,30 +658,31 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* policy filters on the host). Deliver these via the VF
* interface in the guest.
*/
+ rcu_read_lock();
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
+ rcu_read_unlock();
return NVSP_STAT_FAIL;
}
if (net != vf_netdev)
- skb_record_rx_queue(skb,
- channel->offermsg.offer.sub_channel_index);
+ skb_record_rx_queue(skb, q_idx);
/*
* Even if injecting the packet, record the statistics
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+ rx_stats = &net_device->chan_table[q_idx].rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
- rx_stats->bytes += packet->total_data_buflen;
+ rx_stats->bytes += len;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
@@ -695,7 +695,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* is done.
* TODO - use NAPI?
*/
- netif_rx(skb);
+ netif_receive_skb(skb);
+ rcu_read_unlock();
return 0;
}
@@ -719,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
+ u32 num_chn)
+{
+ struct netvsc_device_info device_info;
+ int ret;
+
+ memset(&device_info, 0, sizeof(device_info));
+ device_info.num_chn = num_chn;
+ device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = num_chn;
+
+ ret = rndis_filter_device_add(dev, &device_info);
+ if (ret)
+ return ret;
+
+ ret = netif_set_real_num_tx_queues(net, num_chn);
+ if (ret)
+ return ret;
+
+ ret = netif_set_real_num_rx_queues(net, num_chn);
+
+ return ret;
+}
+
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = net_device_ctx->nvdev;
- struct netvsc_device_info device_info;
- u32 num_chn;
- u32 max_chn;
- int ret = 0;
- bool recovering = false;
+ unsigned int count = channels->combined_count;
+ int ret;
+
+ /* We do not support separate count for rx, tx, or other */
+ if (count == 0 ||
+ channels->rx_count || channels->tx_count || channels->other_count)
+ return -EINVAL;
+
+ if (count > net->num_tx_queues || count > net->num_rx_queues)
+ return -EINVAL;
if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
- num_chn = nvdev->num_chn;
- max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
-
- if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
- pr_info("vRSS unsupported before NVSP Version 5\n");
+ if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return -EINVAL;
- }
- /* We do not support rx, tx, or other */
- if (!channels ||
- channels->rx_count ||
- channels->tx_count ||
- channels->other_count ||
- (channels->combined_count < 1))
+ if (count > nvdev->max_chn)
return -EINVAL;
- if (channels->combined_count > max_chn) {
- pr_info("combined channels too high, using %d\n", max_chn);
- channels->combined_count = max_chn;
- }
-
ret = netvsc_close(net);
if (ret)
- goto out;
+ return ret;
- do_set:
net_device_ctx->start_remove = true;
- rndis_filter_device_remove(dev);
-
- nvdev->num_chn = channels->combined_count;
-
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
- device_info.ring_size = ring_size;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ rndis_filter_device_remove(dev, nvdev);
- ret = rndis_filter_device_add(dev, &device_info);
- if (ret) {
- if (recovering) {
- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
-
- nvdev = net_device_ctx->nvdev;
-
- ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
- if (ret) {
- if (recovering) {
- netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
-
- ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
- if (ret) {
- if (recovering) {
- netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
+ ret = netvsc_set_queues(net, dev, count);
+ if (ret == 0)
+ nvdev->num_chn = count;
+ else
+ netvsc_set_queues(net, dev, nvdev->num_chn);
- out:
netvsc_open(net);
net_device_ctx->start_remove = false;
+
/* We may have missed link change notifications */
schedule_delayed_work(&net_device_ctx->dwork, 0);
return ret;
-
- recover:
- /* If the above failed, we attempt to recover through the same
- * process but with the original number of channels.
- */
- netdev_err(net, "could not set channels, recovering\n");
- recovering = true;
- channels->combined_count = num_chn;
- goto do_set;
}
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
@@ -875,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = ndevctx->nvdev;
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
- u32 num_chn;
- int ret = 0;
+ int ret;
if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
@@ -885,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
- num_chn = nvdev->num_chn;
-
ndevctx->start_remove = true;
- rndis_filter_device_remove(hdev);
+ rndis_filter_device_remove(hdev, nvdev);
ndev->mtu = mtu;
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.num_chn = num_chn;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ device_info.num_chn = nvdev->num_chn;
+ device_info.max_num_vrss_chns = nvdev->num_chn;
rndis_filter_device_add(hdev, &device_info);
out:
@@ -908,47 +880,50 @@ out:
return ret;
}
-static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
- struct rtnl_link_stats64 *t)
+static void netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
- cpu);
- struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
- cpu);
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
+ struct netvsc_device *nvdev = ndev_ctx->nvdev;
+ int i;
+
+ if (!nvdev)
+ return;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats *stats;
+ u64 packets, bytes, multicast;
unsigned int start;
+ stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
- tx_packets = tx_stats->packets;
- tx_bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ t->tx_bytes += bytes;
+ t->tx_packets += packets;
+
+ stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
- rx_packets = rx_stats->packets;
- rx_bytes = rx_stats->bytes;
- rx_multicast = rx_stats->multicast + rx_stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-
- t->tx_bytes += tx_bytes;
- t->tx_packets += tx_packets;
- t->rx_bytes += rx_bytes;
- t->rx_packets += rx_packets;
- t->multicast += rx_multicast;
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ multicast = stats->multicast + stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ t->rx_bytes += bytes;
+ t->rx_packets += packets;
+ t->multicast += multicast;
}
t->tx_dropped = net->stats.tx_dropped;
- t->tx_errors = net->stats.tx_dropped;
+ t->tx_errors = net->stats.tx_errors;
t->rx_dropped = net->stats.rx_dropped;
t->rx_errors = net->stats.rx_errors;
-
- return t;
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
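netvsc_get_stats64() above now walks the per-channel tx/rx counters instead of per-CPU ones and folds them into the rtnl totals. The sketch below mirrors that accumulation in userspace; the u64_stats sequence-counter retry loops the kernel wraps each read in are omitted, and the structure names are stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Per-channel counters, a stand-in for struct netvsc_stats. */
struct chan_stats {
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes, rx_multicast, rx_broadcast;
};

/* Fold every channel into device-wide totals, mirroring netvsc_get_stats64(). */
static void fold_stats(const struct chan_stats *chan, int num_chn,
		       uint64_t *tx_p, uint64_t *tx_b,
		       uint64_t *rx_p, uint64_t *rx_b, uint64_t *mcast)
{
	int i;

	*tx_p = *tx_b = *rx_p = *rx_b = *mcast = 0;
	for (i = 0; i < num_chn; i++) {
		*tx_p += chan[i].tx_packets;
		*tx_b += chan[i].tx_bytes;
		*rx_p += chan[i].rx_packets;
		*rx_b += chan[i].rx_bytes;
		*mcast += chan[i].rx_multicast + chan[i].rx_broadcast;
	}
}

int main(void)
{
	const struct chan_stats chan[2] = {
		{ 10, 1000, 20, 2000, 1, 2 },
		{  5,  500, 10, 1000, 0, 1 },
	};
	uint64_t tp, tb, rp, rb, mc;

	fold_stats(chan, 2, &tp, &tb, &rp, &rb, &mc);
	printf("tx %llu pkts / %llu bytes, rx %llu pkts / %llu bytes, multicast %llu\n",
	       (unsigned long long)tp, (unsigned long long)tb,
	       (unsigned long long)rp, (unsigned long long)rb,
	       (unsigned long long)mc);
	return 0;
}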
@@ -986,11 +961,19 @@ static const struct {
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
};
+#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
+
+/* 4 statistics per queue (rx/tx packets/bytes) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+
switch (string_set) {
case ETH_SS_STATS:
- return ARRAY_SIZE(netvsc_stats);
+ return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
default:
return -EINVAL;
}
@@ -1000,24 +983,107 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
const void *nds = &ndc->eth_stats;
- int i;
+ const struct netvsc_stats *qstats;
+ unsigned int start;
+ u64 packets, bytes;
+ int i, j;
- for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
+
+ for (j = 0; j < nvdev->num_chn; j++) {
+ qstats = &nvdev->chan_table[j].tx_stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&qstats->syncp);
+ packets = qstats->packets;
+ bytes = qstats->bytes;
+ } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+
+ qstats = &nvdev->chan_table[j].rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&qstats->syncp);
+ packets = qstats->packets;
+ bytes = qstats->bytes;
+ } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+ }
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+ u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
+ memcpy(p + i * ETH_GSTRING_LEN,
netvsc_stats[i].name, ETH_GSTRING_LEN);
+
+ p += i * ETH_GSTRING_LEN;
+ for (i = 0; i < nvdev->num_chn; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+
+ break;
+ }
+}
+
+static int
+netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
+ struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nvdev->num_chn;
+ return 0;
+
+ case ETHTOOL_GRXFH:
+ return netvsc_get_rss_hash_opts(nvdev, info);
}
+ return -EOPNOTSUPP;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1029,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net)
}
#endif
+static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
+{
+ return NETVSC_HASH_KEYLEN;
+}
+
+static u32 netvsc_rss_indir_size(struct net_device *dev)
+{
+ return ITAB_NUM;
+}
+
+static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = ndc->nvdev;
+ struct rndis_device *rndis_dev = ndev->extension;
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ indir[i] = rndis_dev->ind_table[i];
+ }
+
+ if (key)
+ memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
+
+ return 0;
+}
+
+static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = ndc->nvdev;
+ struct rndis_device *rndis_dev = ndev->extension;
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ if (indir[i] >= dev->num_rx_queues)
+ return -EINVAL;
+
+ for (i = 0; i < ITAB_NUM; i++)
+ rndis_dev->ind_table[i] = indir[i];
+ }
+
+ if (!key) {
+ if (!indir)
+ return 0;
+
+ key = rndis_dev->rss_key;
+ }
+
+ return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1040,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_settings = netvsc_get_settings,
.set_settings = netvsc_set_settings,
+ .get_rxnfc = netvsc_get_rxnfc,
+ .get_rxfh_key_size = netvsc_get_rxfh_key_size,
+ .get_rxfh_indir_size = netvsc_rss_indir_size,
+ .get_rxfh = netvsc_get_rxfh,
+ .set_rxfh = netvsc_set_rxfh,
};
static const struct net_device_ops device_ops = {
@@ -1160,15 +1293,6 @@ out_unlock:
rtnl_unlock();
}
-static void netvsc_free_netdev(struct net_device *netdev)
-{
- struct net_device_context *net_device_ctx = netdev_priv(netdev);
-
- free_percpu(net_device_ctx->tx_stats);
- free_percpu(net_device_ctx->rx_stats);
- free_netdev(netdev);
-}
-
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
struct net_device *dev;
@@ -1305,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
- struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
ndev = get_netvsc_byref(vf_netdev);
@@ -1313,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
- netvsc_dev = net_device_ctx->nvdev;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
@@ -1333,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev,
int ret;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
- num_online_cpus());
+ VRSS_CHANNEL_MAX);
if (!net)
return -ENOMEM;
@@ -1348,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev,
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
- net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
- if (!net_device_ctx->tx_stats) {
- free_netdev(net);
- return -ENOMEM;
- }
- net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
- if (!net_device_ctx->rx_stats) {
- free_percpu(net_device_ctx->tx_stats);
- free_netdev(net);
- return -ENOMEM;
- }
-
hv_set_drvdata(dev, net);
net_device_ctx->start_remove = false;
@@ -1371,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev,
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
net->netdev_ops = &device_ops;
-
- net->hw_features = NETVSC_HW_FEATURES;
- net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
-
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
@@ -1384,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
+ num_online_cpus());
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- netvsc_free_netdev(net);
+ free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ /* hw_features computed in rndis_filter_device_add */
+ net->features = net->hw_features |
+ NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ net->vlan_features = net->features;
+
nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
- netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
@@ -1409,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev,
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
- rndis_filter_device_remove(dev);
- netvsc_free_netdev(net);
+ rndis_filter_device_remove(dev, nvdev);
+ free_netdev(net);
}
return ret;
@@ -1420,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev)
{
struct net_device *net;
struct net_device_context *ndev_ctx;
- struct netvsc_device *net_device;
net = hv_get_drvdata(dev);
@@ -1430,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev)
}
ndev_ctx = netdev_priv(net);
- net_device = ndev_ctx->nvdev;
/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
* removing the device.
@@ -1451,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev)
* Call to the vsc driver to let it know that the device is being
* removed
*/
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, ndev_ctx->nvdev);
hv_set_drvdata(dev, NULL);
- netvsc_free_netdev(net);
+ free_netdev(net);
return 0;
}
@@ -1495,7 +1605,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
/* Avoid Vlan dev with same MAC registering as VF */
- if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(event_dev))
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8d90904e0e49..19356f56b7b1 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -57,6 +57,14 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
+static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
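netvsc_hash_key above is the widely used 40-byte Toeplitz RSS key; the driver hands it to the host rather than hashing in the guest. For reference, a self-contained sketch of how a Toeplitz hash over a flow tuple is computed from such a key. This is illustrative only, not driver code, and the example tuple bytes are arbitrary:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same 40 bytes as netvsc_hash_key above. */
static const uint8_t rss_key[40] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};

/* Toeplitz hash: for every set bit of the input (MSB first), XOR in the 32-bit
 * window of the key starting at that bit.  The key must be at least len + 4
 * bytes long. */
static uint32_t toeplitz(const uint8_t *key, const uint8_t *data, size_t len)
{
	uint32_t hash = 0;
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
	size_t bit, nbits = len * 8;

	for (bit = 0; bit < nbits; bit++) {
		if (data[bit / 8] & (0x80u >> (bit % 8)))
			hash ^= window;

		/* slide the key window left by one bit */
		window <<= 1;
		if (key[(bit + 32) / 8] & (0x80u >> ((bit + 32) % 8)))
			window |= 1;
	}

	return hash;
}

int main(void)
{
	/* Arbitrary example bytes standing in for a src/dst IP + port tuple. */
	const uint8_t tuple[12] = {
		192, 168, 1, 10, 10, 0, 0, 1, 0x1f, 0x90, 0xc0, 0x01
	};

	printf("toeplitz = 0x%08" PRIx32 "\n", toeplitz(rss_key, tuple, sizeof(tuple)));
	return 0;
}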
@@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct hv_device *hv_dev,
- struct rndis_message *rndis_msg)
+ const struct rndis_message *rndis_msg)
{
struct net_device *netdev = hv_get_drvdata(hv_dev);
@@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
return NULL;
}
-static int rndis_filter_receive_data(struct rndis_device *dev,
- struct rndis_message *msg,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel)
+static int rndis_filter_receive_data(struct net_device *ndev,
+ struct rndis_device *dev,
+ struct rndis_message *msg,
+ struct vmbus_channel *channel,
+ void *data, u32 data_buflen)
{
- struct rndis_packet *rndis_pkt;
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
+ const struct ndis_tcp_ip_checksum_info *csum_info;
+ const struct ndis_pkt_8021q_info *vlan;
u32 data_offset;
- struct ndis_pkt_8021q_info *vlan;
- struct ndis_tcp_ip_checksum_info *csum_info;
- u16 vlan_tci = 0;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
-
- rndis_pkt = &msg->msg.pkt;
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
- pkt->total_data_buflen -= data_offset;
+ data_buflen -= data_offset;
/*
* Make sure we got a valid RNDIS message, now total_data_buflen
* should be the data packet size plus the trailer padding size
*/
- if (pkt->total_data_buflen < rndis_pkt->data_len) {
+ if (unlikely(data_buflen < rndis_pkt->data_len)) {
netdev_err(dev->ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
- pkt->total_data_buflen, rndis_pkt->data_len);
+ data_buflen, rndis_pkt->data_len);
return NVSP_STAT_FAIL;
}
+ vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+
/*
* Remove the rndis trailer padding from rndis packet message
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- pkt->total_data_buflen = rndis_pkt->data_len;
- *data = (void *)((unsigned long)(*data) + data_offset);
-
- vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
- if (vlan) {
- vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
- (vlan->pri << VLAN_PRIO_SHIFT);
- }
-
+ data = (void *)((unsigned long)data + data_offset);
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
- csum_info, channel, vlan_tci);
+ return netvsc_recv_callback(ndev, channel,
+ data, rndis_pkt->data_len,
+ csum_info, vlan);
}
-int rndis_filter_receive(struct hv_device *dev,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel)
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct hv_device *dev,
+ struct vmbus_channel *channel,
+ void *data, u32 buflen)
{
- struct net_device *ndev = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_dev = net_device_ctx->nvdev;
- struct rndis_device *rndis_dev;
- struct rndis_message *rndis_msg;
- int ret = 0;
-
- if (!net_dev) {
- ret = NVSP_STAT_FAIL;
- goto exit;
- }
+ struct rndis_device *rndis_dev = net_dev->extension;
+ struct rndis_message *rndis_msg = data;
/* Make sure the rndis device state is initialized */
- if (!net_dev->extension) {
- netdev_err(ndev, "got rndis message but no rndis device - "
- "dropping this message!\n");
- ret = NVSP_STAT_FAIL;
- goto exit;
+ if (unlikely(!rndis_dev)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "got rndis message but no rndis device!\n");
+ return NVSP_STAT_FAIL;
}
- rndis_dev = (struct rndis_device *)net_dev->extension;
- if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
- netdev_err(ndev, "got rndis message but rndis device "
- "uninitialized...dropping this message!\n");
- ret = NVSP_STAT_FAIL;
- goto exit;
+ if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "got rndis message uninitialized\n");
+ return NVSP_STAT_FAIL;
}
- rndis_msg = *data;
-
- if (netif_msg_rx_err(net_device_ctx))
+ if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- /* data msg */
- ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt,
- data, channel);
- break;
-
+ return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+ channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
@@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev,
break;
}
-exit:
- return ret;
+ return 0;
}
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
- if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
+ struct net_device_context *ndevctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct ndis_offload *hwcaps;
+ u32 nvsp_version = nvdev->nvsp_version;
+ u8 ndis_rev;
+ size_t size;
+
+ if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+
+ request->request_msg.msg_len += size;
+ query->info_buflen = size;
+ hwcaps = (struct ndis_offload *)
+ ((unsigned long)query + query->info_buf_offset);
+
+ hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
+ hwcaps->header.revision = ndis_rev;
+ hwcaps->header.size = size;
+
+ } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
struct ndis_recv_scale_cap *cap;
request->request_msg.msg_len +=
@@ -526,6 +537,44 @@ cleanup:
return ret;
}
+/* Get the hardware offload capabilities */
+static int
+rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
+{
+ u32 caps_len = sizeof(*caps);
+ int ret;
+
+ memset(caps, 0, sizeof(*caps));
+
+ ret = rndis_filter_query_device(dev,
+ OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ caps, &caps_len);
+ if (ret)
+ return ret;
+
+ if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
+ netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
+ caps->header.type);
+ return -EINVAL;
+ }
+
+ if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
+ netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
+ caps->header.revision);
+ return -EINVAL;
+ }
+
+ if (caps->header.size > caps_len ||
+ caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
+ netdev_warn(dev->ndev,
+ "invalid NDIS objsize %u, data size %u\n",
+ caps->header.size, caps_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rndis_filter_query_device_mac(struct rndis_device *dev)
{
u32 size = ETH_ALEN;
@@ -663,23 +712,15 @@ cleanup:
return ret;
}
-static const u8 netvsc_hash_key[] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
- 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
- 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
-};
-#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
-
-static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *rss_key, int num_queue)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_recv_scale_param) +
- 4*ITAB_NUM + HASH_KEYLEN;
+ 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
struct ndis_recv_scale_param *rssp;
u32 *itab;
u8 *keyp;
@@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
- rssp->hashkey_size = HASH_KEYLEN;
+ rssp->hashkey_size = NETVSC_HASH_KEYLEN;
rssp->kashkey_offset = rssp->indirect_taboffset +
rssp->indirect_tabsize;
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
- itab[i] = i % num_queue;
+ itab[i] = rdev->ind_table[i];
/* Set hask key values */
keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
- for (i = 0; i < HASH_KEYLEN; i++)
- keyp[i] = netvsc_hash_key[i];
+ memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
@@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ if (set_complete->status == RNDIS_STATUS_SUCCESS)
+ memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+ else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
- u32 status;
int ret;
request = get_rndis_request(dev, RNDIS_MSG_SET,
@@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- status = set_complete->status;
-
cleanup:
if (request)
put_rndis_request(dev, request);
@@ -864,6 +903,23 @@ cleanup:
return ret;
}
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+ int i;
+
+ if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
+ return false;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+ if (atomic_read(&nvchan->queue_sends) > 0)
+ return false;
+ }
+
+ return true;
+}
+
static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -894,9 +950,7 @@ cleanup:
spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
/* Wait for all send completions */
- wait_event(nvdev->wait_drain,
- atomic_read(&nvdev->num_outstanding_sends) == 0 &&
- atomic_read(&nvdev->num_outstanding_recvs) == 0);
+ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
if (request)
put_rndis_request(dev, request);
@@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (chn_index >= nvscdev->num_chn)
return;
- set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
- NETVSC_PACKET_SIZE);
-
- nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
- sizeof(struct recv_comp_data));
+ nvscdev->chan_table[chn_index].mrc.buf
+ = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb, new_sc);
if (ret == 0)
- nvscdev->chn_table[chn_index] = new_sc;
+ nvscdev->chan_table[chn_index].channel = new_sc;
spin_lock_irqsave(&nvscdev->sc_lock, flags);
nvscdev->num_sc_offered--;
@@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
}
int rndis_filter_device_add(struct hv_device *dev,
- void *additional_info)
+ struct netvsc_device_info *device_info)
{
- int ret;
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
- struct netvsc_device_info *device_info = additional_info;
+ struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ unsigned int gso_max_size = GSO_MAX_SIZE;
u32 mtu, size;
u32 num_rss_qs;
u32 sc_delta;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
unsigned long flags;
+ int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev,
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
- ret = netvsc_device_add(dev, additional_info);
+ ret = netvsc_device_add(dev, device_info);
if (ret != 0) {
kfree(rndis_device);
return ret;
@@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
if (ret != 0) {
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
@@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
- /* Turn on the offloads; the host supports all of the relevant
- * offloads.
- */
+ /* Find HW offload capabilities */
+ ret = rndis_query_hwcaps(rndis_device, &hwcaps);
+ if (ret != 0) {
+ rndis_filter_device_remove(dev, net_device);
+ return ret;
+ }
+
+ /* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
- /* A value of zero means "no change"; now turn on what we
- * want.
- */
- offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+ /* Linux does not care about IP checksum, always does in kernel */
+ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+
+ /* Compute tx offload settings based on hw capabilities */
+ net->hw_features = NETIF_F_RXCSUM;
+
+ if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
+ /* Can checksum TCP */
+ net->hw_features |= NETIF_F_IP_CSUM;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
+
+ offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+
+ if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
+ offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO;
+
+ if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ }
+
+ if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
+ offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
+ }
+ }
+
+ if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
+ net->hw_features |= NETIF_F_IPV6_CSUM;
+
+ offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
+
+ if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
+ offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO6;
+
+ if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ }
+
+ if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
+ offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
+ }
+ }
+
+ netif_set_gso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
@@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
num_rss_qs = net_device->num_chn - 1;
+
+ for (i = 0; i < ITAB_NUM; i++)
+ rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
+ net_device->num_chn);
+
net_device->num_sc_offered = num_rss_qs;
if (net_device->num_chn == 1)
goto out;
- net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
- NETVSC_PACKET_SIZE);
- if (!net_device->sub_cb_buf) {
- net_device->num_chn = 1;
- dev_info(&dev->device, "No memory for subchannels.\n");
- goto out;
- }
-
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
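The hunk above seeds rndis_device->ind_table with ethtool_rxfh_indir_default(), which spreads indirection-table entries round-robin across the channels. A one-function equivalent (ITAB_SIZE stands in for ITAB_NUM, and 4 channels is an arbitrary example):

#include <stdio.h>

#define ITAB_SIZE 128	/* stand-in for ITAB_NUM */

/* Round-robin default, equivalent to ethtool_rxfh_indir_default(index, n). */
static unsigned int indir_default(unsigned int index, unsigned int num_chn)
{
	return index % num_chn;
}

int main(void)
{
	unsigned int i, table[ITAB_SIZE];

	for (i = 0; i < ITAB_SIZE; i++)
		table[i] = indir_default(i, 4);

	printf("entry 0 -> queue %u, entry 5 -> queue %u, entry %u -> queue %u\n",
	       table[0], table[5], ITAB_SIZE - 1, table[ITAB_SIZE - 1]);
	return 0;
}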
@@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
- ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+ ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+ net_device->num_chn);
/*
* Set the number of sub-channels to be received.
@@ -1152,13 +1248,13 @@ out:
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
-void rndis_filter_device_remove(struct hv_device *dev)
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *net_dev)
{
- struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
/* If not all subchannel offers are complete, wait for them until
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 46d53a6c8cf8..76ba7ecfe142 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
/* Reset */
if (gpio_is_valid(rstn)) {
udelay(1);
- gpio_set_value(rstn, 0);
+ gpio_set_value_cansleep(rstn, 0);
udelay(1);
- gpio_set_value(rstn, 1);
+ gpio_set_value_cansleep(rstn, 1);
usleep_range(120, 240);
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f864737a..ef688518ad77 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
+ uint8_t *buffer;
uint8_t value;
+ buffer = kmalloc(1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, &value, 1, 1000);
- return ret >= 0 ? value : ret;
+ 0, reg, buffer, 1, 1000);
+
+ if (ret >= 0) {
+ value = buffer[0];
+ kfree(buffer);
+ return value;
+ } else {
+ kfree(buffer);
+ return ret;
+ }
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
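The atusb hunks here and below move USB control-transfer data from on-stack arrays to kmalloc()'d buffers; buffers handed to the USB core may be DMA-mapped, and stack memory is not reliably DMA-capable on all platforms. A small userspace model of the allocate/copy-out/free pattern; fake_control_read() is a made-up stand-in for atusb_control_msg():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up stand-in for atusb_control_msg(): fills 'buf' with 'len' bytes and
 * returns the number of bytes "transferred" or a negative error. */
static int fake_control_read(unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);
	return (int)len;
}

/* The pattern adopted by these hunks: allocate a heap buffer for the transfer,
 * copy the result out, and free the buffer on every exit path. */
static int read_one_reg(unsigned char *value)
{
	unsigned char *buffer = malloc(1);
	int ret;

	if (!buffer)
		return -1;

	ret = fake_control_read(buffer, 1);
	if (ret >= 0)
		*value = buffer[0];

	free(buffer);
	return ret;
}

int main(void)
{
	unsigned char v = 0;

	if (read_one_reg(&v) >= 0)
		printf("reg = 0x%02x\n", v);
	return 0;
}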
@@ -549,13 +562,6 @@ static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
- struct device *dev = &atusb->usb_dev->dev;
-
- if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
- dev_info(dev, "Automatic frame retransmission is only available from "
- "firmware version 0.3. Please update if you want this feature.");
- return -EINVAL;
- }
return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[3];
+ unsigned char *buffer;
int ret;
+ buffer = kmalloc(3, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
+ kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- char build[ATUSB_BUILD_SIZE + 1];
+ char *build;
int ret;
+ build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
+ if (!build)
+ return -ENOMEM;
+
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
+ kfree(build);
return ret;
}
@@ -698,7 +714,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+ unsigned char *buffer;
__le64 extended_addr;
u64 addr;
int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
+ buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Firmware is new enough so we fetch the address from EEPROM */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
- if (ret < 0)
- dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+ if (ret < 0) {
+ dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ kfree(buffer);
+ return ret;
+ }
memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
/* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
+ kfree(buffer);
return ret;
}
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
- IEEE802154_HW_FRAME_RETRIES;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
+ if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
+ hw->flags |= IEEE802154_HW_FRAME_RETRIES;
+
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,
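Editorial note: the atusb changes above all follow one rule: buffers handed to USB control transfers must be kmalloc()ed, because on-stack memory is not DMA-safe, and the heap buffer has to be freed on every return path. A self-contained sketch of that pattern, using a hypothetical vendor request number rather than the driver's own:

#include <linux/usb.h>
#include <linux/slab.h>

#define EXAMPLE_REQ_READ	0x01	/* hypothetical vendor request */

static int example_usb_read_reg(struct usb_device *udev, u8 reg)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);		/* DMA-capable transfer buffer */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      EXAMPLE_REQ_READ,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, buf, 1, 1000);
	if (ret >= 0)
		ret = buf[0];			/* success: return the register value */

	kfree(buf);
	return ret;
}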
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 66c0eeafcb5d..312fce7302d3 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -78,10 +78,8 @@ static void ifb_ri_tasklet(unsigned long _txp)
}
while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
- u32 from = G_TC_FROM(skb->tc_verd);
-
- skb->tc_verd = 0;
- skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
+ skb->tc_redirected = 0;
+ skb->tc_skip_classify = 1;
u64_stats_update_begin(&txp->tsync);
txp->tx_packets++;
@@ -101,13 +99,12 @@ static void ifb_ri_tasklet(unsigned long _txp)
rcu_read_unlock();
skb->skb_iif = txp->dev->ifindex;
- if (from & AT_EGRESS) {
+ if (!skb->tc_from_ingress) {
dev_queue_xmit(skb);
- } else if (from & AT_INGRESS) {
+ } else {
skb_pull(skb, skb->mac_len);
netif_receive_skb(skb);
- } else
- BUG();
+ }
}
if (__netif_tx_trylock(txq)) {
@@ -129,8 +126,8 @@ resched:
}
-static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void ifb_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct ifb_dev_private *dp = netdev_priv(dev);
struct ifb_q_private *txp = dp->tx_private;
@@ -157,8 +154,6 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
}
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
static int ifb_dev_init(struct net_device *dev)
@@ -241,7 +236,6 @@ static void ifb_setup(struct net_device *dev)
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ifb_dev_private *dp = netdev_priv(dev);
- u32 from = G_TC_FROM(skb->tc_verd);
struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
u64_stats_update_begin(&txp->rsync);
@@ -249,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
txp->rx_bytes += skb->len;
u64_stats_update_end(&txp->rsync);
- if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
+ if (!skb->tc_redirected || !skb->skb_iif) {
dev_kfree_skb(skb);
dev->stats.rx_dropped++;
return NETDEV_TX_OK;
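Editorial note: several drivers in this series (ifb, ipvlan, loopback, macsec, macvlan) are converted to the new void ndo_get_stats64() signature: the callback now just fills the caller-provided rtnl_link_stats64 instead of returning a pointer. A minimal sketch of such a callback over hypothetical per-cpu counters, using the usual u64_stats seqcount retry loop for a consistent 64-bit snapshot:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* assumed to be allocated elsewhere with alloc_percpu() */
static struct example_pcpu_stats __percpu *example_stats;

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *p = per_cpu_ptr(example_stats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->rx_packets;
			bytes = p->rx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
}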
diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile
index df79910192d6..8a2c64dc9641 100644
--- a/drivers/net/ipvlan/Makefile
+++ b/drivers/net/ipvlan/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_IPVLAN) += ipvlan.o
+obj-$(CONFIG_IPVTAP) += ipvtap.o
ipvlan-objs := ipvlan_core.o ipvlan_main.o
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index dbfbb33ac66c..800a46c8d26c 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -94,9 +94,11 @@ struct ipvl_port {
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
u16 mode;
+ u16 dev_id_start;
struct work_struct wq;
struct sk_buff_head backlog;
int count;
+ struct ida ida;
};
struct ipvl_skb_cb {
@@ -133,4 +135,11 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
u16 proto);
unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
+void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+ unsigned int len, bool success, bool mcast);
+int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[]);
+void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
+void ipvlan_link_setup(struct net_device *dev);
+int ipvlan_link_register(struct rtnl_link_ops *ops);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 83ce74acf82d..1f3295e274d0 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -16,12 +16,9 @@ void ipvlan_init_secret(void)
net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
-static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast)
{
- if (!ipvlan)
- return;
-
if (likely(success)) {
struct ipvl_pcpu_stats *pcptr;
@@ -36,6 +33,7 @@ static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
}
}
+EXPORT_SYMBOL_GPL(ipvlan_count_rx);
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 8b0f99300cbc..aa8575ccbce3 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev)
return -EINVAL;
}
- if (netif_is_macvlan_port(dev)) {
- netdev_err(dev, "Master is a macvlan port.\n");
+ if (netdev_is_rx_handler_busy(dev)) {
+ netdev_err(dev, "Device is already in use.\n");
return -EBUSY;
}
@@ -119,6 +119,8 @@ static int ipvlan_port_create(struct net_device *dev)
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
+ ida_init(&port->ida);
+ port->dev_id_start = 1;
err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
if (err)
@@ -150,6 +152,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
dev_put(skb->dev);
kfree_skb(skb);
}
+ ida_destroy(&port->ida);
kfree(port);
}
@@ -301,8 +304,8 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
dev_mc_sync(ipvlan->phy_dev, dev);
}
-static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *s)
+static void ipvlan_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -339,7 +342,6 @@ static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
s->rx_dropped = rx_errs;
s->tx_dropped = tx_drps;
}
- return s;
}
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -494,8 +496,8 @@ err:
return ret;
}
-static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port;
@@ -533,6 +535,29 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
+ /* If the port-id base is at the MAX value, then wrap it around and
+ * begin from 0x1 again. This may be due to a busy system where lots
+ * of slaves are getting created and deleted.
+ */
+ if (port->dev_id_start == 0xFFFE)
+ port->dev_id_start = 0x1;
+
+ /* Since L2 address is shared among all IPvlan slaves including
+ * master, use unique 16-bit dev-ids to differentiate among them.
+ * Assign IDs between 0x1 and 0xFFFE (used by the master) to each
+ * slave link [see addrconf_ifid_eui48()].
+ */
+ err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
+ GFP_KERNEL);
+ if (err < 0)
+ err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
+ GFP_KERNEL);
+ if (err < 0)
+ goto destroy_ipvlan_port;
+ dev->dev_id = err;
+ /* Increment id-base to the next slot for the future assignment */
+ port->dev_id_start = err + 1;
+
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
* packets.
@@ -543,7 +568,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = register_netdevice(dev);
if (err < 0)
- goto destroy_ipvlan_port;
+ goto remove_ida;
err = netdev_upper_dev_link(phy_dev, dev);
if (err) {
@@ -562,13 +587,16 @@ unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
unregister_netdev:
unregister_netdevice(dev);
+remove_ida:
+ ida_simple_remove(&port->ida, dev->dev_id);
destroy_ipvlan_port:
if (create)
ipvlan_port_destroy(phy_dev);
return err;
}
+EXPORT_SYMBOL_GPL(ipvlan_link_new);
-static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
+void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
@@ -579,12 +607,14 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
kfree_rcu(addr, rcu);
}
+ ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}
+EXPORT_SYMBOL_GPL(ipvlan_link_delete);
-static void ipvlan_link_setup(struct net_device *dev)
+void ipvlan_link_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -595,6 +625,7 @@ static void ipvlan_link_setup(struct net_device *dev)
dev->header_ops = &ipvlan_header_ops;
dev->ethtool_ops = &ipvlan_ethtool_ops;
}
+EXPORT_SYMBOL_GPL(ipvlan_link_setup);
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
@@ -605,22 +636,22 @@ static struct rtnl_link_ops ipvlan_link_ops = {
.kind = "ipvlan",
.priv_size = sizeof(struct ipvl_dev),
- .get_size = ipvlan_nl_getsize,
- .policy = ipvlan_nl_policy,
- .validate = ipvlan_nl_validate,
- .fill_info = ipvlan_nl_fillinfo,
- .changelink = ipvlan_nl_changelink,
- .maxtype = IFLA_IPVLAN_MAX,
-
.setup = ipvlan_link_setup,
.newlink = ipvlan_link_new,
.dellink = ipvlan_link_delete,
};
-static int ipvlan_link_register(struct rtnl_link_ops *ops)
+int ipvlan_link_register(struct rtnl_link_ops *ops)
{
+ ops->get_size = ipvlan_nl_getsize;
+ ops->policy = ipvlan_nl_policy;
+ ops->validate = ipvlan_nl_validate;
+ ops->fill_info = ipvlan_nl_fillinfo;
+ ops->changelink = ipvlan_nl_changelink;
+ ops->maxtype = IFLA_IPVLAN_MAX;
return rtnl_link_register(ops);
}
+EXPORT_SYMBOL_GPL(ipvlan_link_register);
static int ipvlan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
@@ -674,23 +705,22 @@ static int ipvlan_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
- netif_err(ipvlan, ifup, ipvlan->dev,
- "Failed to add IPv6=%pI6c addr for %s intf\n",
- ip6_addr, ipvlan->dev->name);
- return -EINVAL;
- }
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
addr->master = ipvlan;
- memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
- addr->atype = IPVL_IPV6;
+ if (is_v6) {
+ memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
+ addr->atype = IPVL_IPV6;
+ } else {
+ memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
+ addr->atype = IPVL_IPV4;
+ }
list_add_tail(&addr->anode, &ipvlan->addrs);
/* If the interface is not up, the address will be added to the hash
@@ -702,11 +732,11 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
return 0;
}
-static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
+ addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr)
return;
@@ -717,6 +747,23 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
return;
}
+static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv6=%pI6c addr for %s intf\n",
+ ip6_addr, ipvlan->dev->name);
+ return -EINVAL;
+ }
+
+ return ipvlan_add_addr(ipvlan, ip6_addr, true);
+}
+
+static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ return ipvlan_del_addr(ipvlan, ip6_addr, true);
+}
+
static int ipvlan_addr6_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -750,45 +797,19 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
- struct ipvl_addr *addr;
-
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
return -EINVAL;
}
- addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- addr->master = ipvlan;
- memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
- addr->atype = IPVL_IPV4;
- list_add_tail(&addr->anode, &ipvlan->addrs);
- /* If the interface is not up, the address will be added to the hash
- * list by ipvlan_open.
- */
- if (netif_running(ipvlan->dev))
- ipvlan_ht_addr_add(ipvlan, addr);
-
- return 0;
+ return ipvlan_add_addr(ipvlan, ip4_addr, false);
}
static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
- struct ipvl_addr *addr;
-
- addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
- if (!addr)
- return;
-
- ipvlan_ht_addr_del(addr);
- list_del(&addr->anode);
- kfree_rcu(addr, rcu);
-
- return;
+ return ipvlan_del_addr(ipvlan, ip4_addr, false);
}
static int ipvlan_addr4_event(struct notifier_block *unused,
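Editorial note: the ipvlan_main.c hunks above give every slave a unique 16-bit dev_id from a per-port IDA: allocation starts at a moving dev_id_start, wraps around to the low range when the top of the space is exhausted, and the id is released again on link delete and on the error-unwind path. A small self-contained sketch of that two-step ida_simple_get() pattern (names are hypothetical):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);
static u16 example_id_start = 1;

static int example_alloc_id(void)
{
	int id;

	/* Try [example_id_start, 0xFFFE) first ... */
	id = ida_simple_get(&example_ida, example_id_start, 0xFFFE, GFP_KERNEL);
	/* ... then wrap around and retry [1, example_id_start). */
	if (id < 0)
		id = ida_simple_get(&example_ida, 0x1, example_id_start, GFP_KERNEL);
	if (id < 0)
		return id;

	example_id_start = id + 1;	/* next search starts past this id */
	return id;
}

static void example_free_id(int id)
{
	ida_simple_remove(&example_ida, id);
}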
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
new file mode 100644
index 000000000000..2b713b63b62c
--- /dev/null
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -0,0 +1,241 @@
+#include <linux/etherdevice.h>
+#include "ipvlan.h"
+#include <linux/if_vlan.h>
+#include <linux/if_tap.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+
+#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
+
+static dev_t ipvtap_major;
+static struct cdev ipvtap_cdev;
+
+static const void *ipvtap_net_namespace(struct device *d)
+{
+ struct net_device *dev = to_net_dev(d->parent);
+ return dev_net(dev);
+}
+
+static struct class ipvtap_class = {
+ .name = "ipvtap",
+ .owner = THIS_MODULE,
+ .ns_type = &net_ns_type_operations,
+ .namespace = ipvtap_net_namespace,
+};
+
+struct ipvtap_dev {
+ struct ipvl_dev vlan;
+ struct tap_dev tap;
+};
+
+static void ipvtap_count_tx_dropped(struct tap_dev *tap)
+{
+ struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+ struct ipvl_dev *vlan = &vlantap->vlan;
+
+ this_cpu_inc(vlan->pcpu_stats->tx_drps);
+}
+
+static void ipvtap_count_rx_dropped(struct tap_dev *tap)
+{
+ struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+ struct ipvl_dev *vlan = &vlantap->vlan;
+
+ ipvlan_count_rx(vlan, 0, 0, 0);
+}
+
+static void ipvtap_update_features(struct tap_dev *tap,
+ netdev_features_t features)
+{
+ struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+ struct ipvl_dev *vlan = &vlantap->vlan;
+
+ vlan->sfeatures = features;
+ netdev_update_features(vlan->dev);
+}
+
+static int ipvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct ipvtap_dev *vlantap = netdev_priv(dev);
+ int err;
+
+ INIT_LIST_HEAD(&vlantap->tap.queue_list);
+
+ /* Since ipvlan supports all offloads by default, make the
+ * tap device support all offloads also.
+ */
+ vlantap->tap.tap_features = TUN_OFFLOADS;
+ vlantap->tap.count_tx_dropped = ipvtap_count_tx_dropped;
+ vlantap->tap.update_features = ipvtap_update_features;
+ vlantap->tap.count_rx_dropped = ipvtap_count_rx_dropped;
+
+ err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
+ if (err)
+ return err;
+
+ /* Don't put anything that may fail after ipvlan_link_new
+ * because we can't undo what it does.
+ */
+ err = ipvlan_link_new(src_net, dev, tb, data);
+ if (err) {
+ netdev_rx_handler_unregister(dev);
+ return err;
+ }
+
+ vlantap->tap.dev = vlantap->vlan.dev;
+
+ return err;
+}
+
+static void ipvtap_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ struct ipvtap_dev *vlan = netdev_priv(dev);
+
+ netdev_rx_handler_unregister(dev);
+ tap_del_queues(&vlan->tap);
+ ipvlan_link_delete(dev, head);
+}
+
+static void ipvtap_setup(struct net_device *dev)
+{
+ ipvlan_link_setup(dev);
+ dev->tx_queue_len = TUN_READQ_SIZE;
+ dev->priv_flags &= ~IFF_NO_QUEUE;
+}
+
+static struct rtnl_link_ops ipvtap_link_ops __read_mostly = {
+ .kind = "ipvtap",
+ .setup = ipvtap_setup,
+ .newlink = ipvtap_newlink,
+ .dellink = ipvtap_dellink,
+ .priv_size = sizeof(struct ipvtap_dev),
+};
+
+static int ipvtap_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ipvtap_dev *vlantap;
+ struct device *classdev;
+ dev_t devt;
+ int err;
+ char tap_name[IFNAMSIZ];
+
+ if (dev->rtnl_link_ops != &ipvtap_link_ops)
+ return NOTIFY_DONE;
+
+ snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
+ vlantap = netdev_priv(dev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Create the device node here after the network device has
+ * been registered but before register_netdevice has
+ * finished running.
+ */
+ err = tap_get_minor(ipvtap_major, &vlantap->tap);
+ if (err)
+ return notifier_from_errno(err);
+
+ devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
+ classdev = device_create(&ipvtap_class, &dev->dev, devt,
+ dev, tap_name);
+ if (IS_ERR(classdev)) {
+ tap_free_minor(ipvtap_major, &vlantap->tap);
+ return notifier_from_errno(PTR_ERR(classdev));
+ }
+ err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+ tap_name);
+ if (err)
+ return notifier_from_errno(err);
+ break;
+ case NETDEV_UNREGISTER:
+ /* vlantap->tap.minor == 0 if NETDEV_REGISTER above failed */
+ if (vlantap->tap.minor == 0)
+ break;
+ sysfs_remove_link(&dev->dev.kobj, tap_name);
+ devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
+ device_destroy(&ipvtap_class, devt);
+ tap_free_minor(ipvtap_major, &vlantap->tap);
+ break;
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
+ if (tap_queue_resize(&vlantap->tap))
+ return NOTIFY_BAD;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipvtap_notifier_block __read_mostly = {
+ .notifier_call = ipvtap_device_event,
+};
+
+static int ipvtap_init(void)
+{
+ int err;
+
+ err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
+
+ if (err)
+ goto out1;
+
+ err = class_register(&ipvtap_class);
+ if (err)
+ goto out2;
+
+ err = register_netdevice_notifier(&ipvtap_notifier_block);
+ if (err)
+ goto out3;
+
+ err = ipvlan_link_register(&ipvtap_link_ops);
+ if (err)
+ goto out4;
+
+ return 0;
+
+out4:
+ unregister_netdevice_notifier(&ipvtap_notifier_block);
+out3:
+ class_unregister(&ipvtap_class);
+out2:
+ tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
+out1:
+ return err;
+}
+module_init(ipvtap_init);
+
+static void ipvtap_exit(void)
+{
+ rtnl_link_unregister(&ipvtap_link_ops);
+ unregister_netdevice_notifier(&ipvtap_notifier_block);
+ class_unregister(&ipvtap_class);
+ tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
+}
+module_exit(ipvtap_exit);
+MODULE_ALIAS_RTNL_LINK("ipvtap");
+MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_LICENSE("GPL");
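Editorial note: both ipvtap above and the reworked macvtap below hang their per-device state off a struct tap_dev embedded in a wrapper structure, and the tap callbacks (count_tx_dropped, count_rx_dropped, update_features) recover that wrapper with container_of(). A tiny illustrative sketch of the idiom, with hypothetical structure names:

#include <linux/kernel.h>

struct example_tap {
	int minor;
};

struct example_wrap {
	int			cookie;
	struct example_tap	tap;	/* embedded member */
};

static struct example_wrap *example_from_tap(struct example_tap *tap)
{
	/* Walk back from the address of the embedded member to the
	 * enclosing structure.
	 */
	return container_of(tap, struct example_wrap, tap);
}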
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 44e4f386a5dc..be4ea6aa57a9 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -25,7 +25,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/ioport.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -169,8 +168,6 @@ struct au1k_private {
u32 speed;
u32 newspeed;
- struct timer_list timer;
-
struct resource *ioarea;
struct au1k_irda_platform_data *platdata;
struct clk *irda_clk;
@@ -178,8 +175,6 @@ struct au1k_private {
static int qos_mtt_bits = 0x07; /* 1 ms or more */
-#define RUN_AT(x) (jiffies + (x))
-
static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
if (p->platdata && p->platdata->set_phy_mode)
@@ -620,8 +615,6 @@ static int au1k_irda_start(struct net_device *dev)
/* power up */
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
- aup->timer.expires = RUN_AT((3 * HZ));
- aup->timer.data = (unsigned long)dev;
return 0;
}
@@ -642,7 +635,6 @@ static int au1k_irda_stop(struct net_device *dev)
}
netif_stop_queue(dev);
- del_timer(&aup->timer);
/* disable the interrupt */
free_irq(aup->irq_tx, dev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index be5bb0b7f29c..3151b580dbd6 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -22,7 +22,7 @@ static int max_rate = 57600;
static int max_rate = 115200;
#endif
-static void turnaround_delay(unsigned long last_jif, int mtt)
+static void turnaround_delay(int mtt)
{
long ticks;
@@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev)
UART_CLEAR_LSR(port);
ch = UART_GET_CHAR(port);
async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
- dev->last_rx = jiffies;
}
static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
@@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work)
int tx_cnt = 10;
while (bfin_sir_is_receiving(dev) && --tx_cnt)
- turnaround_delay(dev->last_rx, self->mtt);
+ turnaround_delay(self->mtt);
bfin_sir_stop_rx(port);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e3fe9a286136..fede6864c737 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self)
async_unwrap_char(self->ndev, &self->ndev->stats,
&self->rx_buff, (u8)data);
- self->ndev->last_rx = jiffies;
if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
continue;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1e05b7c2d157..b23b71981fd5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void loopback_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
u64 bytes = 0;
u64 packets = 0;
@@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
stats->tx_packets = packets;
stats->rx_bytes = bytes;
stats->tx_bytes = bytes;
- return stats;
}
static u32 always_on(struct net_device *dev)
@@ -164,6 +163,7 @@ static void loopback_setup(struct net_device *dev)
{
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
+ dev->min_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f83cf6696820..ff0a5ed3ca80 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -879,6 +879,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
+ if (!err)
+ macsec_skb_cb(skb)->valid = true;
+
rcu_read_lock_bh();
pn = ntohl(macsec_ethhdr(skb)->packet_number);
if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
@@ -2888,13 +2891,13 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *s)
+static void macsec_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
{
int cpu;
if (!dev->tstats)
- return s;
+ return;
for_each_possible_cpu(cpu) {
struct pcpu_sw_netstats *stats;
@@ -2918,8 +2921,6 @@ static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
-
- return s;
}
static int macsec_get_iflink(const struct net_device *dev)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 20b3fdf282c5..9261722960a7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -855,8 +855,8 @@ static void macvlan_uninit(struct net_device *dev)
macvlan_port_destroy(port->dev);
}
-static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void macvlan_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -893,7 +893,6 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
stats->rx_dropped = rx_errors;
stats->tx_dropped = tx_dropped;
}
- return stats;
}
static int macvlan_vlan_rx_add_vid(struct net_device *dev,
@@ -1111,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev)
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
return -EINVAL;
- if (netif_is_ipvlan_port(dev))
+ if (netdev_is_rx_handler_busy(dev))
return -EBUSY;
port = kzalloc(sizeof(*port), GFP_KERNEL);
@@ -1526,7 +1525,6 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
- ops->priv_size = sizeof(struct macvlan_dev);
ops->validate = macvlan_validate;
ops->maxtype = IFLA_MACVLAN_MAX;
ops->policy = macvlan_policy;
@@ -1549,6 +1547,7 @@ static struct rtnl_link_ops macvlan_link_ops = {
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
.get_link_net = macvlan_get_link_net,
+ .priv_size = sizeof(struct macvlan_dev),
};
static int macvlan_device_event(struct notifier_block *unused,
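Editorial note: the macvlan change above stops macvlan_link_register() from forcing .priv_size, so each rtnl_link_ops now declares its own private-area size; that lets a wrapper driver such as macvtap (or ipvtap) embed the base driver's private struct as the first member of a larger one while the base code keeps using netdev_priv() unchanged. A sketch of the layout with hypothetical names:

#include <linux/cache.h>
#include <net/rtnetlink.h>

struct example_base_priv {
	int base_state;
};

struct example_wrap_priv {
	struct example_base_priv base;	/* must stay the first member */
	int			 extra_state;
};

static struct rtnl_link_ops example_base_ops __read_mostly = {
	.kind		= "example-base",
	.priv_size	= sizeof(struct example_base_priv),
};

static struct rtnl_link_ops example_wrap_ops __read_mostly = {
	.kind		= "example-wrap",
	/* Larger private area; the base driver still finds its own
	 * struct at offset 0 of netdev_priv(dev).
	 */
	.priv_size	= sizeof(struct example_wrap_priv),
};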
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653eceb5..a4bfc10b61dd 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
@@ -23,114 +24,16 @@
#include <linux/virtio_net.h>
#include <linux/skb_array.h>
-/*
- * A macvtap queue is the central object of this driver, it connects
- * an open character device to a macvlan interface. There can be
- * multiple queues on one interface, which map back to queues
- * implemented in hardware on the underlying device.
- *
- * macvtap_proto is used to allocate queues through the sock allocation
- * mechanism.
- *
- */
-struct macvtap_queue {
- struct sock sk;
- struct socket sock;
- struct socket_wq wq;
- int vnet_hdr_sz;
- struct macvlan_dev __rcu *vlan;
- struct file *file;
- unsigned int flags;
- u16 queue_index;
- bool enabled;
- struct list_head next;
- struct skb_array skb_array;
-};
-
-#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
-
-#define MACVTAP_VNET_LE 0x80000000
-#define MACVTAP_VNET_BE 0x40000000
-
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
-{
- return q->flags & MACVTAP_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
-{
- int s = !!(q->flags & MACVTAP_VNET_BE);
-
- if (put_user(s, sp))
- return -EFAULT;
-
- return 0;
-}
-
-static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
-{
- int s;
-
- if (get_user(s, sp))
- return -EFAULT;
-
- if (s)
- q->flags |= MACVTAP_VNET_BE;
- else
- q->flags &= ~MACVTAP_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
-{
- return q->flags & MACVTAP_VNET_LE ||
- macvtap_legacy_is_little_endian(q);
-}
-
-static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
-{
- return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
-}
-
-static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
-{
- return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
-}
-
-static struct proto macvtap_proto = {
- .name = "macvtap",
- .owner = THIS_MODULE,
- .obj_size = sizeof (struct macvtap_queue),
+struct macvtap_dev {
+ struct macvlan_dev vlan;
+ struct tap_dev tap;
};
/*
* Variables for dealing with macvtaps device numbers.
*/
static dev_t macvtap_major;
-#define MACVTAP_NUM_DEVS (1U << MINORBITS)
-static DEFINE_MUTEX(minor_lock);
-static DEFINE_IDR(minor_idr);
-#define GOODCOPY_LEN 128
static const void *macvtap_net_namespace(struct device *d)
{
struct net_device *dev = to_net_dev(d->parent);
@@ -145,328 +48,33 @@ static struct class macvtap_class = {
};
static struct cdev macvtap_cdev;
-static const struct proto_ops macvtap_socket_ops;
-
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
-#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
-
-static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
-{
- return rcu_dereference(dev->rx_handler_data);
-}
-
-/*
- * RCU usage:
- * The macvtap_queue and the macvlan_dev are loosely coupled, the
- * pointers from one to the other can only be read while rcu_read_lock
- * or rtnl is held.
- *
- * Both the file and the macvlan_dev hold a reference on the macvtap_queue
- * through sock_hold(&q->sk). When the macvlan_dev goes away first,
- * q->vlan becomes inaccessible. When the files gets closed,
- * macvtap_get_queue() fails.
- *
- * There may still be references to the struct sock inside of the
- * queue from outbound SKBs, but these never reference back to the
- * file or the dev. The data structure is freed through __sk_free
- * when both our references and any pending SKBs are gone.
- */
-
-static int macvtap_enable_queue(struct net_device *dev, struct file *file,
- struct macvtap_queue *q)
-{
- struct macvlan_dev *vlan = netdev_priv(dev);
- int err = -EINVAL;
-
- ASSERT_RTNL();
-
- if (q->enabled)
- goto out;
-
- err = 0;
- rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
- q->queue_index = vlan->numvtaps;
- q->enabled = true;
-
- vlan->numvtaps++;
-out:
- return err;
-}
-
-/* Requires RTNL */
-static int macvtap_set_queue(struct net_device *dev, struct file *file,
- struct macvtap_queue *q)
-{
- struct macvlan_dev *vlan = netdev_priv(dev);
-
- if (vlan->numqueues == MAX_MACVTAP_QUEUES)
- return -EBUSY;
-
- rcu_assign_pointer(q->vlan, vlan);
- rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
- sock_hold(&q->sk);
-
- q->file = file;
- q->queue_index = vlan->numvtaps;
- q->enabled = true;
- file->private_data = q;
- list_add_tail(&q->next, &vlan->queue_list);
-
- vlan->numvtaps++;
- vlan->numqueues++;
-
- return 0;
-}
-
-static int macvtap_disable_queue(struct macvtap_queue *q)
-{
- struct macvlan_dev *vlan;
- struct macvtap_queue *nq;
-
- ASSERT_RTNL();
- if (!q->enabled)
- return -EINVAL;
-
- vlan = rtnl_dereference(q->vlan);
-
- if (vlan) {
- int index = q->queue_index;
- BUG_ON(index >= vlan->numvtaps);
- nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
- nq->queue_index = index;
-
- rcu_assign_pointer(vlan->taps[index], nq);
- RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
- q->enabled = false;
-
- vlan->numvtaps--;
- }
-
- return 0;
-}
-
-/*
- * The file owning the queue got closed, give up both
- * the reference that the files holds as well as the
- * one from the macvlan_dev if that still exists.
- *
- * Using the spinlock makes sure that we don't get
- * to the queue again after destroying it.
- */
-static void macvtap_put_queue(struct macvtap_queue *q)
-{
- struct macvlan_dev *vlan;
-
- rtnl_lock();
- vlan = rtnl_dereference(q->vlan);
-
- if (vlan) {
- if (q->enabled)
- BUG_ON(macvtap_disable_queue(q));
-
- vlan->numqueues--;
- RCU_INIT_POINTER(q->vlan, NULL);
- sock_put(&q->sk);
- list_del_init(&q->next);
- }
-
- rtnl_unlock();
-
- synchronize_rcu();
- sock_put(&q->sk);
-}
-
-/*
- * Select a queue based on the rxq of the device on which this packet
- * arrived. If the incoming device is not mq, calculate a flow hash
- * to select a queue. If all fails, find the first available queue.
- * Cache vlan->numvtaps since it can become zero during the execution
- * of this function.
- */
-static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
- struct sk_buff *skb)
-{
- struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *tap = NULL;
- /* Access to taps array is protected by rcu, but access to numvtaps
- * isn't. Below we use it to lookup a queue, but treat it as a hint
- * and validate that the result isn't NULL - in case we are
- * racing against queue removal.
- */
- int numvtaps = ACCESS_ONCE(vlan->numvtaps);
- __u32 rxq;
-
- if (!numvtaps)
- goto out;
-
- if (numvtaps == 1)
- goto single;
-
- /* Check if we can use flow to select a queue */
- rxq = skb_get_hash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- goto out;
- }
-
- if (likely(skb_rx_queue_recorded(skb))) {
- rxq = skb_get_rx_queue(skb);
-
- while (unlikely(rxq >= numvtaps))
- rxq -= numvtaps;
- tap = rcu_dereference(vlan->taps[rxq]);
- goto out;
- }
-
-single:
- tap = rcu_dereference(vlan->taps[0]);
-out:
- return tap;
-}
-
-/*
- * The net_device is going away, give up the reference
- * that it holds on all queues and safely set the pointer
- * from the queues to NULL.
- */
-static void macvtap_del_queues(struct net_device *dev)
+static void macvtap_count_tx_dropped(struct tap_dev *tap)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q, *tmp;
+ struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+ struct macvlan_dev *vlan = &vlantap->vlan;
- ASSERT_RTNL();
- list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
- list_del_init(&q->next);
- RCU_INIT_POINTER(q->vlan, NULL);
- if (q->enabled)
- vlan->numvtaps--;
- vlan->numqueues--;
- sock_put(&q->sk);
- }
- BUG_ON(vlan->numvtaps);
- BUG_ON(vlan->numqueues);
- /* guarantee that any future macvtap_set_queue will fail */
- vlan->numvtaps = MAX_MACVTAP_QUEUES;
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
}
-static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
+static void macvtap_count_rx_dropped(struct tap_dev *tap)
{
- struct sk_buff *skb = *pskb;
- struct net_device *dev = skb->dev;
- struct macvlan_dev *vlan;
- struct macvtap_queue *q;
- netdev_features_t features = TAP_FEATURES;
-
- vlan = macvtap_get_vlan_rcu(dev);
- if (!vlan)
- return RX_HANDLER_PASS;
-
- q = macvtap_get_queue(dev, skb);
- if (!q)
- return RX_HANDLER_PASS;
-
- if (__skb_array_full(&q->skb_array))
- goto drop;
-
- skb_push(skb, ETH_HLEN);
-
- /* Apply the forward feature mask so that we perform segmentation
- * according to users wishes. This only works if VNET_HDR is
- * enabled.
- */
- if (q->flags & IFF_VNET_HDR)
- features |= vlan->tap_features;
- if (netif_needs_gso(skb, features)) {
- struct sk_buff *segs = __skb_gso_segment(skb, features, false);
-
- if (IS_ERR(segs))
- goto drop;
+ struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+ struct macvlan_dev *vlan = &vlantap->vlan;
- if (!segs) {
- if (skb_array_produce(&q->skb_array, skb))
- goto drop;
- goto wake_up;
- }
-
- consume_skb(skb);
- while (segs) {
- struct sk_buff *nskb = segs->next;
-
- segs->next = NULL;
- if (skb_array_produce(&q->skb_array, segs)) {
- kfree_skb(segs);
- kfree_skb_list(nskb);
- break;
- }
- segs = nskb;
- }
- } else {
- /* If we receive a partial checksum and the tap side
- * doesn't support checksum offload, compute the checksum.
- * Note: it doesn't matter which checksum feature to
- * check, we either support them all or none.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- !(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
- goto drop;
- if (skb_array_produce(&q->skb_array, skb))
- goto drop;
- }
-
-wake_up:
- wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
- return RX_HANDLER_CONSUMED;
-
-drop:
- /* Count errors/drops only here, thus don't care about args. */
macvlan_count_rx(vlan, 0, 0, 0);
- kfree_skb(skb);
- return RX_HANDLER_CONSUMED;
-}
-
-static int macvtap_get_minor(struct macvlan_dev *vlan)
-{
- int retval = -ENOMEM;
-
- mutex_lock(&minor_lock);
- retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
- if (retval >= 0) {
- vlan->minor = retval;
- } else if (retval == -ENOSPC) {
- netdev_err(vlan->dev, "Too many macvtap devices\n");
- retval = -EINVAL;
- }
- mutex_unlock(&minor_lock);
- return retval < 0 ? retval : 0;
-}
-
-static void macvtap_free_minor(struct macvlan_dev *vlan)
-{
- mutex_lock(&minor_lock);
- if (vlan->minor) {
- idr_remove(&minor_idr, vlan->minor);
- vlan->minor = 0;
- }
- mutex_unlock(&minor_lock);
}
-static struct net_device *dev_get_by_macvtap_minor(int minor)
+static void macvtap_update_features(struct tap_dev *tap,
+ netdev_features_t features)
{
- struct net_device *dev = NULL;
- struct macvlan_dev *vlan;
+ struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+ struct macvlan_dev *vlan = &vlantap->vlan;
- mutex_lock(&minor_lock);
- vlan = idr_find(&minor_idr, minor);
- if (vlan) {
- dev = vlan->dev;
- dev_hold(dev);
- }
- mutex_unlock(&minor_lock);
- return dev;
+ vlan->set_features = features;
+ netdev_update_features(vlan->dev);
}
static int macvtap_newlink(struct net *src_net,
@@ -474,17 +82,24 @@ static int macvtap_newlink(struct net *src_net,
struct nlattr *tb[],
struct nlattr *data[])
{
- struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvtap_dev *vlantap = netdev_priv(dev);
int err;
- INIT_LIST_HEAD(&vlan->queue_list);
+ INIT_LIST_HEAD(&vlantap->tap.queue_list);
/* Since macvlan supports all offloads by default, make
* tap support all offloads also.
*/
- vlan->tap_features = TUN_OFFLOADS;
+ vlantap->tap.tap_features = TUN_OFFLOADS;
+
+ /* Register callbacks for rx/tx drops accounting and updating
+ * net_device features
+ */
+ vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped;
+ vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped;
+ vlantap->tap.update_features = macvtap_update_features;
- err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+ err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
if (err)
return err;
@@ -497,14 +112,18 @@ static int macvtap_newlink(struct net *src_net,
return err;
}
+ vlantap->tap.dev = vlantap->vlan.dev;
+
return 0;
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
+ struct macvtap_dev *vlantap = netdev_priv(dev);
+
netdev_rx_handler_unregister(dev);
- macvtap_del_queues(dev);
+ tap_del_queues(&vlantap->tap);
macvlan_dellink(dev, head);
}
@@ -519,749 +138,14 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
.setup = macvtap_setup,
.newlink = macvtap_newlink,
.dellink = macvtap_dellink,
+ .priv_size = sizeof(struct macvtap_dev),
};
-
-static void macvtap_sock_write_space(struct sock *sk)
-{
- wait_queue_head_t *wqueue;
-
- if (!sock_writeable(sk) ||
- !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
- return;
-
- wqueue = sk_sleep(sk);
- if (wqueue && waitqueue_active(wqueue))
- wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
-}
-
-static void macvtap_sock_destruct(struct sock *sk)
-{
- struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
-
- skb_array_cleanup(&q->skb_array);
-}
-
-static int macvtap_open(struct inode *inode, struct file *file)
-{
- struct net *net = current->nsproxy->net_ns;
- struct net_device *dev;
- struct macvtap_queue *q;
- int err = -ENODEV;
-
- rtnl_lock();
- dev = dev_get_by_macvtap_minor(iminor(inode));
- if (!dev)
- goto err;
-
- err = -ENOMEM;
- q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
- &macvtap_proto, 0);
- if (!q)
- goto err;
-
- RCU_INIT_POINTER(q->sock.wq, &q->wq);
- init_waitqueue_head(&q->wq.wait);
- q->sock.type = SOCK_RAW;
- q->sock.state = SS_CONNECTED;
- q->sock.file = file;
- q->sock.ops = &macvtap_socket_ops;
- sock_init_data(&q->sock, &q->sk);
- q->sk.sk_write_space = macvtap_sock_write_space;
- q->sk.sk_destruct = macvtap_sock_destruct;
- q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
- q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-
- /*
- * so far only KVM virtio_net uses macvtap, enable zero copy between
- * guest kernel and host kernel when lower device supports zerocopy
- *
- * The macvlan supports zerocopy iff the lower device supports zero
- * copy so we don't have to look at the lower device directly.
- */
- if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
- sock_set_flag(&q->sk, SOCK_ZEROCOPY);
-
- err = -ENOMEM;
- if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
- goto err_array;
-
- err = macvtap_set_queue(dev, file, q);
- if (err)
- goto err_queue;
-
- dev_put(dev);
-
- rtnl_unlock();
- return err;
-
-err_queue:
- skb_array_cleanup(&q->skb_array);
-err_array:
- sock_put(&q->sk);
-err:
- if (dev)
- dev_put(dev);
-
- rtnl_unlock();
- return err;
-}
-
-static int macvtap_release(struct inode *inode, struct file *file)
-{
- struct macvtap_queue *q = file->private_data;
- macvtap_put_queue(q);
- return 0;
-}
-
-static unsigned int macvtap_poll(struct file *file, poll_table * wait)
-{
- struct macvtap_queue *q = file->private_data;
- unsigned int mask = POLLERR;
-
- if (!q)
- goto out;
-
- mask = 0;
- poll_wait(file, &q->wq.wait, wait);
-
- if (!skb_array_empty(&q->skb_array))
- mask |= POLLIN | POLLRDNORM;
-
- if (sock_writeable(&q->sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
- sock_writeable(&q->sk)))
- mask |= POLLOUT | POLLWRNORM;
-
-out:
- return mask;
-}
-
-static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
- size_t len, size_t linear,
- int noblock, int *err)
-{
- struct sk_buff *skb;
-
- /* Under a page? Don't bother with paged skb. */
- if (prepad + len < PAGE_SIZE || !linear)
- linear = len;
-
- skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err, 0);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, prepad);
- skb_put(skb, linear);
- skb->data_len = len - linear;
- skb->len += len - linear;
-
- return skb;
-}
-
-/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
-#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
-
-/* Get packet from user space buffer */
-static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
- struct iov_iter *from, int noblock)
-{
- int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
- struct sk_buff *skb;
- struct macvlan_dev *vlan;
- unsigned long total_len = iov_iter_count(from);
- unsigned long len = total_len;
- int err;
- struct virtio_net_hdr vnet_hdr = { 0 };
- int vnet_hdr_len = 0;
- int copylen = 0;
- int depth;
- bool zerocopy = false;
- size_t linear;
-
- if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = q->vnet_hdr_sz;
-
- err = -EINVAL;
- if (len < vnet_hdr_len)
- goto err;
- len -= vnet_hdr_len;
-
- err = -EFAULT;
- if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
- goto err;
- iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
- if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- macvtap16_to_cpu(q, vnet_hdr.csum_start) +
- macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
- macvtap16_to_cpu(q, vnet_hdr.hdr_len))
- vnet_hdr.hdr_len = cpu_to_macvtap16(q,
- macvtap16_to_cpu(q, vnet_hdr.csum_start) +
- macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
- err = -EINVAL;
- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
- goto err;
- }
-
- err = -EINVAL;
- if (unlikely(len < ETH_HLEN))
- goto err;
-
- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
- struct iov_iter i;
-
- copylen = vnet_hdr.hdr_len ?
- macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
- else if (copylen < ETH_HLEN)
- copylen = ETH_HLEN;
- linear = copylen;
- i = *from;
- iov_iter_advance(&i, copylen);
- if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
- zerocopy = true;
- }
-
- if (!zerocopy) {
- copylen = len;
- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
- if (linear > good_linear)
- linear = good_linear;
- else if (linear < ETH_HLEN)
- linear = ETH_HLEN;
- }
-
- skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
- linear, noblock, &err);
- if (!skb)
- goto err;
-
- if (zerocopy)
- err = zerocopy_sg_from_iter(skb, from);
- else
- err = skb_copy_datagram_from_iter(skb, 0, from, len);
-
- if (err)
- goto err_kfree;
-
- skb_set_network_header(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb->protocol = eth_hdr(skb)->h_proto;
-
- if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
- macvtap_is_little_endian(q));
- if (err)
- goto err_kfree;
- }
-
- skb_probe_transport_header(skb, ETH_HLEN);
-
- /* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
- skb_set_network_header(skb, depth);
-
- rcu_read_lock();
- vlan = rcu_dereference(q->vlan);
- /* copy skb_ubuf_info for callback when skb has no error */
- if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = m->msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
- } else if (m && m->msg_control) {
- struct ubuf_info *uarg = m->msg_control;
- uarg->callback(uarg, false);
- }
-
- if (vlan) {
- skb->dev = vlan->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
- rcu_read_unlock();
-
- return total_len;
-
-err_kfree:
- kfree_skb(skb);
-
-err:
- rcu_read_lock();
- vlan = rcu_dereference(q->vlan);
- if (vlan)
- this_cpu_inc(vlan->pcpu_stats->tx_dropped);
- rcu_read_unlock();
-
- return err;
-}
-
-static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct macvtap_queue *q = file->private_data;
-
- return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
-}
-
-/* Put packet to the user space buffer */
-static ssize_t macvtap_put_user(struct macvtap_queue *q,
- const struct sk_buff *skb,
- struct iov_iter *iter)
-{
- int ret;
- int vnet_hdr_len = 0;
- int vlan_offset = 0;
- int total;
-
- if (q->flags & IFF_VNET_HDR) {
- struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = q->vnet_hdr_sz;
- if (iov_iter_count(iter) < vnet_hdr_len)
- return -EINVAL;
-
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- macvtap_is_little_endian(q)))
- BUG();
-
- if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
- sizeof(vnet_hdr))
- return -EFAULT;
-
- iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
- }
- total = vnet_hdr_len;
- total += skb->len;
-
- if (skb_vlan_tag_present(skb)) {
- struct {
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- } veth;
- veth.h_vlan_proto = skb->vlan_proto;
- veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
-
- vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- total += VLAN_HLEN;
-
- ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
- if (ret || !iov_iter_count(iter))
- goto done;
-
- ret = copy_to_iter(&veth, sizeof(veth), iter);
- if (ret != sizeof(veth) || !iov_iter_count(iter))
- goto done;
- }
-
- ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
- skb->len - vlan_offset);
-
-done:
- return ret ? ret : total;
-}
-
-static ssize_t macvtap_do_read(struct macvtap_queue *q,
- struct iov_iter *to,
- int noblock)
-{
- DEFINE_WAIT(wait);
- struct sk_buff *skb;
- ssize_t ret = 0;
-
- if (!iov_iter_count(to))
- return 0;
-
- while (1) {
- if (!noblock)
- prepare_to_wait(sk_sleep(&q->sk), &wait,
- TASK_INTERRUPTIBLE);
-
- /* Read frames from the queue */
- skb = skb_array_consume(&q->skb_array);
- if (skb)
- break;
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- /* Nothing to read, let's sleep */
- schedule();
- }
- if (!noblock)
- finish_wait(sk_sleep(&q->sk), &wait);
-
- if (skb) {
- ret = macvtap_put_user(q, skb, to);
- if (unlikely(ret < 0))
- kfree_skb(skb);
- else
- consume_skb(skb);
- }
- return ret;
-}
-
-static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct file *file = iocb->ki_filp;
- struct macvtap_queue *q = file->private_data;
- ssize_t len = iov_iter_count(to), ret;
-
- ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
- ret = min_t(ssize_t, ret, len);
- if (ret > 0)
- iocb->ki_pos = ret;
- return ret;
-}
-
-static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
-{
- struct macvlan_dev *vlan;
-
- ASSERT_RTNL();
- vlan = rtnl_dereference(q->vlan);
- if (vlan)
- dev_hold(vlan->dev);
-
- return vlan;
-}
-
-static void macvtap_put_vlan(struct macvlan_dev *vlan)
-{
- dev_put(vlan->dev);
-}
-
-static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
-{
- struct macvtap_queue *q = file->private_data;
- struct macvlan_dev *vlan;
- int ret;
-
- vlan = macvtap_get_vlan(q);
- if (!vlan)
- return -EINVAL;
-
- if (flags & IFF_ATTACH_QUEUE)
- ret = macvtap_enable_queue(vlan->dev, file, q);
- else if (flags & IFF_DETACH_QUEUE)
- ret = macvtap_disable_queue(q);
- else
- ret = -EINVAL;
-
- macvtap_put_vlan(vlan);
- return ret;
-}
-
-static int set_offload(struct macvtap_queue *q, unsigned long arg)
-{
- struct macvlan_dev *vlan;
- netdev_features_t features;
- netdev_features_t feature_mask = 0;
-
- vlan = rtnl_dereference(q->vlan);
- if (!vlan)
- return -ENOLINK;
-
- features = vlan->dev->features;
-
- if (arg & TUN_F_CSUM) {
- feature_mask = NETIF_F_HW_CSUM;
-
- if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
- if (arg & TUN_F_TSO_ECN)
- feature_mask |= NETIF_F_TSO_ECN;
- if (arg & TUN_F_TSO4)
- feature_mask |= NETIF_F_TSO;
- if (arg & TUN_F_TSO6)
- feature_mask |= NETIF_F_TSO6;
- }
-
- if (arg & TUN_F_UFO)
- feature_mask |= NETIF_F_UFO;
- }
-
- /* tun/tap driver inverts the usage for TSO offloads, where
- * setting the TSO bit means that the userspace wants to
- * accept TSO frames and turning it off means that user space
- * does not support TSO.
- * For macvtap, we have to invert it to mean the same thing.
- * When user space turns off TSO, we turn off GSO/LRO so that
- * user-space will not receive TSO frames.
- */
- if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
- features |= RX_OFFLOADS;
- else
- features &= ~RX_OFFLOADS;
-
- /* tap_features are the same as features on tun/tap and
- * reflect user expectations.
- */
- vlan->tap_features = feature_mask;
- vlan->set_features = features;
- netdev_update_features(vlan->dev);
-
- return 0;
-}
-
-/*
- * provide compatibility with generic tun/tap interface
- */
-static long macvtap_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct macvtap_queue *q = file->private_data;
- struct macvlan_dev *vlan;
- void __user *argp = (void __user *)arg;
- struct ifreq __user *ifr = argp;
- unsigned int __user *up = argp;
- unsigned short u;
- int __user *sp = argp;
- struct sockaddr sa;
- int s;
- int ret;
-
- switch (cmd) {
- case TUNSETIFF:
- /* ignore the name, just look at flags */
- if (get_user(u, &ifr->ifr_flags))
- return -EFAULT;
-
- ret = 0;
- if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
- ret = -EINVAL;
- else
- q->flags = (q->flags & ~MACVTAP_FEATURES) | u;
-
- return ret;
-
- case TUNGETIFF:
- rtnl_lock();
- vlan = macvtap_get_vlan(q);
- if (!vlan) {
- rtnl_unlock();
- return -ENOLINK;
- }
-
- ret = 0;
- u = q->flags;
- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
- put_user(u, &ifr->ifr_flags))
- ret = -EFAULT;
- macvtap_put_vlan(vlan);
- rtnl_unlock();
- return ret;
-
- case TUNSETQUEUE:
- if (get_user(u, &ifr->ifr_flags))
- return -EFAULT;
- rtnl_lock();
- ret = macvtap_ioctl_set_queue(file, u);
- rtnl_unlock();
- return ret;
-
- case TUNGETFEATURES:
- if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
- return -EFAULT;
- return 0;
-
- case TUNSETSNDBUF:
- if (get_user(s, sp))
- return -EFAULT;
-
- q->sk.sk_sndbuf = s;
- return 0;
-
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNGETVNETLE:
- s = !!(q->flags & MACVTAP_VNET_LE);
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETLE:
- if (get_user(s, sp))
- return -EFAULT;
- if (s)
- q->flags |= MACVTAP_VNET_LE;
- else
- q->flags &= ~MACVTAP_VNET_LE;
- return 0;
-
- case TUNGETVNETBE:
- return macvtap_get_vnet_be(q, sp);
-
- case TUNSETVNETBE:
- return macvtap_set_vnet_be(q, sp);
-
- case TUNSETOFFLOAD:
- /* let the user check for future flags */
- if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN | TUN_F_UFO))
- return -EINVAL;
-
- rtnl_lock();
- ret = set_offload(q, arg);
- rtnl_unlock();
- return ret;
-
- case SIOCGIFHWADDR:
- rtnl_lock();
- vlan = macvtap_get_vlan(q);
- if (!vlan) {
- rtnl_unlock();
- return -ENOLINK;
- }
- ret = 0;
- u = vlan->dev->type;
- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
- copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
- put_user(u, &ifr->ifr_hwaddr.sa_family))
- ret = -EFAULT;
- macvtap_put_vlan(vlan);
- rtnl_unlock();
- return ret;
-
- case SIOCSIFHWADDR:
- if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
- return -EFAULT;
- rtnl_lock();
- vlan = macvtap_get_vlan(q);
- if (!vlan) {
- rtnl_unlock();
- return -ENOLINK;
- }
- ret = dev_set_mac_address(vlan->dev, &sa);
- macvtap_put_vlan(vlan);
- rtnl_unlock();
- return ret;
-
- default:
- return -EINVAL;
- }
-}
-
-#ifdef CONFIG_COMPAT
-static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations macvtap_fops = {
- .owner = THIS_MODULE,
- .open = macvtap_open,
- .release = macvtap_release,
- .read_iter = macvtap_read_iter,
- .write_iter = macvtap_write_iter,
- .poll = macvtap_poll,
- .llseek = no_llseek,
- .unlocked_ioctl = macvtap_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = macvtap_compat_ioctl,
-#endif
-};
-
-static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
- size_t total_len)
-{
- struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
-}
-
-static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
- size_t total_len, int flags)
-{
- struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- int ret;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
- return -EINVAL;
- ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
- if (ret > total_len) {
- m->msg_flags |= MSG_TRUNC;
- ret = flags & MSG_TRUNC ? ret : total_len;
- }
- return ret;
-}
-
-static int macvtap_peek_len(struct socket *sock)
-{
- struct macvtap_queue *q = container_of(sock, struct macvtap_queue,
- sock);
- return skb_array_peek_len(&q->skb_array);
-}
-
-/* Ops structure to mimic raw sockets with tun */
-static const struct proto_ops macvtap_socket_ops = {
- .sendmsg = macvtap_sendmsg,
- .recvmsg = macvtap_recvmsg,
- .peek_len = macvtap_peek_len,
-};
-
-/* Get an underlying socket object from tun file. Returns error unless file is
- * attached to a device. The returned object works like a packet socket, it
- * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
- * holding a reference to the file for as long as the socket is in use. */
-struct socket *macvtap_get_socket(struct file *file)
-{
- struct macvtap_queue *q;
- if (file->f_op != &macvtap_fops)
- return ERR_PTR(-EINVAL);
- q = file->private_data;
- if (!q)
- return ERR_PTR(-EBADFD);
- return &q->sock;
-}
-EXPORT_SYMBOL_GPL(macvtap_get_socket);
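macvtap_get_socket() is what lets another kernel subsystem (vhost-net in practice) drive a queue through the proto_ops above. A rough sketch of such a caller, assuming the queue was opened without IFF_VNET_HDR so the buffer is a plain Ethernet frame; the function name is made up, and <linux/file.h>, <linux/net.h> and <linux/uio.h> provide fget()/fput(), kernel_sendmsg() and struct kvec:

static int example_macvtap_xmit(int fd, void *frame, size_t len)
{
	struct file *file = fget(fd);	/* fd handed in from user space */
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = { .iov_base = frame, .iov_len = len };
	struct socket *sock;
	int ret;

	if (!file)
		return -EBADF;

	sock = macvtap_get_socket(file);
	if (IS_ERR(sock)) {
		fput(file);
		return PTR_ERR(sock);
	}

	ret = kernel_sendmsg(sock, &msg, &iov, 1, len);
	fput(file);
	return ret;
}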
-
-static int macvtap_queue_resize(struct macvlan_dev *vlan)
-{
- struct net_device *dev = vlan->dev;
- struct macvtap_queue *q;
- struct skb_array **arrays;
- int n = vlan->numqueues;
- int ret, i = 0;
-
- arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
- if (!arrays)
- return -ENOMEM;
-
- list_for_each_entry(q, &vlan->queue_list, next)
- arrays[i++] = &q->skb_array;
-
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
-
- kfree(arrays);
- return ret;
-}
-
static int macvtap_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct macvlan_dev *vlan;
+ struct macvtap_dev *vlantap;
struct device *classdev;
dev_t devt;
int err;
@@ -1271,7 +155,7 @@ static int macvtap_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
- vlan = netdev_priv(dev);
+ vlantap = netdev_priv(dev);
switch (event) {
case NETDEV_REGISTER:
@@ -1279,15 +163,15 @@ static int macvtap_device_event(struct notifier_block *unused,
* been registered but before register_netdevice has
* finished running.
*/
- err = macvtap_get_minor(vlan);
+ err = tap_get_minor(macvtap_major, &vlantap->tap);
if (err)
return notifier_from_errno(err);
- devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
classdev = device_create(&macvtap_class, &dev->dev, devt,
dev, tap_name);
if (IS_ERR(classdev)) {
- macvtap_free_minor(vlan);
+ tap_free_minor(macvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
}
err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
@@ -1297,15 +181,15 @@ static int macvtap_device_event(struct notifier_block *unused,
break;
case NETDEV_UNREGISTER:
/* vlan->minor == 0 if NETDEV_REGISTER above failed */
- if (vlan->minor == 0)
+ if (vlantap->tap.minor == 0)
break;
sysfs_remove_link(&dev->dev.kobj, tap_name);
- devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
device_destroy(&macvtap_class, devt);
- macvtap_free_minor(vlan);
+ tap_free_minor(macvtap_major, &vlantap->tap);
break;
case NETDEV_CHANGE_TX_QUEUE_LEN:
- if (macvtap_queue_resize(vlan))
+ if (tap_queue_resize(&vlantap->tap))
return NOTIFY_BAD;
break;
}
@@ -1321,38 +205,31 @@ static int macvtap_init(void)
{
int err;
- err = alloc_chrdev_region(&macvtap_major, 0,
- MACVTAP_NUM_DEVS, "macvtap");
- if (err)
- goto out1;
+ err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
- cdev_init(&macvtap_cdev, &macvtap_fops);
- err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
if (err)
- goto out2;
+ goto out1;
err = class_register(&macvtap_class);
if (err)
- goto out3;
+ goto out2;
err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
- goto out4;
+ goto out3;
err = macvlan_link_register(&macvtap_link_ops);
if (err)
- goto out5;
+ goto out4;
return 0;
-out5:
- unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
- class_unregister(&macvtap_class);
+ unregister_netdevice_notifier(&macvtap_notifier_block);
out3:
- cdev_del(&macvtap_cdev);
+ class_unregister(&macvtap_class);
out2:
- unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+ tap_destroy_cdev(macvtap_major, &macvtap_cdev);
out1:
return err;
}
@@ -1363,9 +240,7 @@ static void macvtap_exit(void)
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
class_unregister(&macvtap_class);
- cdev_del(&macvtap_cdev);
- unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
- idr_destroy(&minor_idr);
+ tap_destroy_cdev(macvtap_major, &macvtap_cdev);
}
module_exit(macvtap_exit);
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 3e027ed0b3bb..077364cbf439 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -342,6 +342,184 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
/**
+ * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS
+ * @mdio: MDIO interface
+ * @cmd: Ethtool request structure
+ * @npage_adv: Modes currently advertised on next pages
+ * @npage_lpa: Modes advertised by link partner on next pages
+ *
+ * The @cmd parameter is expected to have been cleared before calling
+ * mdio45_ethtool_ksettings_get_npage().
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. The
+ * caller must pass them in.
+ */
+void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
+ struct ethtool_link_ksettings *cmd,
+ u32 npage_adv, u32 npage_lpa)
+{
+ int reg;
+ u32 speed, supported = 0, advertising = 0, lp_advertising = 0;
+
+ BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22);
+ BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45);
+
+ cmd->base.phy_address = mdio->prtad;
+ cmd->base.mdio_support =
+ mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22);
+
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_CTRL2);
+ switch (reg & MDIO_PMA_CTRL2_TYPE) {
+ case MDIO_PMA_CTRL2_10GBT:
+ case MDIO_PMA_CTRL2_1000BT:
+ case MDIO_PMA_CTRL2_100BTX:
+ case MDIO_PMA_CTRL2_10BT:
+ cmd->base.port = PORT_TP;
+ supported = SUPPORTED_TP;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_SPEED);
+ if (reg & MDIO_SPEED_10G)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (reg & MDIO_PMA_SPEED_1000)
+ supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+ if (reg & MDIO_PMA_SPEED_100)
+ supported |= (SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half);
+ if (reg & MDIO_PMA_SPEED_10)
+ supported |= (SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half);
+ advertising = ADVERTISED_TP;
+ break;
+
+ case MDIO_PMA_CTRL2_10GBCX4:
+ cmd->base.port = PORT_OTHER;
+ supported = 0;
+ advertising = 0;
+ break;
+
+ case MDIO_PMA_CTRL2_10GBKX4:
+ case MDIO_PMA_CTRL2_10GBKR:
+ case MDIO_PMA_CTRL2_1000BKX:
+ cmd->base.port = PORT_OTHER;
+ supported = SUPPORTED_Backplane;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_EXTABLE);
+ if (reg & MDIO_PMA_EXTABLE_10GBKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (reg & MDIO_PMA_EXTABLE_10GBKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+ if (reg & MDIO_PMA_EXTABLE_1000BKX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ if (reg & MDIO_PMA_10GBR_FECABLE_ABLE)
+ supported |= SUPPORTED_10000baseR_FEC;
+ advertising = ADVERTISED_Backplane;
+ break;
+
+ /* All the other defined modes are flavours of optical */
+ default:
+ cmd->base.port = PORT_FIBRE;
+ supported = SUPPORTED_FIBRE;
+ advertising = ADVERTISED_FIBRE;
+ break;
+ }
+
+ if (mdio->mmds & MDIO_DEVS_AN) {
+ supported |= SUPPORTED_Autoneg;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
+ MDIO_CTRL1);
+ if (reg & MDIO_AN_CTRL1_ENABLE) {
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ advertising |=
+ ADVERTISED_Autoneg |
+ mdio45_get_an(mdio, MDIO_AN_ADVERTISE) |
+ npage_adv;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (cmd->base.autoneg) {
+ u32 modes = 0;
+ int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad,
+ MDIO_MMD_AN, MDIO_STAT1);
+
+ /* If AN is complete and successful, report best common
+ * mode, otherwise report best advertised mode.
+ */
+ if (an_stat & MDIO_AN_STAT1_COMPLETE) {
+ lp_advertising =
+ mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa;
+ if (an_stat & MDIO_AN_STAT1_LPABLE)
+ lp_advertising |= ADVERTISED_Autoneg;
+ modes = advertising & lp_advertising;
+ }
+ if ((modes & ~ADVERTISED_Autoneg) == 0)
+ modes = advertising;
+
+ if (modes & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full)) {
+ speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else if (modes & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseKX_Full)) {
+ speed = SPEED_1000;
+ cmd->base.duplex = !(modes & ADVERTISED_1000baseT_Half);
+ } else if (modes & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ speed = SPEED_100;
+ cmd->base.duplex = !!(modes & ADVERTISED_100baseT_Full);
+ } else {
+ speed = SPEED_10;
+ cmd->base.duplex = !!(modes & ADVERTISED_10baseT_Full);
+ }
+ } else {
+ /* Report forced settings */
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1);
+ speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1)
+ * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
+ cmd->base.duplex = (reg & MDIO_CTRL1_FULLDPLX ||
+ speed == SPEED_10000);
+ }
+
+ cmd->base.speed = speed;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ /* 10GBASE-T MDI/MDI-X */
+ if (cmd->base.port == PORT_TP && (cmd->base.speed == SPEED_10000)) {
+ switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBT_SWAPPOL)) {
+ case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
+ break;
+ case 0:
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
+ break;
+ default:
+ /* It's complicated... */
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(mdio45_ethtool_ksettings_get_npage);
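A clause-45 driver would normally call the new helper from its ethtool get_link_ksettings() callback, passing any next-page advertising masks it has decoded itself, or zero when it has none. A sketch under those assumptions; struct my_priv and my_get_link_ksettings() are invented names:

struct my_priv {
	struct mdio_if_info mdio;
};

static int my_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct my_priv *priv = netdev_priv(dev);

	/* the helper expects a cleared structure */
	memset(cmd, 0, sizeof(*cmd));
	mdio45_ethtool_ksettings_get_npage(&priv->mdio, cmd, 0, 0);
	return 0;
}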
+
+/**
* mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
* @mdio: MDIO interface
* @mii_data: MII ioctl data structure
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 2de7faee9b19..b91603835d26 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -58,7 +58,7 @@ static int nlmon_close(struct net_device *dev)
return netlink_remove_tap(&nlmon->nt);
}
-static struct rtnl_link_stats64 *
+static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
int i;
@@ -86,8 +86,6 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_bytes = bytes;
stats->tx_bytes = 0;
-
- return stats;
}
static u32 always_on(struct net_device *dev)
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 356859ac7c18..407b0b601ea8 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,6 +1,7 @@
# Makefile for Linux PHY drivers and MDIO bus drivers
-libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o \
+ mdio-boardinfo.o
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf614c4e..b0492ef2cdaa 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, MII_BCM63XX_IR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~MII_BCM63XX_IR_GMASK;
+ else
+ reg |= MII_BCM63XX_IR_GMASK;
+
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ return err;
+}
+
static int bcm63xx_config_init(struct phy_device *phydev)
{
int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 264b085d796b..d1c2614dad3a 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
+{
+ /* +1 RC_CAL codes for RL centering for both LT and HT conditions */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003);
+
+ /* Cut master bias current by 2% to compensate for RC_CAL offset */
+ bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b);
+
+ /* Improve hybrid leakage */
+ bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3);
+
+ /* Change rx_on_tune 8 to 0xf */
+ bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6);
+
+ /* Change 100Tx EEE bandwidth */
+ bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d);
+
+ /* Enable ffe zero detection for Vitesse interoperability */
+ bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
+
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
{
u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
@@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
u8 count;
int ret = 0;
+ /* Newer devices have moved the revision information back into a
+ * standard location in MII_PHYS_ID[23]
+ */
+ if (rev == 0)
+ rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
phydev_name(phydev), phydev->drv->name, rev, patch);
@@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
case 0x10:
ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
break;
+ case 0x01:
+ ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev);
+ break;
default:
break;
}
@@ -416,8 +450,10 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM74371, "Broadcom BCM74371"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
@@ -430,12 +466,14 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7278, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
{ PHY_ID_BCM7346, 0xfffffff0, },
{ PHY_ID_BCM7362, 0xfffffff0, },
{ PHY_ID_BCM7425, 0xfffffff0, },
{ PHY_ID_BCM7429, 0xfffffff0, },
+ { PHY_ID_BCM74371, 0xfffffff0, },
{ PHY_ID_BCM7439, 0xfffffff0, },
{ PHY_ID_BCM7435, 0xfffffff0, },
{ PHY_ID_BCM7445, 0xfffffff0, },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 4223e35490b0..9cd8b27d1292 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -30,6 +30,50 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+static int bcm54210e_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+ bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
+
+ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+
+ return 0;
+}
+
+static int bcm54612e_config_init(struct phy_device *phydev)
+{
+ /* Clear TX internal delay unless requested. */
+ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
+ /* Disable TXD to GTXCLK clock delay (default set) */
+ /* Bit 9 is the only field in shadow register 00011 */
+ bcm_phy_write_shadow(phydev, 0x03, 0);
+ }
+
+ /* Clear RX internal delay unless requested. */
+ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
+ u16 reg;
+
+ reg = bcm54xx_auxctl_read(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ /* Disable RXD to RXC delay (default set) */
+ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ /* Clear shadow selector field */
+ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
+ bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ MII_BCM54XX_AUXCTL_MISC_WREN | reg);
+ }
+
+ return 0;
+}
+
static int bcm54810_config(struct phy_device *phydev)
{
int rc, val;
@@ -230,7 +274,15 @@ static int bcm54xx_config_init(struct phy_device *phydev)
(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
bcm54xx_adjust_rxrefclk(phydev);
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
+ err = bcm54210e_config_init(phydev);
+ if (err)
+ return err;
+ } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) {
+ err = bcm54612e_config_init(phydev);
+ if (err)
+ return err;
+ } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
err = bcm54810_config(phydev);
if (err)
return err;
@@ -375,41 +427,6 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
-static int bcm54612e_config_aneg(struct phy_device *phydev)
-{
- int ret;
-
- /* First, auto-negotiate. */
- ret = genphy_config_aneg(phydev);
-
- /* Clear TX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
- /* Disable TXD to GTXCLK clock delay (default set) */
- /* Bit 9 is the only field in shadow register 00011 */
- bcm_phy_write_shadow(phydev, 0x03, 0);
- }
-
- /* Clear RX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
- u16 reg;
-
- /* Errata: reads require filling in the write selector field */
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC);
- reg = phy_read(phydev, MII_BCM54XX_AUX_CTL);
- /* Disable RXD to RXC delay (default set) */
- reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW;
- /* Clear shadow selector field */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_WREN | reg);
- }
-
- return ret;
-}
-
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -544,6 +561,17 @@ static struct phy_driver broadcom_drivers[] = {
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
+ .phy_id = PHY_ID_BCM54210E,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54210E",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
@@ -561,7 +589,7 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = bcm54612e_config_aneg,
+ .config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -682,6 +710,7 @@ module_phy_driver(broadcom_drivers);
static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
+ { PHY_ID_BCM54210E, 0xfffffff0 },
{ PHY_ID_BCM5461, 0xfffffff0 },
{ PHY_ID_BCM54612E, 0xfffffff0 },
{ PHY_ID_BCM54616S, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..a10d0e7fc5f7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#define TI_DP83848C_PHY_ID 0x20005ca0
+#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+ { TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
{ TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index e84ae084e259..19865530e0b1 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -32,7 +32,9 @@
#define DP83867_CFG3 0x1e
/* Extended Registers */
+#define DP83867_CFG4 0x0031
#define DP83867_RGMIICTL 0x0032
+#define DP83867_STRAP_STS1 0x006E
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_IO_MUX_CFG 0x0170
@@ -57,9 +59,13 @@
#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+/* STRAP_STS1 bits */
+#define DP83867_STRAP_STS1_RESERVED BIT(11)
+
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_PHYCR_RESERVED_MASK BIT(11)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -70,11 +76,21 @@
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+/* CFG4 bits */
+#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
+
+enum {
+ DP83867_PORT_MIRROING_KEEP,
+ DP83867_PORT_MIRROING_EN,
+ DP83867_PORT_MIRROING_DIS,
+};
+
struct dp83867_private {
int rx_id_delay;
int tx_id_delay;
int fifo_depth;
int io_impedance;
+ int port_mirroring;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -111,6 +127,24 @@ static int dp83867_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83867_MICR, micr_status);
}
+static int dp83867_config_port_mirroring(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 =
+ (struct dp83867_private *)phydev->priv;
+ u16 val;
+
+ val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR);
+
+ if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
+ val |= DP83867_CFG4_PORT_MIRROR_EN;
+ else
+ val &= ~DP83867_CFG4_PORT_MIRROR_EN;
+
+ phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val);
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
static int dp83867_of_init(struct phy_device *phydev)
{
@@ -132,14 +166,24 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
+ if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+ dp83867->port_mirroring = DP83867_PORT_MIRROING_EN;
+
+ if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
+ dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
+
return of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
}
@@ -153,7 +197,7 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret, val;
+ int ret, val, bs;
u16 delay;
if (!phydev->priv) {
@@ -176,6 +220,22 @@ static int dp83867_config_init(struct phy_device *phydev)
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+
+ /* Check whether the "port mirroring" N/A MODE4 strap was latched
+ * during the power-on bootstrap.
+ *
+ * If that mode was enabled by mistake, the PHY can drop into an
+ * internal test mode and stop transmitting on RGMII.
+ *
+ * In that case bit 11 (marked as RESERVED) of the STRAP_STS1
+ * register is set, and the matching PHYCR bit is cleared below.
+ */
+
+ bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
+ DP83867_DEVADDR);
+ if (bs & DP83867_STRAP_STS1_RESERVED)
+ val &= ~DP83867_PHYCR_RESERVED_MASK;
+
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
@@ -224,6 +284,9 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write(phydev, DP83867_CFG3, val);
}
+ if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
+ dp83867_config_port_mirroring(phydev);
+
return 0;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0b78210c0fa7..f9d0fa315a47 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -17,8 +17,10 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/unistd.h>
+#include <linux/hwmon.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -90,6 +92,17 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
+#define MII_88E1121_MISC_TEST 0x1a
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT 8
+#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN BIT(7)
+#define MII_88E1510_MISC_TEST_TEMP_IRQ BIT(6)
+#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN BIT(5)
+#define MII_88E1121_MISC_TEST_TEMP_MASK 0x1f
+
+#define MII_88E1510_TEMP_SENSOR 0x1b
+#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = {
struct marvell_priv {
u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+ char *hwmon_name;
+ struct device *hwmon_dev;
};
static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -1468,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+#ifdef CONFIG_HWMON
+static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
+{
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int m88e1121_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e1121_get_temp(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static umode_t m88e1121_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e1121_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = m88e1121_hwmon_chip_config,
+};
+
+static u32 m88e1121_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e1121_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1121_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e1121_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = {
+ .is_visible = m88e1121_hwmon_is_visible,
+ .read = m88e1121_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
+ .ops = &m88e1121_hwmon_hwmon_ops,
+ .info = m88e1121_hwmon_info,
+};
+
+static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+ if (ret < 0)
+ goto error;
+
+ *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
+ MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
+ /* convert to mC */
+ *temp *= 1000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ temp = temp / 1000;
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
+ (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
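The critical threshold handled above lives in a 5-bit field of MII_88E1121_MISC_TEST, encoded in 5 °C steps with a -25 °C offset. As a worked example (the numbers are illustrative, not from the source): writing 75000 m°C stores DIV_ROUND_CLOSEST(75, 5) + 5 = 20 in the field, and reading it back yields (20 * 5 - 25) * 1000 = 75000 m°C, so values round-trip on 5 °C boundaries and clamp to the field's 0..0x1f range.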
+
+int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
+{
+ int ret;
+
+ *alarm = false;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+ *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int m88e1510_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e1510_get_temp(phydev, temp);
+ break;
+ case hwmon_temp_crit:
+ err = m88e1510_get_temp_critical(phydev, temp);
+ break;
+ case hwmon_temp_max_alarm:
+ err = m88e1510_get_temp_alarm(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int m88e1510_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ err = m88e1510_set_temp_critical(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static umode_t m88e1510_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e1510_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1510_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e1510_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1510_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e1510_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = {
+ .is_visible = m88e1510_hwmon_is_visible,
+ .read = m88e1510_hwmon_read,
+ .write = m88e1510_hwmon_write,
+};
+
+static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
+ .ops = &m88e1510_hwmon_hwmon_ops,
+ .info = m88e1510_hwmon_info,
+};
+
+static int marvell_hwmon_name(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ const char *devname = dev_name(dev);
+ size_t len = strlen(devname);
+ int i, j;
+
+ priv->hwmon_name = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!priv->hwmon_name)
+ return -ENOMEM;
+
+ for (i = j = 0; i < len && devname[i]; i++) {
+ if (isalnum(devname[i]))
+ priv->hwmon_name[j++] = devname[i];
+ }
+
+ return 0;
+}
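The stripping above matters because the hwmon core rejects device names containing characters such as '-' or spaces, while an MDIO PHY's dev_name() is built from the bus id plus address and typically contains a dash and a colon; a hypothetical "stmmac-0:01" would therefore be registered under the hwmon name "stmmac001".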
+
+static int marvell_hwmon_probe(struct phy_device *phydev,
+ const struct hwmon_chip_info *chip)
+{
+ struct marvell_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ int err;
+
+ err = marvell_hwmon_name(phydev);
+ if (err)
+ return err;
+
+ priv->hwmon_dev = devm_hwmon_device_register_with_info(
+ dev, priv->hwmon_name, phydev, chip, NULL);
+
+ return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+}
+
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info);
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
+}
+#else
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int marvell_probe(struct phy_device *phydev)
{
struct marvell_priv *priv;
@@ -1481,14 +1861,47 @@ static int marvell_probe(struct phy_device *phydev)
return 0;
}
+static int m88e1121_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e1121_hwmon_probe(phydev);
+}
+
+static int m88e1510_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e1510_hwmon_probe(phydev);
+}
+
+static void marvell_remove(struct phy_device *phydev)
+{
+#ifdef CONFIG_HWMON
+
+ struct marvell_priv *priv = phydev->priv;
+
+ if (priv && priv->hwmon_dev)
+ hwmon_device_unregister(priv->hwmon_dev);
+#endif
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
- .probe = marvell_probe,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -1560,7 +1973,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = &m88e1121_probe,
+ .remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -1672,13 +2086,16 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1510",
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = &m88e1510_probe,
+ .remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .get_wol = &m88e1318_get_wol,
+ .set_wol = &m88e1318_set_wol,
.resume = &marvell_resume,
.suspend = &marvell_suspend,
.get_sset_count = marvell_get_sset_count,
@@ -1691,7 +2108,28 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = m88e1510_probe,
+ .remove = &marvell_remove,
+ .config_init = &marvell_config_init,
+ .config_aneg = &m88e1510_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E1545,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1545",
+ .probe = m88e1510_probe,
+ .remove = &marvell_remove,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -1724,6 +2162,25 @@ static struct phy_driver marvell_drivers[] = {
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
},
+ {
+ .phy_id = MARVELL_PHY_ID_88E6390,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6390",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .probe = m88e1510_probe,
+ .config_init = &marvell_config_init,
+ .config_aneg = &m88e1510_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ },
};
module_phy_driver(marvell_drivers);
@@ -1741,7 +2198,9 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65267af..46fe1ae919a3 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
+ iproc_mdio_config_clk(priv->base);
+
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
new file mode 100644
index 000000000000..6b988f77da08
--- /dev/null
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -0,0 +1,86 @@
+/*
+ * mdio-boardinfo - Collect pre-declarations for MDIO devices
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+#include "mdio-boardinfo.h"
+
+static LIST_HEAD(mdio_board_list);
+static DEFINE_MUTEX(mdio_board_lock);
+
+/**
+ * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
+ * from pre-collected board specific MDIO information
+ * @bus: MII bus to which the pre-declared MDIO devices are attached
+ * Context: can sleep
+ */
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
+{
+ struct mdio_board_entry *be;
+ struct mdio_device *mdiodev;
+ struct mdio_board_info *bi;
+ int ret;
+
+ mutex_lock(&mdio_board_lock);
+ list_for_each_entry(be, &mdio_board_list, list) {
+ bi = &be->board_info;
+
+ if (strcmp(bus->id, bi->bus_id))
+ continue;
+
+ mdiodev = mdio_device_create(bus, bi->mdio_addr);
+ if (IS_ERR(mdiodev))
+ continue;
+
+ strncpy(mdiodev->modalias, bi->modalias,
+ sizeof(mdiodev->modalias));
+ mdiodev->bus_match = mdio_device_bus_match;
+ mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+ ret = mdio_device_register(mdiodev);
+ if (ret) {
+ mdio_device_free(mdiodev);
+ continue;
+ }
+ }
+ mutex_unlock(&mdio_board_lock);
+}
+
+/**
+ * mdio_register_board_info - register MDIO devices for a given board
+ * @info: array of devices descriptors
+ * @n: number of descriptors provided
+ * Context: can sleep
+ *
+ * The board info passed can be marked with __initdata, but any embedded
+ * pointers such as platform_data are copied as-is and must remain valid
+ */
+int mdiobus_register_board_info(const struct mdio_board_info *info,
+ unsigned int n)
+{
+ struct mdio_board_entry *be;
+ unsigned int i;
+
+ be = kcalloc(n, sizeof(*be), GFP_KERNEL);
+ if (!be)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, be++, info++) {
+ memcpy(&be->board_info, info, sizeof(*info));
+ mutex_lock(&mdio_board_lock);
+ list_add_tail(&be->list, &mdio_board_list);
+ mutex_unlock(&mdio_board_lock);
+ }
+
+ return 0;
+}
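A board file would typically pre-declare its MDIO devices early (as the comment above notes, the array itself may be __initdata) so that the matching bus picks them up when it registers. A sketch of such a registration; the "fixed-0" bus id, the "my-switch" modalias and the platform data type are all invented, and the mdio_board_info field names are assumed from the header change that accompanies this file:

struct my_switch_platform_data {
	u32 enabled_ports;
};

static struct my_switch_platform_data my_switch_pdata = {
	.enabled_ports = 0x1f,
};

static struct mdio_board_info my_board_mdio_info[] __initdata = {
	{
		.bus_id		= "fixed-0",
		.modalias	= "my-switch",
		.mdio_addr	= 4,
		.platform_data	= &my_switch_pdata,
	},
};

static int __init my_board_init(void)
{
	return mdiobus_register_board_info(my_board_mdio_info,
					   ARRAY_SIZE(my_board_mdio_info));
}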
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
new file mode 100644
index 000000000000..00f98163e90e
--- /dev/null
+++ b/drivers/net/phy/mdio-boardinfo.h
@@ -0,0 +1,19 @@
+/*
+ * mdio-boardinfo.h - board info interface internal to the mdio_bus
+ * component
+ */
+
+#ifndef __MDIO_BOARD_INFO_H
+#define __MDIO_BOARD_INFO_H
+
+#include <linux/phy.h>
+#include <linux/mutex.h>
+
+struct mdio_board_entry {
+ struct list_head list;
+ struct mdio_board_info board_info;
+};
+
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus);
+
+#endif /* __MDIO_BOARD_INFO_H */
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 27ab63064f95..7faa79b254ef 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,8 +32,7 @@
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
- int mdc, mdio, mdo;
- int mdc_active_low, mdio_active_low, mdo_active_low;
+ struct gpio_desc *mdc, *mdio, *mdo;
};
static void *mdio_gpio_of_get_data(struct platform_device *pdev)
@@ -80,16 +79,14 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpio_set_value_cansleep(bitbang->mdo,
- 1 ^ bitbang->mdo_active_low);
+ gpiod_set_value(bitbang->mdo, 1);
return;
}
if (dir)
- gpio_direction_output(bitbang->mdio,
- 1 ^ bitbang->mdio_active_low);
+ gpiod_direction_output(bitbang->mdio, 1);
else
- gpio_direction_input(bitbang->mdio);
+ gpiod_direction_input(bitbang->mdio);
}
static int mdio_get(struct mdiobb_ctrl *ctrl)
@@ -97,8 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value_cansleep(bitbang->mdio) ^
- bitbang->mdio_active_low;
+ return gpiod_get_value(bitbang->mdio);
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -107,11 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpio_set_value_cansleep(bitbang->mdo,
- what ^ bitbang->mdo_active_low);
+ gpiod_set_value(bitbang->mdo, what);
else
- gpio_set_value_cansleep(bitbang->mdio,
- what ^ bitbang->mdio_active_low);
+ gpiod_set_value(bitbang->mdio, what);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -119,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
+ gpiod_set_value(bitbang->mdc, what);
}
static struct mdiobb_ops mdio_gpio_ops = {
@@ -137,6 +131,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
struct mii_bus *new_bus;
struct mdio_gpio_info *bitbang;
int i;
+ int mdc, mdio, mdo;
+ unsigned long mdc_flags = GPIOF_OUT_INIT_LOW;
+ unsigned long mdio_flags = GPIOF_DIR_IN;
+ unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH;
bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
if (!bitbang)
@@ -144,12 +142,20 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
bitbang->ctrl.ops = &mdio_gpio_ops;
bitbang->ctrl.reset = pdata->reset;
- bitbang->mdc = pdata->mdc;
- bitbang->mdc_active_low = pdata->mdc_active_low;
- bitbang->mdio = pdata->mdio;
- bitbang->mdio_active_low = pdata->mdio_active_low;
- bitbang->mdo = pdata->mdo;
- bitbang->mdo_active_low = pdata->mdo_active_low;
+ mdc = pdata->mdc;
+ bitbang->mdc = gpio_to_desc(mdc);
+ if (pdata->mdc_active_low)
+ mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW;
+ mdio = pdata->mdio;
+ bitbang->mdio = gpio_to_desc(mdio);
+ if (pdata->mdio_active_low)
+ mdio_flags |= GPIOF_ACTIVE_LOW;
+ mdo = pdata->mdo;
+ if (mdo) {
+ bitbang->mdo = gpio_to_desc(mdo);
+ if (pdata->mdo_active_low)
+ mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW;
+ }
new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!new_bus)
@@ -174,20 +180,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
else
strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
- if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
+ if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc"))
goto out_free_bus;
- if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
+ if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio"))
goto out_free_bus;
- if (bitbang->mdo) {
- if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
- goto out_free_bus;
- gpio_direction_output(bitbang->mdo, 1);
- gpio_direction_input(bitbang->mdio);
- }
-
- gpio_direction_output(bitbang->mdc, 0);
+ if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo"))
+ goto out_free_bus;
dev_set_drvdata(dev, new_bus);
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 92af182951be..f095051beb54 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -311,6 +311,30 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
}
#endif
+static const struct of_device_id xgene_mdio_of_match[] = {
+ {
+ .compatible = "apm,xgene-mdio-rgmii",
+ .data = (void *)XGENE_MDIO_RGMII
+ },
+ {
+ .compatible = "apm,xgene-mdio-xfi",
+ .data = (void *)XGENE_MDIO_XFI
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[] = {
+ { "APMC0D65", XGENE_MDIO_RGMII },
+ { "APMC0D66", XGENE_MDIO_XFI },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
+#endif
+
+
static int xgene_mdio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -430,32 +454,6 @@ static int xgene_mdio_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_mdio_of_match[] = {
- {
- .compatible = "apm,xgene-mdio-rgmii",
- .data = (void *)XGENE_MDIO_RGMII
- },
- {
- .compatible = "apm,xgene-mdio-xfi",
- .data = (void *)XGENE_MDIO_XFI
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[] = {
- { "APMC0D65", XGENE_MDIO_RGMII },
- { "APMC0D66", XGENE_MDIO_XFI },
- { }
-};
-
-MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
-#endif
-
static struct platform_driver xgene_mdio_driver = {
.driver = {
.name = "xgene-mdio",
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 354241b53c1d..594a11d42401 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -132,10 +132,6 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_BIT(field, src) \
xgene_enet_get_field_value(field ## _POS, 1, src)
-static const struct of_device_id xgene_mdio_of_match[];
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[];
-#endif
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 653d076eafe5..fa7d51f14869 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -41,6 +41,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
+#include "mdio-boardinfo.h"
+
int mdiobus_register_device(struct mdio_device *mdiodev)
{
if (mdiodev->bus->mdio_map[mdiodev->addr])
@@ -343,6 +345,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
}
+ mdiobus_setup_mdiodev_from_board_info(bus);
+
bus->state = MDIOBUS_REGISTERED;
pr_info("%s: probed\n", bus->name);
return 0;
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 43c8fd46504b..e24f28924af8 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -34,6 +34,17 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
+int mdio_device_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+ if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)
+ return 0;
+
+ return strcmp(mdiodev->modalias, drv->name) == 0;
+}
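For non-PHY MDIO devices created from board info, the match above simply compares the device's modalias against the driver name. A hedged sketch of an mdio_driver that would bind to the hypothetical "my-switch" entry used earlier (all names invented):

static int my_switch_probe(struct mdio_device *mdiodev)
{
	dev_info(&mdiodev->dev, "probed at address %d\n", mdiodev->addr);
	return 0;
}

static struct mdio_driver my_switch_driver = {
	.probe = my_switch_probe,
	.mdiodrv.driver = {
		.name = "my-switch",
	},
};

mdio_module_driver(my_switch_driver);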
+
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
{
struct mdio_device *mdiodev;
@@ -67,7 +78,7 @@ int mdio_device_register(struct mdio_device *mdiodev)
{
int err;
- dev_info(&mdiodev->dev, "mdio_device_register\n");
+ dev_dbg(&mdiodev->dev, "mdio_device_register\n");
err = mdiobus_register_device(mdiodev);
if (err)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289109b7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ8795,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8795",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index e03ead81fffb..650c2667d523 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/netdevice.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
@@ -52,6 +53,11 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_DEV_AUX_CNTL 28
#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
+#define MSCC_PHY_LED_MODE_SEL 29
+#define LED_1_MODE_SEL_MASK 0x00F0
+#define LED_0_MODE_SEL_MASK 0x000F
+#define LED_1_MODE_SEL_POS 4
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
@@ -99,6 +105,8 @@ enum rgmii_rx_clock_delay {
struct vsc8531_private {
int rate_magic;
+ u8 led_0_mode;
+ u8 led_1_mode;
};
#ifdef CONFIG_OF_MDIO
@@ -123,6 +131,29 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
return rc;
}
+static int vsc85xx_led_cntl_set(struct phy_device *phydev,
+ u8 led_num,
+ u8 mode)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
+ if (led_num) {
+ reg_val &= ~LED_1_MODE_SEL_MASK;
+ reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) &
+ LED_1_MODE_SEL_MASK);
+ } else {
+ reg_val &= ~LED_0_MODE_SEL_MASK;
+ reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK);
+ }
+ rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
{
u16 reg_val;
@@ -370,11 +401,41 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
return -EINVAL;
}
+
+static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
+ char *led,
+ u8 default_mode)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ u8 led_mode;
+ int err;
+
+ if (!of_node)
+ return -ENODEV;
+
+ led_mode = default_mode;
+ err = of_property_read_u8(of_node, led, &led_mode);
+ if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) {
+ phydev_err(phydev, "DT %s invalid\n", led);
+ return -EINVAL;
+ }
+
+ return led_mode;
+}
+
#else
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
return 0;
}
+
+static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
+ char *led,
+ u8 default_mode)
+{
+ return default_mode;
+}
#endif /* CONFIG_OF_MDIO */
static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
@@ -499,6 +560,14 @@ static int vsc85xx_config_init(struct phy_device *phydev)
if (rc)
return rc;
+ rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode);
+ if (rc)
+ return rc;
+
+ rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode);
+ if (rc)
+ return rc;
+
rc = genphy_config_init(phydev);
return rc;
@@ -555,8 +624,9 @@ static int vsc85xx_read_status(struct phy_device *phydev)
static int vsc85xx_probe(struct phy_device *phydev)
{
- int rate_magic;
struct vsc8531_private *vsc8531;
+ int rate_magic;
+ int led_mode;
rate_magic = vsc85xx_edge_rate_magic_get(phydev);
if (rate_magic < 0)
@@ -570,6 +640,19 @@ static int vsc85xx_probe(struct phy_device *phydev)
vsc8531->rate_magic = rate_magic;
+ /* LED[0] and LED[1] mode */
+ led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode",
+ VSC8531_LINK_1000_ACTIVITY);
+ if (led_mode < 0)
+ return led_mode;
+ vsc8531->led_0_mode = led_mode;
+
+ led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode",
+ VSC8531_LINK_100_ACTIVITY);
+ if (led_mode < 0)
+ return led_mode;
+ vsc8531->led_1_mode = led_mode;
+
return 0;
}
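For reference, a minimal standalone sketch of how the two 4-bit LED selectors written by the probe and config_init changes above pack into the MSCC_PHY_LED_MODE_SEL register. The mask and shift names mirror the #defines added earlier in this patch; the numeric mode values are placeholders rather than the real VSC8531_LINK_*_ACTIVITY constants from the dt-bindings header.

#include <stdint.h>
#include <stdio.h>

#define LED_1_MODE_SEL_MASK 0x00F0
#define LED_0_MODE_SEL_MASK 0x000F
#define LED_1_MODE_SEL_POS  4

/* Mirror of what vsc85xx_led_cntl_set() does one LED at a time, but for
 * both fields at once. */
static uint16_t pack_led_modes(uint16_t reg_val, uint8_t led0, uint8_t led1)
{
	reg_val &= ~(LED_1_MODE_SEL_MASK | LED_0_MODE_SEL_MASK);
	reg_val |= ((uint16_t)led1 << LED_1_MODE_SEL_POS) & LED_1_MODE_SEL_MASK;
	reg_val |= (uint16_t)led0 & LED_0_MODE_SEL_MASK;
	return reg_val;
}

int main(void)
{
	/* Placeholder mode numbers; the driver defaults come from
	 * VSC8531_LINK_1000_ACTIVITY and VSC8531_LINK_100_ACTIVITY. */
	printf("LED mode register: 0x%04x\n", pack_led_modes(0, 0x0, 0x1));
	return 0;
}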
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48da6e93c3f7..d6f7838455dd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
@@ -579,7 +580,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->drv->hwtstamp)
+ if (phydev->drv && phydev->drv->hwtstamp)
return phydev->drv->hwtstamp(phydev, ifr);
/* fall through */
@@ -602,6 +603,9 @@ int phy_start_aneg(struct phy_device *phydev)
{
int err;
+ if (!phydev->drv)
+ return -EIO;
+
mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg)
@@ -649,14 +653,18 @@ void phy_start_machine(struct phy_device *phydev)
* phy_trigger_machine - trigger the state machine to run
*
* @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancellation
*
* Description: There has been a change in state which requires that the
* state machine runs.
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
- cancel_delayed_work_sync(&phydev->state_queue);
+ if (sync)
+ cancel_delayed_work_sync(&phydev->state_queue);
+ else
+ cancel_delayed_work(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}
@@ -693,7 +701,7 @@ static void phy_error(struct phy_device *phydev)
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, false);
}
/**
@@ -840,7 +848,7 @@ void phy_change(struct phy_device *phydev)
}
/* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
return;
ignore:
@@ -942,7 +950,7 @@ void phy_start(struct phy_device *phydev)
if (do_resume)
phy_resume(phydev);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
@@ -970,7 +978,7 @@ void phy_state_machine(struct work_struct *work)
old_state = phydev->state;
- if (phydev->drv->link_change_notify)
+ if (phydev->drv && phydev->drv->link_change_notify)
phydev->drv->link_change_notify(phydev);
switch (phydev->state) {
@@ -1281,6 +1289,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
+ if (!phydev->drv)
+ return -EIO;
+
/* According to 802.3az, the EEE is supported only in full duplex mode.
* Also EEE feature is active when core is operating with MII, GMII
* or RGMII (all kinds). Internal PHYs are also allowed to proceed and
@@ -1358,6 +1369,9 @@ EXPORT_SYMBOL(phy_init_eee);
*/
int phy_get_eee_err(struct phy_device *phydev)
{
+ if (!phydev->drv)
+ return -EIO;
+
return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1374,6 +1388,9 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
int val;
+ if (!phydev->drv)
+ return -EIO;
+
/* Get Supported EEE */
val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
if (val < 0)
@@ -1407,6 +1424,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ if (!phydev->drv)
+ return -EIO;
+
/* Mask prohibited EEE modes */
val &= ~phydev->eee_broken_modes;
@@ -1418,7 +1438,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv->set_wol)
+ if (phydev->drv && phydev->drv->set_wol)
return phydev->drv->set_wol(phydev, wol);
return -EOPNOTSUPP;
@@ -1427,7 +1447,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv->get_wol)
+ if (phydev->drv && phydev->drv->get_wol)
phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
@@ -1463,6 +1483,9 @@ int phy_ethtool_nway_reset(struct net_device *ndev)
if (!phydev)
return -ENODEV;
+ if (!phydev->drv)
+ return -EIO;
+
return genphy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 92b08383cafa..daec6555f3b1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
+ bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -933,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
d->driver =
&genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
+ using_genphy = true;
+ }
+
+ if (!try_module_get(d->driver->owner)) {
+ dev_err(&dev->dev, "failed to get the device driver module\n");
+ err = -EIO;
+ goto error_put_device;
+ }
+
+ if (using_genphy) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
if (err)
- goto error;
+ goto error_module_put;
}
if (phydev->attached_dev) {
@@ -975,7 +986,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return err;
error:
+ /* phy_detach() does all of the cleanup below */
phy_detach(phydev);
+ return err;
+
+error_module_put:
+ module_put(d->driver->owner);
+error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -1039,6 +1056,8 @@ void phy_detach(struct phy_device *phydev)
phy_led_triggers_unregister(phydev);
+ module_put(phydev->mdio.dev.driver->owner);
+
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
@@ -1075,7 +1094,7 @@ int phy_suspend(struct phy_device *phydev)
if (wol.wolopts)
return -EBUSY;
- if (phydrv->suspend)
+ if (phydev->drv && phydrv->suspend)
ret = phydrv->suspend(phydev);
if (ret)
@@ -1092,7 +1111,7 @@ int phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
int ret = 0;
- if (phydrv->resume)
+ if (phydev->drv && phydrv->resume)
ret = phydrv->resume(phydev);
if (ret)
@@ -1765,11 +1784,13 @@ static int phy_remove(struct device *dev)
{
struct phy_device *phydev = to_phy_device(dev);
+ cancel_delayed_work_sync(&phydev->state_queue);
+
mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
- if (phydev->drv->remove)
+ if (phydev->drv && phydev->drv->remove)
phydev->drv->remove(phydev);
phydev->drv = NULL;
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf2f526..94ca42e630bb 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
*/
#include <linux/leds.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
sizeof(struct phy_led_trigger) *
phy->phy_num_led_triggers,
GFP_KERNEL);
- if (!phy->phy_led_triggers)
- return -ENOMEM;
+ if (!phy->phy_led_triggers) {
+ err = -ENOMEM;
+ goto out_clear;
+ }
for (i = 0; i < phy->phy_num_led_triggers; i++) {
err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+ phy->phy_num_led_triggers = 0;
return err;
}
EXPORT_SYMBOL_GPL(phy_led_triggers_register);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3d3b1f4339ef..a411b43a69eb 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1297,7 +1297,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-static struct rtnl_link_stats64*
+static void
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
struct ppp *ppp = netdev_priv(dev);
@@ -1317,8 +1317,6 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
stats64->rx_dropped = dev->stats.rx_dropped;
stats64->tx_dropped = dev->stats.tx_dropped;
stats64->rx_length_errors = dev->stats.rx_length_errors;
-
- return stats64;
}
static int ppp_dev_init(struct net_device *dev)
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9841f3dc0682..08db4d687533 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -566,7 +566,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
/* Netdevice get statistics request */
-static struct rtnl_link_stats64 *
+static void
sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct net_device_stats *devstats = &dev->stats;
@@ -597,7 +597,6 @@ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->collisions += comp->sls_o_misses;
}
#endif
- return stats;
}
/* Netdevice register callback */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
new file mode 100644
index 000000000000..35b55a2fa1a1
--- /dev/null
+++ b/drivers/net/tap.c
@@ -0,0 +1,1285 @@
+#include <linux/etherdevice.h>
+#include <linux/if_tap.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+#include <linux/skb_array.h>
+
+#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
+
+#define TAP_VNET_LE 0x80000000
+#define TAP_VNET_BE 0x40000000
+
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
+{
+ return q->flags & TAP_VNET_BE ? false :
+ virtio_legacy_is_little_endian();
+}
+
+static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
+{
+ int s = !!(q->flags & TAP_VNET_BE);
+
+ if (put_user(s, sp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
+{
+ int s;
+
+ if (get_user(s, sp))
+ return -EFAULT;
+
+ if (s)
+ q->flags |= TAP_VNET_BE;
+ else
+ q->flags &= ~TAP_VNET_BE;
+
+ return 0;
+}
+#else
+static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
+{
+ return virtio_legacy_is_little_endian();
+}
+
+static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
+{
+ return -EINVAL;
+}
+
+static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool tap_is_little_endian(struct tap_queue *q)
+{
+ return q->flags & TAP_VNET_LE ||
+ tap_legacy_is_little_endian(q);
+}
+
+static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
+{
+ return __virtio16_to_cpu(tap_is_little_endian(q), val);
+}
+
+static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
+{
+ return __cpu_to_virtio16(tap_is_little_endian(q), val);
+}
+
+static struct proto tap_proto = {
+ .name = "tap",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct tap_queue),
+};
+
+#define TAP_NUM_DEVS (1U << MINORBITS)
+
+static LIST_HEAD(major_list);
+
+struct major_info {
+ struct rcu_head rcu;
+ dev_t major;
+ struct idr minor_idr;
+ struct mutex minor_lock;
+ const char *device_name;
+ struct list_head next;
+};
+
+#define GOODCOPY_LEN 128
+
+static const struct proto_ops tap_socket_ops;
+
+#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+
+static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
+/*
+ * RCU usage:
+ * The tap_queue and the macvlan_dev are loosely coupled, the
+ * pointers from one to the other can only be read while rcu_read_lock
+ * or rtnl is held.
+ *
+ * Both the file and the macvlan_dev hold a reference on the tap_queue
+ * through sock_hold(&q->sk). When the macvlan_dev goes away first,
+ * q->tap becomes inaccessible. When the file gets closed,
+ * tap_get_queue() fails.
+ *
+ * There may still be references to the struct sock inside of the
+ * queue from outbound SKBs, but these never reference back to the
+ * file or the dev. The data structure is freed through __sk_free
+ * when both our references and any pending SKBs are gone.
+ */
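A kernel-style sketch (not buildable on its own) of the reader/teardown pattern this comment describes, using a simplified, hypothetical struct; the primitives are the ones the functions below actually use (rcu_dereference(), RCU_INIT_POINTER(), sock_put(), synchronize_rcu()).

#include <linux/rcupdate.h>
#include <net/sock.h>

struct demo_dev;			/* stands in for struct tap_dev */

struct demo_queue {			/* stands in for struct tap_queue */
	struct sock sk;
	struct demo_dev __rcu *tap;
};

/* Reader side: the pointer is only stable under rcu_read_lock() (or RTNL). */
static struct demo_dev *demo_get_dev(struct demo_queue *q)
{
	return rcu_dereference(q->tap);
}

/* Teardown side, as in tap_put_queue(): break the link, drop the device's
 * reference, then wait out readers before dropping the file's reference. */
static void demo_unlink(struct demo_queue *q)
{
	RCU_INIT_POINTER(q->tap, NULL);
	sock_put(&q->sk);		/* reference held on behalf of the device */
	synchronize_rcu();		/* no reader can still see the old q->tap */
	sock_put(&q->sk);		/* reference held on behalf of the file */
}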
+
+static int tap_enable_queue(struct tap_dev *tap, struct file *file,
+ struct tap_queue *q)
+{
+ int err = -EINVAL;
+
+ ASSERT_RTNL();
+
+ if (q->enabled)
+ goto out;
+
+ err = 0;
+ rcu_assign_pointer(tap->taps[tap->numvtaps], q);
+ q->queue_index = tap->numvtaps;
+ q->enabled = true;
+
+ tap->numvtaps++;
+out:
+ return err;
+}
+
+/* Requires RTNL */
+static int tap_set_queue(struct tap_dev *tap, struct file *file,
+ struct tap_queue *q)
+{
+ if (tap->numqueues == MAX_TAP_QUEUES)
+ return -EBUSY;
+
+ rcu_assign_pointer(q->tap, tap);
+ rcu_assign_pointer(tap->taps[tap->numvtaps], q);
+ sock_hold(&q->sk);
+
+ q->file = file;
+ q->queue_index = tap->numvtaps;
+ q->enabled = true;
+ file->private_data = q;
+ list_add_tail(&q->next, &tap->queue_list);
+
+ tap->numvtaps++;
+ tap->numqueues++;
+
+ return 0;
+}
+
+static int tap_disable_queue(struct tap_queue *q)
+{
+ struct tap_dev *tap;
+ struct tap_queue *nq;
+
+ ASSERT_RTNL();
+ if (!q->enabled)
+ return -EINVAL;
+
+ tap = rtnl_dereference(q->tap);
+
+ if (tap) {
+ int index = q->queue_index;
+ BUG_ON(index >= tap->numvtaps);
+ nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
+ nq->queue_index = index;
+
+ rcu_assign_pointer(tap->taps[index], nq);
+ RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
+ q->enabled = false;
+
+ tap->numvtaps--;
+ }
+
+ return 0;
+}
+
+/*
+ * The file owning the queue got closed, give up both
+ * the reference that the file holds as well as the
+ * one from the macvlan_dev if that still exists.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ */
+static void tap_put_queue(struct tap_queue *q)
+{
+ struct tap_dev *tap;
+
+ rtnl_lock();
+ tap = rtnl_dereference(q->tap);
+
+ if (tap) {
+ if (q->enabled)
+ BUG_ON(tap_disable_queue(q));
+
+ tap->numqueues--;
+ RCU_INIT_POINTER(q->tap, NULL);
+ sock_put(&q->sk);
+ list_del_init(&q->next);
+ }
+
+ rtnl_unlock();
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Select a queue based on the rxq of the device on which this packet
+ * arrived. If the incoming device is not mq, calculate a flow hash
+ * to select a queue. If all fails, find the first available queue.
+ * Cache tap->numvtaps since it can become zero during the execution
+ * of this function.
+ */
+static struct tap_queue *tap_get_queue(struct tap_dev *tap,
+ struct sk_buff *skb)
+{
+ struct tap_queue *queue = NULL;
+ /* Access to taps array is protected by rcu, but access to numvtaps
+ * isn't. Below we use it to look up a queue, but treat it as a hint
+ * and validate that the result isn't NULL - in case we are
+ * racing against queue removal.
+ */
+ int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ __u32 rxq;
+
+ if (!numvtaps)
+ goto out;
+
+ if (numvtaps == 1)
+ goto single;
+
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_hash(skb);
+ if (rxq) {
+ queue = rcu_dereference(tap->taps[rxq % numvtaps]);
+ goto out;
+ }
+
+ if (likely(skb_rx_queue_recorded(skb))) {
+ rxq = skb_get_rx_queue(skb);
+
+ while (unlikely(rxq >= numvtaps))
+ rxq -= numvtaps;
+
+ queue = rcu_dereference(tap->taps[rxq]);
+ goto out;
+ }
+
+single:
+ queue = rcu_dereference(tap->taps[0]);
+out:
+ return queue;
+}
+
+/*
+ * The net_device is going away, give up the reference
+ * that it holds on all queues and safely set the pointer
+ * from the queues to NULL.
+ */
+void tap_del_queues(struct tap_dev *tap)
+{
+ struct tap_queue *q, *tmp;
+
+ ASSERT_RTNL();
+ list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
+ list_del_init(&q->next);
+ RCU_INIT_POINTER(q->tap, NULL);
+ if (q->enabled)
+ tap->numvtaps--;
+ tap->numqueues--;
+ sock_put(&q->sk);
+ }
+ BUG_ON(tap->numvtaps);
+ BUG_ON(tap->numqueues);
+ /* guarantee that any future tap_set_queue will fail */
+ tap->numvtaps = MAX_TAP_QUEUES;
+}
+EXPORT_SYMBOL_GPL(tap_del_queues);
+
+rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct tap_dev *tap;
+ struct tap_queue *q;
+ netdev_features_t features = TAP_FEATURES;
+
+ tap = tap_dev_get_rcu(dev);
+ if (!tap)
+ return RX_HANDLER_PASS;
+
+ q = tap_get_queue(tap, skb);
+ if (!q)
+ return RX_HANDLER_PASS;
+
+ if (__skb_array_full(&q->skb_array))
+ goto drop;
+
+ skb_push(skb, ETH_HLEN);
+
+ /* Apply the forward feature mask so that we perform segmentation
+ * according to the user's wishes. This only works if VNET_HDR is
+ * enabled.
+ */
+ if (q->flags & IFF_VNET_HDR)
+ features |= tap->tap_features;
+ if (netif_needs_gso(skb, features)) {
+ struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+ if (IS_ERR(segs))
+ goto drop;
+
+ if (!segs) {
+ if (skb_array_produce(&q->skb_array, skb))
+ goto drop;
+ goto wake_up;
+ }
+
+ consume_skb(skb);
+ while (segs) {
+ struct sk_buff *nskb = segs->next;
+
+ segs->next = NULL;
+ if (skb_array_produce(&q->skb_array, segs)) {
+ kfree_skb(segs);
+ kfree_skb_list(nskb);
+ break;
+ }
+ segs = nskb;
+ }
+ } else {
+ /* If we receive a partial checksum and the tap side
+ * doesn't support checksum offload, compute the checksum.
+ * Note: it doesn't matter which checksum feature to
+ * check; we either support them all or none.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(features & NETIF_F_CSUM_MASK) &&
+ skb_checksum_help(skb))
+ goto drop;
+ if (skb_array_produce(&q->skb_array, skb))
+ goto drop;
+ }
+
+wake_up:
+ wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
+ return RX_HANDLER_CONSUMED;
+
+drop:
+ /* Count errors/drops only here, thus don't care about args. */
+ if (tap->count_rx_dropped)
+ tap->count_rx_dropped(tap);
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(tap_handle_frame);
+
+static struct major_info *tap_get_major(int major)
+{
+ struct major_info *tap_major;
+
+ list_for_each_entry_rcu(tap_major, &major_list, next) {
+ if (tap_major->major == major)
+ return tap_major;
+ }
+
+ return NULL;
+}
+
+int tap_get_minor(dev_t major, struct tap_dev *tap)
+{
+ int retval = -ENOMEM;
+ struct major_info *tap_major;
+
+ rcu_read_lock();
+ tap_major = tap_get_major(MAJOR(major));
+ if (!tap_major) {
+ retval = -EINVAL;
+ goto unlock;
+ }
+
+ mutex_lock(&tap_major->minor_lock);
+ retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL);
+ if (retval >= 0) {
+ tap->minor = retval;
+ } else if (retval == -ENOSPC) {
+ netdev_err(tap->dev, "Too many tap devices\n");
+ retval = -EINVAL;
+ }
+ mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+ rcu_read_unlock();
+ return retval < 0 ? retval : 0;
+}
+EXPORT_SYMBOL_GPL(tap_get_minor);
+
+void tap_free_minor(dev_t major, struct tap_dev *tap)
+{
+ struct major_info *tap_major;
+
+ rcu_read_lock();
+ tap_major = tap_get_major(MAJOR(major));
+ if (!tap_major) {
+ goto unlock;
+ }
+
+ mutex_lock(&tap_major->minor_lock);
+ if (tap->minor) {
+ idr_remove(&tap_major->minor_idr, tap->minor);
+ tap->minor = 0;
+ }
+ mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(tap_free_minor);
+
+static struct tap_dev *dev_get_by_tap_file(int major, int minor)
+{
+ struct net_device *dev = NULL;
+ struct tap_dev *tap;
+ struct major_info *tap_major;
+
+ rcu_read_lock();
+ tap_major = tap_get_major(major);
+ if (!tap_major) {
+ tap = NULL;
+ goto unlock;
+ }
+
+ mutex_lock(&tap_major->minor_lock);
+ tap = idr_find(&tap_major->minor_idr, minor);
+ if (tap) {
+ dev = tap->dev;
+ dev_hold(dev);
+ }
+ mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+ rcu_read_unlock();
+ return tap;
+}
+
+static void tap_sock_write_space(struct sock *sk)
+{
+ wait_queue_head_t *wqueue;
+
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
+}
+
+static void tap_sock_destruct(struct sock *sk)
+{
+ struct tap_queue *q = container_of(sk, struct tap_queue, sk);
+
+ skb_array_cleanup(&q->skb_array);
+}
+
+static int tap_open(struct inode *inode, struct file *file)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct tap_dev *tap;
+ struct tap_queue *q;
+ int err = -ENODEV;
+
+ rtnl_lock();
+ tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
+ if (!tap)
+ goto err;
+
+ err = -ENOMEM;
+ q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &tap_proto, 0);
+ if (!q)
+ goto err;
+
+ RCU_INIT_POINTER(q->sock.wq, &q->wq);
+ init_waitqueue_head(&q->wq.wait);
+ q->sock.type = SOCK_RAW;
+ q->sock.state = SS_CONNECTED;
+ q->sock.file = file;
+ q->sock.ops = &tap_socket_ops;
+ sock_init_data(&q->sock, &q->sk);
+ q->sk.sk_write_space = tap_sock_write_space;
+ q->sk.sk_destruct = tap_sock_destruct;
+ q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+
+ /*
+ * So far only KVM virtio_net uses tap; enable zero copy between
+ * guest kernel and host kernel when the lower device supports zerocopy.
+ *
+ * The macvlan supports zerocopy iff the lower device supports zero
+ * copy so we don't have to look at the lower device directly.
+ */
+ if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
+ sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+
+ err = -ENOMEM;
+ if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
+ goto err_array;
+
+ err = tap_set_queue(tap, file, q);
+ if (err)
+ goto err_queue;
+
+ dev_put(tap->dev);
+
+ rtnl_unlock();
+ return err;
+
+err_queue:
+ skb_array_cleanup(&q->skb_array);
+err_array:
+ sock_put(&q->sk);
+err:
+ if (tap)
+ dev_put(tap->dev);
+
+ rtnl_unlock();
+ return err;
+}
+
+static int tap_release(struct inode *inode, struct file *file)
+{
+ struct tap_queue *q = file->private_data;
+ tap_put_queue(q);
+ return 0;
+}
+
+static unsigned int tap_poll(struct file *file, poll_table *wait)
+{
+ struct tap_queue *q = file->private_data;
+ unsigned int mask = POLLERR;
+
+ if (!q)
+ goto out;
+
+ mask = 0;
+ poll_wait(file, &q->wq.wait, wait);
+
+ if (!skb_array_empty(&q->skb_array))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+out:
+ return mask;
+}
+
+static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
+ size_t len, size_t linear,
+ int noblock, int *err)
+{
+ struct sk_buff *skb;
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
+
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ err, 0);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, prepad);
+ skb_put(skb, linear);
+ skb->data_len = len - linear;
+ skb->len += len - linear;
+
+ return skb;
+}
+
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
+/* Get packet from user space buffer */
+static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
+ struct iov_iter *from, int noblock)
+{
+ int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
+ struct sk_buff *skb;
+ struct tap_dev *tap;
+ unsigned long total_len = iov_iter_count(from);
+ unsigned long len = total_len;
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+ int copylen = 0;
+ int depth;
+ bool zerocopy = false;
+ size_t linear;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+ err = -EINVAL;
+ if (len < vnet_hdr_len)
+ goto err;
+ len -= vnet_hdr_len;
+
+ err = -EFAULT;
+ if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
+ goto err;
+ iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
+ if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ tap16_to_cpu(q, vnet_hdr.csum_start) +
+ tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
+ tap16_to_cpu(q, vnet_hdr.hdr_len))
+ vnet_hdr.hdr_len = cpu_to_tap16(q,
+ tap16_to_cpu(q, vnet_hdr.csum_start) +
+ tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
+ err = -EINVAL;
+ if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
+ goto err;
+ }
+
+ err = -EINVAL;
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ struct iov_iter i;
+
+ copylen = vnet_hdr.hdr_len ?
+ tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
+ if (copylen > good_linear)
+ copylen = good_linear;
+ else if (copylen < ETH_HLEN)
+ copylen = ETH_HLEN;
+ linear = copylen;
+ i = *from;
+ iov_iter_advance(&i, copylen);
+ if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }
+
+ if (!zerocopy) {
+ copylen = len;
+ linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
+ if (linear > good_linear)
+ linear = good_linear;
+ else if (linear < ETH_HLEN)
+ linear = ETH_HLEN;
+ }
+
+ skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
+ linear, noblock, &err);
+ if (!skb)
+ goto err;
+
+ if (zerocopy)
+ err = zerocopy_sg_from_iter(skb, from);
+ else
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
+
+ if (err)
+ goto err_kfree;
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ if (vnet_hdr_len) {
+ err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
+ tap_is_little_endian(q));
+ if (err)
+ goto err_kfree;
+ }
+
+ skb_probe_transport_header(skb, ETH_HLEN);
+
+ /* Move network header to the right position for VLAN tagged packets */
+ if ((skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) &&
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ /* copy skb_ubuf_info for callback when skb has no error */
+ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ } else if (m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg, false);
+ }
+
+ if (tap) {
+ skb->dev = tap->dev;
+ dev_queue_xmit(skb);
+ } else {
+ kfree_skb(skb);
+ }
+ rcu_read_unlock();
+
+ return total_len;
+
+err_kfree:
+ kfree_skb(skb);
+
+err:
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (tap && tap->count_tx_dropped)
+ tap->count_tx_dropped(tap);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct tap_queue *q = file->private_data;
+
+ return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
+}
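A hedged userspace sketch of the write path serviced by tap_get_user() above: one Ethernet frame preceded by the virtio_net header that is expected while IFF_VNET_HDR is set (the default for a freshly opened queue). The /dev/tap7 path and the frame contents are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>

int main(void)
{
	struct virtio_net_hdr vnet = { 0 };	/* no checksum or GSO requested */
	unsigned char frame[60] = { 0 };	/* minimal, padded Ethernet frame */
	struct iovec iov[2] = {
		{ .iov_base = &vnet, .iov_len = sizeof(vnet) },
		{ .iov_base = frame, .iov_len = sizeof(frame) },
	};
	int fd = open("/dev/tap7", O_RDWR);	/* hypothetical macvtap node */

	if (fd < 0) { perror("open"); return 1; }

	memset(frame, 0xff, 6);			/* broadcast destination MAC */
	frame[12] = 0x08;			/* EtherType 0x0806 (ARP), as filler */
	frame[13] = 0x06;

	if (writev(fd, iov, 2) < 0)
		perror("writev");

	close(fd);
	return 0;
}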
+
+/* Put packet to the user space buffer */
+static ssize_t tap_put_user(struct tap_queue *q,
+ const struct sk_buff *skb,
+ struct iov_iter *iter)
+{
+ int ret;
+ int vnet_hdr_len = 0;
+ int vlan_offset = 0;
+ int total;
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (iov_iter_count(iter) < vnet_hdr_len)
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+ tap_is_little_endian(q), true))
+ BUG();
+
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+ sizeof(vnet_hdr))
+ return -EFAULT;
+
+ iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
+ }
+ total = vnet_hdr_len;
+ total += skb->len;
+
+ if (skb_vlan_tag_present(skb)) {
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+ veth.h_vlan_proto = skb->vlan_proto;
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ total += VLAN_HLEN;
+
+ ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+ if (ret || !iov_iter_count(iter))
+ goto done;
+
+ ret = copy_to_iter(&veth, sizeof(veth), iter);
+ if (ret != sizeof(veth) || !iov_iter_count(iter))
+ goto done;
+ }
+
+ ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
+ skb->len - vlan_offset);
+
+done:
+ return ret ? ret : total;
+}
+
+static ssize_t tap_do_read(struct tap_queue *q,
+ struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct sk_buff *skb;
+ ssize_t ret = 0;
+
+ if (!iov_iter_count(to))
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(sk_sleep(&q->sk), &wait,
+ TASK_INTERRUPTIBLE);
+
+ /* Read frames from the queue */
+ skb = skb_array_consume(&q->skb_array);
+ if (skb)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ }
+ if (!noblock)
+ finish_wait(sk_sleep(&q->sk), &wait);
+
+ if (skb) {
+ ret = tap_put_user(q, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ }
+ return ret;
+}
+
+static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct tap_queue *q = file->private_data;
+ ssize_t len = iov_iter_count(to), ret;
+
+ ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
+ return ret;
+}
+
+static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
+{
+ struct tap_dev *tap;
+
+ ASSERT_RTNL();
+ tap = rtnl_dereference(q->tap);
+ if (tap)
+ dev_hold(tap->dev);
+
+ return tap;
+}
+
+static void tap_put_tap_dev(struct tap_dev *tap)
+{
+ dev_put(tap->dev);
+}
+
+static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
+{
+ struct tap_queue *q = file->private_data;
+ struct tap_dev *tap;
+ int ret;
+
+ tap = tap_get_tap_dev(q);
+ if (!tap)
+ return -EINVAL;
+
+ if (flags & IFF_ATTACH_QUEUE)
+ ret = tap_enable_queue(tap, file, q);
+ else if (flags & IFF_DETACH_QUEUE)
+ ret = tap_disable_queue(q);
+ else
+ ret = -EINVAL;
+
+ tap_put_tap_dev(tap);
+ return ret;
+}
+
+static int set_offload(struct tap_queue *q, unsigned long arg)
+{
+ struct tap_dev *tap;
+ netdev_features_t features;
+ netdev_features_t feature_mask = 0;
+
+ tap = rtnl_dereference(q->tap);
+ if (!tap)
+ return -ENOLINK;
+
+ features = tap->dev->features;
+
+ if (arg & TUN_F_CSUM) {
+ feature_mask = NETIF_F_HW_CSUM;
+
+ if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
+ if (arg & TUN_F_TSO_ECN)
+ feature_mask |= NETIF_F_TSO_ECN;
+ if (arg & TUN_F_TSO4)
+ feature_mask |= NETIF_F_TSO;
+ if (arg & TUN_F_TSO6)
+ feature_mask |= NETIF_F_TSO6;
+ }
+
+ if (arg & TUN_F_UFO)
+ feature_mask |= NETIF_F_UFO;
+ }
+
+ /* tun/tap driver inverts the usage for TSO offloads, where
+ * setting the TSO bit means that the userspace wants to
+ * accept TSO frames and turning it off means that user space
+ * does not support TSO.
+ * For tap, we have to invert it to mean the same thing.
+ * When user space turns off TSO, we turn off GSO/LRO so that
+ * user-space will not receive TSO frames.
+ */
+ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+ features |= RX_OFFLOADS;
+ else
+ features &= ~RX_OFFLOADS;
+
+ /* tap_features are the same as features on tun/tap and
+ * reflect user expectations.
+ */
+ tap->tap_features = feature_mask;
+ if (tap->update_features)
+ tap->update_features(tap, features);
+
+ return 0;
+}
+
+/*
+ * provide compatibility with generic tun/tap interface
+ */
+static long tap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tap_queue *q = file->private_data;
+ struct tap_dev *tap;
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ unsigned short u;
+ int __user *sp = argp;
+ struct sockaddr sa;
+ int s;
+ int ret;
+
+ switch (cmd) {
+ case TUNSETIFF:
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+
+ ret = 0;
+ if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
+ ret = -EINVAL;
+ else
+ q->flags = (q->flags & ~TAP_IFFEATURES) | u;
+
+ return ret;
+
+ case TUNGETIFF:
+ rtnl_lock();
+ tap = tap_get_tap_dev(q);
+ if (!tap) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+
+ ret = 0;
+ u = q->flags;
+ if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
+ put_user(u, &ifr->ifr_flags))
+ ret = -EFAULT;
+ tap_put_tap_dev(tap);
+ rtnl_unlock();
+ return ret;
+
+ case TUNSETQUEUE:
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+ rtnl_lock();
+ ret = tap_ioctl_set_queue(file, u);
+ rtnl_unlock();
+ return ret;
+
+ case TUNGETFEATURES:
+ if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ if (get_user(s, sp))
+ return -EFAULT;
+
+ q->sk.sk_sndbuf = s;
+ return 0;
+
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ q->vnet_hdr_sz = s;
+ return 0;
+
+ case TUNGETVNETLE:
+ s = !!(q->flags & TAP_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ q->flags |= TAP_VNET_LE;
+ else
+ q->flags &= ~TAP_VNET_LE;
+ return 0;
+
+ case TUNGETVNETBE:
+ return tap_get_vnet_be(q, sp);
+
+ case TUNSETVNETBE:
+ return tap_set_vnet_be(q, sp);
+
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ rtnl_lock();
+ ret = set_offload(q, arg);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCGIFHWADDR:
+ rtnl_lock();
+ tap = tap_get_tap_dev(q);
+ if (!tap) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = 0;
+ u = tap->dev->type;
+ if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
+ copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
+ put_user(u, &ifr->ifr_hwaddr.sa_family))
+ ret = -EFAULT;
+ tap_put_tap_dev(tap);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCSIFHWADDR:
+ if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ return -EFAULT;
+ rtnl_lock();
+ tap = tap_get_tap_dev(q);
+ if (!tap) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = dev_set_mac_address(tap->dev, &sa);
+ tap_put_tap_dev(tap);
+ rtnl_unlock();
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long tap_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+const struct file_operations tap_fops = {
+ .owner = THIS_MODULE,
+ .open = tap_open,
+ .release = tap_release,
+ .read_iter = tap_read_iter,
+ .write_iter = tap_write_iter,
+ .poll = tap_poll,
+ .llseek = no_llseek,
+ .unlocked_ioctl = tap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tap_compat_ioctl,
+#endif
+};
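A userspace sketch of the ioctl surface implemented by tap_ioctl() above, assuming a macvtap/ipvtap instance whose character node is /dev/tap7 (adjust for your setup). Only ioctls defined in <linux/if_tun.h> are used; error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/tap7", O_RDWR);	/* hypothetical minor */

	if (fd < 0) { perror("open"); return 1; }

	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, TUNGETIFF, &ifr) == 0)	/* which netdev backs this queue? */
		printf("attached to %s, flags %#x\n",
		       ifr.ifr_name, (unsigned int)ifr.ifr_flags);

	/* Advertise which offloads userspace can accept; see set_offload(). */
	if (ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6) < 0)
		perror("TUNSETOFFLOAD");

	close(fd);
	return 0;
}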
+
+static int tap_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+ return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tap_recvmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags)
+{
+ struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+static int tap_peek_len(struct socket *sock)
+{
+ struct tap_queue *q = container_of(sock, struct tap_queue,
+ sock);
+ return skb_array_peek_len(&q->skb_array);
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops tap_socket_ops = {
+ .sendmsg = tap_sendmsg,
+ .recvmsg = tap_recvmsg,
+ .peek_len = tap_peek_len,
+};
+
+/* Get an underlying socket object from a tap file. Returns an error unless the
+ * file is attached to a device. The returned object works like a packet socket;
+ * it can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *tap_get_socket(struct file *file)
+{
+ struct tap_queue *q;
+ if (file->f_op != &tap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->sock;
+}
+EXPORT_SYMBOL_GPL(tap_get_socket);
+
+int tap_queue_resize(struct tap_dev *tap)
+{
+ struct net_device *dev = tap->dev;
+ struct tap_queue *q;
+ struct skb_array **arrays;
+ int n = tap->numqueues;
+ int ret, i = 0;
+
+ arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
+ if (!arrays)
+ return -ENOMEM;
+
+ list_for_each_entry(q, &tap->queue_list, next)
+ arrays[i++] = &q->skb_array;
+
+ ret = skb_array_resize_multiple(arrays, n,
+ dev->tx_queue_len, GFP_KERNEL);
+
+ kfree(arrays);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tap_queue_resize);
+
+static int tap_list_add(dev_t major, const char *device_name)
+{
+ struct major_info *tap_major;
+
+ tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
+ if (!tap_major)
+ return -ENOMEM;
+
+ tap_major->major = MAJOR(major);
+
+ idr_init(&tap_major->minor_idr);
+ mutex_init(&tap_major->minor_lock);
+
+ tap_major->device_name = device_name;
+
+ list_add_tail_rcu(&tap_major->next, &major_list);
+ return 0;
+}
+
+int tap_create_cdev(struct cdev *tap_cdev,
+ dev_t *tap_major, const char *device_name)
+{
+ int err;
+
+ err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
+ if (err)
+ goto out1;
+
+ cdev_init(tap_cdev, &tap_fops);
+ err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
+ if (err)
+ goto out2;
+
+ err = tap_list_add(*tap_major, device_name);
+ if (err)
+ goto out3;
+
+ return 0;
+
+out3:
+ cdev_del(tap_cdev);
+out2:
+ unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
+out1:
+ return err;
+}
+EXPORT_SYMBOL_GPL(tap_create_cdev);
+
+void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
+{
+ struct major_info *tap_major, *tmp;
+
+ cdev_del(tap_cdev);
+ unregister_chrdev_region(major, TAP_NUM_DEVS);
+ list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
+ if (tap_major->major == MAJOR(major)) {
+ idr_destroy(&tap_major->minor_idr);
+ list_del_rcu(&tap_major->next);
+ kfree_rcu(tap_major, rcu);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(tap_destroy_cdev);
+
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bdc58567d10e..4a24b5d15f5a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1798,7 +1798,7 @@ unwind:
return err;
}
-static struct rtnl_link_stats64 *
+static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct team *team = netdev_priv(dev);
@@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_dropped = rx_dropped;
stats->tx_dropped = tx_dropped;
stats->rx_nohandler = rx_nohandler;
- return stats;
}
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -2002,8 +2001,6 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
- .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
- .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_change_carrier = team_change_carrier,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cd8e02c94be0..30863e378925 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -218,6 +218,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
};
@@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -953,7 +955,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
tun->align = new_hr;
}
-static struct rtnl_link_stats64 *
+static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
@@ -987,7 +989,6 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_dropped = rx_dropped;
stats->rx_frame_errors = rx_frame_errors;
stats->tx_dropped = tx_dropped;
- return stats;
}
static const struct net_device_ops tun_netdev_ops = {
@@ -1140,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
+ struct sk_buff *skb, int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ u32 rx_batched = tun->rx_batched;
+ bool rcv = false;
+
+ if (!rx_batched || (!more && skb_queue_empty(queue))) {
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ return;
+ }
+
+ spin_lock(&queue->lock);
+ if (!more || skb_queue_len(queue) == rx_batched) {
+ __skb_queue_head_init(&process_queue);
+ skb_queue_splice_tail_init(queue, &process_queue);
+ rcv = true;
+ } else {
+ __skb_queue_tail(queue, skb);
+ }
+ spin_unlock(&queue->lock);
+
+ if (rcv) {
+ struct sk_buff *nskb;
+
+ local_bh_disable();
+ while ((nskb = __skb_dequeue(&process_queue)))
+ netif_receive_skb(nskb);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1170,9 +1207,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (tun->flags & IFF_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;
if (!copy_from_iter_full(&gso, sizeof(gso), from))
return -EFAULT;
@@ -1183,7 +1222,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL;
- iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+ iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
}
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1284,9 +1323,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rxhash = skb_get_hash(skb);
#ifndef CONFIG_4KSTACKS
- local_bh_disable();
- netif_receive_skb(skb);
- local_bh_enable();
+ tun_rx_batched(tun, tfile, skb, more);
#else
netif_rx_ni(skb);
#endif
@@ -1312,7 +1349,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!tun)
return -EBADFD;
- result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, from,
+ file->f_flags & O_NONBLOCK, false);
tun_put(tun);
return result;
@@ -1335,7 +1373,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_hlen = VLAN_HLEN;
if (tun->flags & IFF_VNET_HDR)
- vnet_hdr_sz = tun->vnet_hdr_sz;
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
total = skb->len + vlan_hlen + vnet_hdr_sz;
@@ -1360,7 +1398,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return -EINVAL;
if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun))) {
+ tun_is_little_endian(tun), true)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
@@ -1570,7 +1608,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -EBADFD;
ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
- m->msg_flags & MSG_DONTWAIT);
+ m->msg_flags & MSG_DONTWAIT,
+ m->msg_flags & MSG_MORE);
tun_put(tun);
return ret;
}
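The READ_ONCE() snapshots of tun->vnet_hdr_sz in the hunks above exist because userspace can change the header size at any time while other threads are inside tun_get_user()/tun_put_user(). A userspace sketch of that control path; the "tap0" interface name is a placeholder.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

int main(void)
{
	struct ifreq ifr;
	int hdr_sz = sizeof(struct virtio_net_hdr) + 2;	/* e.g. mergeable-rx layout */
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) { perror("open"); return 1; }

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) { perror("TUNSETIFF"); return 1; }

	/* May race with writers in other threads; the driver now reads the
	 * size once per packet instead of re-reading it mid-parse. */
	if (ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz) < 0)
		perror("TUNSETVNETHDRSZ");

	close(fd);
	return 0;
}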
@@ -1771,6 +1810,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->align = NET_SKB_PAD;
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
+ tun->rx_batched = 0;
tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
if (!tun->pcpu_stats) {
@@ -2439,6 +2479,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
#endif
}
+static int tun_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames = tun->rx_batched;
+
+ return 0;
+}
+
+static int tun_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
+ tun->rx_batched = NAPI_POLL_WEIGHT;
+ else
+ tun->rx_batched = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
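The rx_batched value consumed by tun_rx_batched() is exposed through the coalesce hooks above, so the batch size can be tuned with "ethtool -C <if> rx-frames N". A userspace sketch of the same operation through SIOCETHTOOL; the "tun0" name is a placeholder and CAP_NET_ADMIN is required.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames = 32,	/* clamped to NAPI_POLL_WEIGHT by the driver */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) { perror("socket"); return 1; }

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}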
static const struct ethtool_ops tun_ethtool_ops = {
.get_settings = tun_get_settings,
.get_drvinfo = tun_get_drvinfo,
@@ -2446,6 +2509,8 @@ static const struct ethtool_ops tun_ethtool_ops = {
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = tun_get_coalesce,
+ .set_coalesce = tun_set_coalesce,
};
static int tun_queue_resize(struct tun_struct *tun)
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 3daa41bdd4ea..0acc9b640419 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);
dev_dbg(dev, "Getting MAC from SEEROM.\n");
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe7b2886cb6b..f5552aaaa77a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* connected. This causes the link state to be incorrect. Work around this by
* always setting the state to off, then on.
*/
-void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
+static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
#define NVIDIA_VENDOR_ID 0x0955
+#define HP_VENDOR_ID 0x03f0
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 08f8703e4d54..9889a70ff4f6 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -35,6 +35,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
+#include <linux/phy.h>
#include "lan78xx.h"
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 24e803fe9a53..36674484c6fb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
+ u8 *buf;
int ret;
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, data, size, 1000);
+ indx, buf, size, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ else if (ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
return ret;
}
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+ const void *data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, data, size, 100);
+ indx, buf, size, 100);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(&data, 1, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, &data, 1, 1000);
+ indx, buf, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
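The pegasus hunks above (and the rtl8150 hunks further down) converge on the same fix: usb_control_msg() buffers are DMA targets, so data is bounced through kmalloc'ed memory instead of caller-supplied or on-stack storage. A kernel-side sketch of that pattern; read_reg_bounced() and the vendor request type are hypothetical, not part of this patch.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int read_reg_bounced(struct usb_device *udev, u8 request, u16 indx,
			    void *data, u16 size)
{
	void *buf;
	int ret;

	buf = kmalloc(size, GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, indx, buf, size, 500);
	if (ret > 0 && ret <= size)
		memcpy(data, buf, ret);	/* copy only what the device returned */

	kfree(buf);
	return ret;
}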
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb0174f..24d5272cdce5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HP lt2523 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index be418563cb18..986243c932cc 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "6"
+#define NET_VERSION "8"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
+ if (!(tp->netdev->features & NETIF_F_RXCSUM))
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
return 0;
}
@@ -3572,19 +3584,26 @@ static bool delay_autosuspend(struct r8152 *tp)
*/
if (!sw_linking && tp->rtl_ops.in_nway(tp))
return true;
+ else if (!skb_queue_empty(&tp->tx_queue))
+ return true;
else
return false;
}
-static int rtl8152_rumtime_suspend(struct r8152 *tp)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
int ret = 0;
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
u32 rcr = 0;
if (delay_autosuspend(tp)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3601,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
if (!(ocp_data & RXFIFO_EMPTY)) {
rxdy_gated_en(tp, false);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3620,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
}
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
out1:
return ret;
}
@@ -3653,7 +3672,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
mutex_lock(&tp->control);
if (PMSG_IS_AUTO(message))
- ret = rtl8152_rumtime_suspend(tp);
+ ret = rtl8152_runtime_suspend(tp);
else
ret = rtl8152_system_suspend(tp);
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
netif_carrier_off(tp->netdev);
@@ -4356,6 +4378,11 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ if (tp->version == RTL_VER_01) {
+ netdev->features &= ~NETIF_F_RXCSUM;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
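
Editorial note: RTL_VER_01 cannot checksum received frames, so probe strips NETIF_F_RXCSUM from both feature masks; keeping it out of hw_features is what stops `ethtool -K ... rx on` from re-enabling it later. A minimal illustration of the features/hw_features split (the predicate name is hypothetical):

	/* hw_features: what the hardware could do and ethtool may toggle.
	 * features:    what is currently enabled.
	 * An offload absent from hw_features can never be switched on from
	 * userspace, so broken hardware revisions are masked out of both.
	 */
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM;
	netdev->features    = netdev->hw_features;

	if (hw_revision_lacks_rx_csum)		/* hypothetical predicate */
		netdev->hw_features &= ~NETIF_F_RXCSUM;

	netdev->features &= netdev->hw_features;
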
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 95b7bd0d7abc..c81c79110cef 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}
static void async_set_reg_cb(struct urb *urb)
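
Editorial note: the rtl8150 change stops passing caller-supplied memory straight to usb_control_msg(). USB control transfers need DMA-able buffers, and callers frequently hand in on-stack or otherwise DMA-unsafe memory, so the register helpers now bounce through kmalloc()/kmemdup() allocations. A generic sketch of the write side (the read side mirrors it with kmalloc() plus a bounded memcpy back):

/* Sketch: never hand a caller's buffer to usb_control_msg(); bounce
 * writes through a freshly allocated, DMA-able buffer instead.
 */
static int usb_write_buf_sketch(struct usb_device *udev, unsigned int pipe,
				u8 req, u8 reqtype, u16 value, u16 index,
				const void *data, u16 size)
{
	void *buf = kmemdup(data, size, GFP_NOIO);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, pipe, req, reqtype, value, index,
			      buf, size, 500 /* ms timeout */);
	kfree(buf);
	return ret;
}
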
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 12071f1582df..d9440bc022f2 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */
struct sierra_net_data {
- u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@@ -122,6 +120,7 @@ struct param {
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@ struct param {
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
- u8 unused2[41];
+ u8 network_len; /* network name len */
+ u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+ struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@ struct lsi_umts {
u8 reserved[8];
} __packed;
+struct lsi_umts_dual {
+ struct lsi_umts lsi;
+ u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
+ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+ u8 unused4[23];
+ u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
+ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
+ u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+ u8 unused5[68];
+} __packed;
+
#define SIERRA_NET_LSI_COMMON_LEN 4
-#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
dev->data[0] = (unsigned long)priv;
}
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
- return skb->protocol == cpu_to_be16(ETH_P_IP);
+ return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+ skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@@ -349,49 +376,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
+ u32 expected_length;
- if (datalen < sizeof(struct lsi_umts)) {
- netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
- __func__, datalen,
- sizeof(struct lsi_umts));
+ if (datalen < sizeof(struct lsi_umts_single)) {
+ netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+ __func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
- if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
- netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
- __func__, be16_to_cpu(lsi->length),
- (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
- return -1;
+ /* Validate the session state */
+ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+ netdev_err(dev->net, "Session idle, 0x%02x\n",
+ lsi->session_state);
+ return 0;
}
/* Validate the protocol - only support UMTS for now */
- if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+ struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+ /* Validate the link type */
+ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+ single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ single->link_type);
+ return -1;
+ }
+ expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+ } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+ expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+ } else {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
- lsi->protocol);
+ lsi->protocol);
return -1;
}
- /* Validate the link type */
- if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
- netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
- lsi->link_type);
+ if (be16_to_cpu(lsi->length) != expected_length) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length), expected_length);
return -1;
}
/* Validate the coverage */
- if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
- || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+ lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
- /* Validate the session state */
- if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
- netdev_err(dev->net, "Session idle, 0x%02x\n",
- lsi->session_state);
- return 0;
- }
-
/* Set link_sense true */
return 1;
}
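
Editorial note: with dual-stack link status indications the LSI can be either the single-PDP or the dual (IPv4+IPv6) layout, so the parser now validates the session state first, picks the expected length from the protocol field, and only then compares it against the length carried in the header. A condensed sketch of that ordering (link-type and coverage checks omitted for brevity):

/* Sketch: returns <0 on malformed data, 0 for "no usable link", 1 for link up. */
static int parse_lsi_sketch(const struct lsi_umts *lsi)
{
	u32 expected;

	if (lsi->session_state == SIERRA_NET_SESSION_IDLE)
		return 0;			/* link not up yet */

	switch (lsi->protocol) {
	case SIERRA_NET_PROTOCOL_UMTS:
		expected = SIERRA_NET_LSI_UMTS_STATUS_LEN;
		break;
	case SIERRA_NET_PROTOCOL_UMTS_DS:
		expected = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
		break;
	default:
		return -1;			/* unknown protocol */
	}

	if (be16_to_cpu(lsi->length) != expected)
		return -1;			/* truncated or oversized LSI */

	return 1;
}
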
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
u8 numendpoints;
u16 fwattr = 0;
int status;
- struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
- /* we will have to manufacture ethernet headers, prepare template */
- eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
- memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
- eth->h_proto = cpu_to_be16(ETH_P_IP);
-
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, hh.hdrlen);
- /* We are going to accept this packet, prepare it */
- memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
- ETH_HLEN);
+ /* We are going to accept this packet, prepare it.
+ * In case protocol is IPv6, keep it, otherwise force IPv4.
+ */
+ skb_reset_mac_header(skb);
+ if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+ eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+ eth_zero_addr(eth_hdr(skb)->h_source);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)
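
Editorial note: instead of stamping a pre-built IPv4-only ethernet template over the payload, the receive fixup now rewrites the header in place: the MAC header is reset at the current data pointer, the protocol is forced to ETH_P_IP only when the frame is not already IPv6, and the addresses are filled in directly. A sketch of the same idea, assuming the frame already has ETH_HLEN of scratch space at skb->data as in the hunk:

/* Sketch: fabricate an ethernet header for a raw IP frame from the modem,
 * preserving IPv6 frames as-is.
 */
static void fixup_eth_header(struct sk_buff *skb, struct net_device *net)
{
	skb_reset_mac_header(skb);

	if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
		eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);

	eth_zero_addr(eth_hdr(skb)->h_source);		/* no real peer MAC */
	memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN);
}
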
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0520952aa096..8c39d6d690e5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -158,8 +158,8 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
return atomic64_read(&priv->dropped);
}
-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
+static void veth_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
@@ -177,8 +177,6 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
tot->rx_packets = one.packets;
}
rcu_read_unlock();
-
- return tot;
}
/* fake multicast ability */
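
Editorial note: veth (and, further down, virtio_net, vmxnet3 and vrf) are converted to the void-returning ndo_get_stats64: the core always passes in the rtnl_link_stats64 to fill and ignores any return value, so drivers simply stop returning `tot`. The shape of a converted callback, with hypothetical private counters:

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *tot)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical priv */

	tot->rx_packets = priv->rx_packets;		/* hypothetical fields */
	tot->rx_bytes   = priv->rx_bytes;
	tot->tx_packets = priv->tx_packets;
	tot->tx_bytes   = priv->tx_bytes;
	/* no return value: the core uses the structure it passed in */
}
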
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a105006ca63..bf95016f442a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -23,12 +23,12 @@
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
-#include <net/busy_poll.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -41,6 +41,11 @@ module_param(gso, bool, 0444);
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
+#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
+#define VIRTIO_XDP_HEADROOM 256
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -48,8 +53,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
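
Editorial note: with mergeable buffers virtio_net packs the true buffer size into the low bits of the aligned buffer address, so the alignment must leave enough low bits to describe sizes up to one page; aligning to roughly the square root of the page size splits the available bits evenly between address and size. A small userspace-style check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumption: 4 KiB pages */
#define MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)	/* mirrors the macro above */

int main(void)
{
	unsigned align = 1u << MIN_ALIGN_SHIFT;	/* 64 for 4 KiB pages */
	unsigned steps = align;			/* values the low bits can hold */

	/* 64-byte alignment frees 6 low bits: 64 size steps of 64 bytes
	 * each, i.e. up to a full 4096-byte page can be encoded.
	 */
	printf("align=%u, encodable size range=%u bytes\n", align, steps * align);
	return 0;
}
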
@@ -330,105 +343,49 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static void virtnet_xdp_xmit(struct virtnet_info *vi,
+static bool virtnet_xdp_xmit(struct virtnet_info *vi,
struct receive_queue *rq,
- struct send_queue *sq,
- struct xdp_buff *xdp,
- void *data)
+ struct xdp_buff *xdp)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
- unsigned int num_sg, len;
+ unsigned int len;
+ struct send_queue *sq;
+ unsigned int qp;
void *xdp_sent;
int err;
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+ sq = &vi->sq[qp];
+
/* Free up any pending old buffers before queueing new ones. */
while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- struct page *sent_page = virt_to_head_page(xdp_sent);
-
- put_page(sent_page);
- } else { /* small buffer */
- struct sk_buff *skb = xdp_sent;
+ struct page *sent_page = virt_to_head_page(xdp_sent);
- kfree_skb(skb);
- }
+ put_page(sent_page);
}
- if (vi->mergeable_rx_bufs) {
- /* Zero header and leave csum up to XDP layers */
- hdr = xdp->data;
- memset(hdr, 0, vi->hdr_len);
+ xdp->data -= vi->hdr_len;
+ /* Zero header and leave csum up to XDP layers */
+ hdr = xdp->data;
+ memset(hdr, 0, vi->hdr_len);
- num_sg = 1;
- sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
- } else { /* small buffer */
- struct sk_buff *skb = data;
+ sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
- /* Zero header and leave csum up to XDP layers */
- hdr = skb_vnet_hdr(skb);
- memset(hdr, 0, vi->hdr_len);
-
- num_sg = 2;
- sg_init_table(sq->sg, 2);
- sg_set_buf(sq->sg, hdr, vi->hdr_len);
- skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
- }
- err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
- data, GFP_ATOMIC);
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
if (unlikely(err)) {
- if (vi->mergeable_rx_bufs) {
- struct page *page = virt_to_head_page(xdp->data);
+ struct page *page = virt_to_head_page(xdp->data);
- put_page(page);
- } else /* small buffer */
- kfree_skb(data);
- return; // On error abort to avoid unnecessary kick
+ put_page(page);
+ return false;
}
virtqueue_kick(sq->vq);
+ return true;
}
-static u32 do_xdp_prog(struct virtnet_info *vi,
- struct receive_queue *rq,
- struct bpf_prog *xdp_prog,
- void *data, int len)
+static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
- int hdr_padded_len;
- struct xdp_buff xdp;
- void *buf;
- unsigned int qp;
- u32 act;
-
- if (vi->mergeable_rx_bufs) {
- hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
- xdp.data = data + hdr_padded_len;
- xdp.data_end = xdp.data + (len - vi->hdr_len);
- buf = data;
- } else { /* small buffers */
- struct sk_buff *skb = data;
-
- xdp.data = skb->data;
- xdp.data_end = xdp.data + len;
- buf = skb->data;
- }
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
- switch (act) {
- case XDP_PASS:
- return XDP_PASS;
- case XDP_TX:
- qp = vi->curr_queue_pairs -
- vi->xdp_queue_pairs +
- smp_processor_id();
- xdp.data = buf;
- virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
- return XDP_TX;
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- case XDP_DROP:
- return XDP_DROP;
- }
+ return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}
static struct sk_buff *receive_small(struct net_device *dev,
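
Editorial note: virtnet_xdp_xmit() now looks up its own transmit queue rather than having the caller pass one in. The XDP TX queues sit at the top of the queue-pair range, one per possible CPU, so indexing by smp_processor_id() gives each CPU a private queue and avoids locking on the XDP_TX path. A sketch of the selection, using the fields from the hunk:

/* Sketch: pick the per-CPU XDP transmit queue.  The last
 * 'xdp_queue_pairs' send queues are reserved for XDP, one per CPU,
 * so no lock is needed on this path.
 */
static struct send_queue *xdp_tx_queue(struct virtnet_info *vi)
{
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	return &vi->sq[qp];
}
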
@@ -436,40 +393,72 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct receive_queue *rq,
void *buf, unsigned int len)
{
- struct sk_buff * skb = buf;
+ struct sk_buff *skb;
struct bpf_prog *xdp_prog;
-
+ unsigned int xdp_headroom = virtnet_get_headroom(vi);
+ unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
+ unsigned int headroom = vi->hdr_len + header_offset;
+ unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ unsigned int delta = 0;
len -= vi->hdr_len;
- skb_trim(skb, len);
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
- struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
+ struct xdp_buff xdp;
+ void *orig_data;
u32 act;
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
- act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+
+ xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
+ xdp.data = xdp.data_hard_start + xdp_headroom;
+ xdp.data_end = xdp.data + len;
+ orig_data = xdp.data;
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
switch (act) {
case XDP_PASS:
+ /* Recalculate length in case bpf program changed it */
+ delta = orig_data - xdp.data;
break;
case XDP_TX:
+ if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ trace_xdp_exception(vi->dev, xdp_prog, act);
rcu_read_unlock();
goto xdp_xmit;
- case XDP_DROP:
default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(vi->dev, xdp_prog, act);
+ case XDP_DROP:
goto err_xdp;
}
}
rcu_read_unlock();
+ skb = build_skb(buf, buflen);
+ if (!skb) {
+ put_page(virt_to_head_page(buf));
+ goto err;
+ }
+ skb_reserve(skb, headroom - delta);
+ skb_put(skb, len + delta);
+ if (!delta) {
+ buf += header_offset;
+ memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
+ } /* keep zeroed vnet hdr since packet was changed by bpf */
+
+err:
return skb;
err_xdp:
rcu_read_unlock();
dev->stats.rx_dropped++;
- kfree_skb(skb);
+ put_page(virt_to_head_page(buf));
xdp_xmit:
return NULL;
}
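
Editorial note: the small-buffer receive path no longer gets a pre-built skb from the ring; it gets a page fragment laid out as [pad][XDP headroom][vnet header][packet], runs the XDP program directly on that memory, and only then wraps the fragment with build_skb(), adjusting for however far xdp_adjust_head moved the data pointer. An annotated sketch of the layout handling, using the names from the hunk:

	/* Small-buffer layout with XDP enabled:
	 *
	 *   buf
	 *   + VIRTNET_RX_PAD         (NET_IP_ALIGN + NET_SKB_PAD)
	 *   + VIRTIO_XDP_HEADROOM    (room for xdp_adjust_head)
	 *   + vi->hdr_len            (virtio-net header)
	 *   + packet data (len bytes)
	 *   + skb_shared_info        (accounted via SKB_DATA_ALIGN)
	 *
	 * delta = orig_data - xdp.data records how far the program moved
	 * the start of the packet.
	 */
	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(virt_to_head_page(buf));
		return NULL;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len + delta);
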
@@ -512,7 +501,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
unsigned int *len)
{
struct page *page = alloc_page(GFP_ATOMIC);
- unsigned int page_off = 0;
+ unsigned int page_off = VIRTIO_XDP_HEADROOM;
if (!page)
return NULL;
@@ -548,7 +537,8 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
put_page(p);
}
- *len = page_off;
+ /* Headroom does not contribute to packet length */
+ *len = page_off - VIRTIO_XDP_HEADROOM;
return page;
err_buf:
__free_pages(page, 0);
@@ -576,6 +566,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
struct page *xdp_page;
+ struct xdp_buff xdp;
+ void *data;
u32 act;
/* This happens when rx buffer size is underestimated */
@@ -585,7 +577,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
page, offset, &len);
if (!xdp_page)
goto err_xdp;
- offset = 0;
+ offset = VIRTIO_XDP_HEADROOM;
} else {
xdp_page = page;
}
@@ -598,28 +590,47 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
- act = do_xdp_prog(vi, rq, xdp_prog,
- page_address(xdp_page) + offset, len);
+ /* Allow consuming headroom but reserve enough space to push
+ * the descriptor on if we get an XDP_TX return code.
+ */
+ data = page_address(xdp_page) + offset;
+ xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
+ xdp.data = data + vi->hdr_len;
+ xdp.data_end = xdp.data + (len - vi->hdr_len);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
switch (act) {
case XDP_PASS:
+ /* recalculate offset to account for any header
+ * adjustments. Note other cases do not build an
+ * skb and avoid using offset
+ */
+ offset = xdp.data -
+ page_address(xdp_page) - vi->hdr_len;
+
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
- 0, len, PAGE_SIZE);
+ offset, len, PAGE_SIZE);
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
return head_skb;
}
break;
case XDP_TX:
+ if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ trace_xdp_exception(vi->dev, xdp_prog, act);
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
- case XDP_DROP:
default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(vi->dev, xdp_prog, act);
+ case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
@@ -706,13 +717,13 @@ xdp_xmit:
return NULL;
}
-static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len)
+static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len)
{
struct net_device *dev = vi->dev;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
+ int ret;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -724,9 +735,9 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
} else if (vi->big_packets) {
give_pages(rq, buf);
} else {
- dev_kfree_skb(buf);
+ put_page(virt_to_head_page(buf));
}
- return;
+ return 0;
}
if (vi->mergeable_rx_bufs)
@@ -737,14 +748,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
skb = receive_small(dev, vi, rq, buf, len);
if (unlikely(!skb))
- return;
+ return 0;
hdr = skb_vnet_hdr(skb);
- u64_stats_update_begin(&stats->rx_syncp);
- stats->rx_bytes += skb->len;
- stats->rx_packets++;
- u64_stats_update_end(&stats->rx_syncp);
+ ret = skb->len;
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -762,34 +770,36 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
ntohs(skb->protocol), skb->len, skb->pkt_type);
napi_gro_receive(&rq->napi, skb);
- return;
+ return ret;
frame_err:
dev->stats.rx_frame_errors++;
dev_kfree_skb(skb);
+ return 0;
}
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
- struct sk_buff *skb;
- struct virtio_net_hdr_mrg_rxbuf *hdr;
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ char *buf;
+ unsigned int xdp_headroom = virtnet_get_headroom(vi);
+ int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
int err;
- skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
- if (unlikely(!skb))
+ len = SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
return -ENOMEM;
- skb_put(skb, GOOD_PACKET_LEN);
-
- hdr = skb_vnet_hdr(skb);
- sg_init_table(rq->sg, 2);
- sg_set_buf(rq->sg, hdr, vi->hdr_len);
- skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
-
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ get_page(alloc_frag->page);
+ alloc_frag->offset += len;
+ sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
+ vi->hdr_len + GOOD_PACKET_LEN);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
- dev_kfree_skb(skb);
+ put_page(virt_to_head_page(buf));
return err;
}
@@ -853,24 +863,27 @@ static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
-static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi,
+ struct receive_queue *rq, gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
+ unsigned int headroom = virtnet_get_headroom(vi);
char *buf;
unsigned long ctx;
int err;
unsigned int len, hole;
len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
- if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+ if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ buf += headroom; /* advance address leaving hole at front of pkt */
ctx = mergeable_buf_to_ctx(buf, len);
get_page(alloc_frag->page);
- alloc_frag->offset += len;
+ alloc_frag->offset += len + headroom;
hole = alloc_frag->size - alloc_frag->offset;
- if (hole < len) {
+ if (hole < len + headroom) {
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
* the current buffer. This extra space is not included in
@@ -904,7 +917,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
- err = add_recvbuf_mergeable(rq, gfp);
+ err = add_recvbuf_mergeable(vi, rq, gfp);
else if (vi->big_packets)
err = add_recvbuf_big(vi, rq, gfp);
else
@@ -971,12 +984,13 @@ static void refill_work(struct work_struct *work)
static int virtnet_receive(struct receive_queue *rq, int budget)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
- unsigned int len, received = 0;
+ unsigned int len, received = 0, bytes = 0;
void *buf;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- receive_buf(vi, rq, buf, len);
+ bytes += receive_buf(vi, rq, buf, len);
received++;
}
@@ -985,6 +999,11 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
schedule_delayed_work(&vi->refill, 0);
}
+ u64_stats_update_begin(&stats->rx_syncp);
+ stats->rx_bytes += bytes;
+ stats->rx_packets += received;
+ u64_stats_update_end(&stats->rx_syncp);
+
return received;
}
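
Editorial note: receive_buf() now returns the byte count instead of touching the per-cpu stats for every packet, and virtnet_receive() folds a single u64_stats update over the whole budget, keeping the seqcount-protected update off the per-packet fast path. The resulting shape, with a hypothetical per-buffer helper:

/* Sketch: accumulate locally, publish once per poll. */
static int receive_batch_sketch(struct receive_queue *rq, int budget,
				struct virtnet_stats *stats)
{
	unsigned int received = 0, bytes = 0, len;
	void *buf;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		bytes += handle_one_buf(rq, buf, len);	/* hypothetical helper */
		received++;
	}

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes   += bytes;
	stats->rx_packets += received;
	u64_stats_update_end(&stats->rx_syncp);

	return received;
}
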
@@ -999,53 +1018,17 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
r = virtqueue_enable_cb_prepare(rq->vq);
- napi_complete_done(napi, received);
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- __napi_schedule(napi);
- }
- }
-
- return received;
-}
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int virtnet_busy_poll(struct napi_struct *napi)
-{
- struct receive_queue *rq =
- container_of(napi, struct receive_queue, napi);
- struct virtnet_info *vi = rq->vq->vdev->priv;
- int r, received = 0, budget = 4;
-
- if (!(vi->status & VIRTIO_NET_S_LINK_UP))
- return LL_FLUSH_FAILED;
-
- if (!napi_schedule_prep(napi))
- return LL_FLUSH_BUSY;
-
- virtqueue_disable_cb(rq->vq);
-
-again:
- received += virtnet_receive(rq, budget);
-
- r = virtqueue_enable_cb_prepare(rq->vq);
- clear_bit(NAPI_STATE_SCHED, &napi->state);
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- if (received < budget) {
- budget -= received;
- goto again;
- } else {
- __napi_schedule(napi);
+ if (napi_complete_done(napi, received)) {
+ if (unlikely(virtqueue_poll(rq->vq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(rq->vq);
+ __napi_schedule(napi);
+ }
}
}
return received;
}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
static int virtnet_open(struct net_device *dev)
{
@@ -1069,17 +1052,28 @@ static void free_old_xmit_skbs(struct send_queue *sq)
unsigned int len;
struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- u64_stats_update_end(&stats->tx_syncp);
+ bytes += skb->len;
+ packets++;
dev_kfree_skb_any(skb);
}
+
+ /* Avoid overhead when no packets have been processed
+ * happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&stats->tx_syncp);
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ u64_stats_update_end(&stats->tx_syncp);
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -1104,7 +1098,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr = skb_vnet_hdr(skb);
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev)))
+ virtio_is_little_endian(vi->vdev), false))
BUG();
if (vi->mergeable_rx_bufs)
@@ -1236,10 +1230,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
struct sockaddr *addr;
struct scatterlist sg;
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+ addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
- memcpy(addr, p, sizeof(*addr));
ret = eth_prepare_mac_addr_change(dev, addr);
if (ret)
@@ -1273,8 +1266,8 @@ out:
return ret;
}
-static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
+static void virtnet_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
struct virtnet_info *vi = netdev_priv(dev);
int cpu;
@@ -1307,8 +1300,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
-
- return tot;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1331,7 +1322,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
rtnl_unlock();
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct scatterlist sg;
struct net_device *dev = vi->dev;
@@ -1357,6 +1348,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+ int err;
+
+ rtnl_lock();
+ err = _virtnet_set_queues(vi, queue_pairs);
+ rtnl_unlock();
+ return err;
+}
+
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -1609,7 +1610,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
get_online_cpus();
- err = virtnet_set_queues(vi, queue_pairs);
+ err = _virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
@@ -1699,6 +1700,84 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_settings = virtnet_set_settings,
};
+static void virtnet_freeze_down(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+ int i;
+
+ /* Make sure no work handler is accessing the device */
+ flush_work(&vi->config_work);
+
+ netif_device_detach(vi->dev);
+ cancel_delayed_work_sync(&vi->refill);
+
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ napi_disable(&vi->rq[i].napi);
+ }
+}
+
+static int init_vqs(struct virtnet_info *vi);
+static void _remove_vq_common(struct virtnet_info *vi);
+
+static int virtnet_restore_up(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+ int err, i;
+
+ err = init_vqs(vi);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_enable(&vi->rq[i]);
+ }
+
+ netif_device_attach(vi->dev);
+ return err;
+}
+
+static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp)
+{
+ struct virtio_device *dev = vi->vdev;
+ int ret;
+
+ virtio_config_disable(dev);
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+ virtnet_freeze_down(dev);
+ _remove_vq_common(vi);
+
+ dev->config->reset(dev);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+
+ ret = virtio_finalize_features(dev);
+ if (ret)
+ goto err;
+
+ vi->xdp_queue_pairs = xdp_qp;
+ ret = virtnet_restore_up(dev);
+ if (ret)
+ goto err;
+ ret = _virtnet_set_queues(vi, curr_qp);
+ if (ret)
+ goto err;
+
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ virtio_config_enable(dev);
+ return 0;
+err:
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ return ret;
+}
+
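
Editorial note: because adding or removing XDP headroom changes the layout of every receive buffer, virtnet_xdp_set() can no longer just swap the program pointer. When xdp_queue_pairs changes it performs a full device reset: freeze the device, tear down the virtqueues, re-negotiate features, then restore the queues with the new headroom in place. A condensed view of that sequence (mirrors virtnet_reset() above, error handling trimmed):

	virtio_config_disable(vdev);		/* quiesce config change events */
	virtnet_freeze_down(vdev);		/* detach netdev, stop NAPI, cancel refill */
	_remove_vq_common(vi);			/* reset device, free old buffers and vqs */

	vdev->config->reset(vdev);
	virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);
	virtio_finalize_features(vdev);		/* re-negotiate features */

	vi->xdp_queue_pairs = xdp_qp;		/* new headroom takes effect from here */
	virtnet_restore_up(vdev);		/* rebuild vqs, refill rings, re-enable NAPI */
	_virtnet_set_queues(vi, curr_qp);

	virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);
	virtio_config_enable(vdev);
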
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
@@ -1736,21 +1815,25 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
return -ENOMEM;
}
- err = virtnet_set_queues(vi, curr_qp + xdp_qp);
- if (err) {
- dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
- return err;
- }
-
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
- if (IS_ERR(prog)) {
- virtnet_set_queues(vi, curr_qp);
+ if (IS_ERR(prog))
return PTR_ERR(prog);
+ }
+
+ /* Changing the headroom in buffers is a disruptive operation because
+ * existing buffers must be flushed and reallocated. This will happen
+ * when a xdp program is initially added or xdp is disabled by removing
+ * the xdp program resulting in number of XDP queues changing.
+ */
+ if (vi->xdp_queue_pairs != xdp_qp) {
+ err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp);
+ if (err) {
+ dev_warn(&dev->dev, "XDP reset failure.\n");
+ goto virtio_reset_err;
}
}
- vi->xdp_queue_pairs = xdp_qp;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1761,6 +1844,15 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
}
return 0;
+
+virtio_reset_err:
+ /* On reset error do our best to unwind XDP changes inflight and return
+ * error up to user space for resolution. The underlying reset hung on
+ * us so not much we can do here.
+ */
+ if (prog)
+ bpf_prog_sub(prog, vi->max_queue_pairs - 1);
+ return err;
}
static bool virtnet_xdp_query(struct net_device *dev)
@@ -1801,9 +1893,6 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = virtnet_busy_poll,
-#endif
.ndo_xdp = virtnet_xdp,
};
@@ -1864,12 +1953,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
kfree(vi->sq);
}
-static void free_receive_bufs(struct virtnet_info *vi)
+static void _free_receive_bufs(struct virtnet_info *vi)
{
struct bpf_prog *old_prog;
int i;
- rtnl_lock();
for (i = 0; i < vi->max_queue_pairs; i++) {
while (vi->rq[i].pages)
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
@@ -1879,6 +1967,12 @@ static void free_receive_bufs(struct virtnet_info *vi)
if (old_prog)
bpf_prog_put(old_prog);
}
+}
+
+static void free_receive_bufs(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ _free_receive_bufs(vi);
rtnl_unlock();
}
@@ -1890,7 +1984,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
@@ -1908,7 +2002,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_queue(vi, i))
+ if (!is_xdp_raw_buffer_queue(vi, i))
dev_kfree_skb(buf);
else
put_page(virt_to_head_page(buf));
@@ -1926,7 +2020,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
} else if (vi->big_packets) {
give_pages(&vi->rq[i], buf);
} else {
- dev_kfree_skb(buf);
+ put_page(virt_to_head_page(buf));
}
}
}
@@ -2313,9 +2407,7 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- rtnl_lock();
virtnet_set_queues(vi, vi->curr_queue_pairs);
- rtnl_unlock();
/* Assume link up if device can't report link status,
otherwise get link status from config. */
@@ -2347,6 +2439,15 @@ free:
return err;
}
+static void _remove_vq_common(struct virtnet_info *vi)
+{
+ vi->vdev->config->reset(vi->vdev);
+ free_unused_bufs(vi);
+ _free_receive_bufs(vi);
+ free_receive_page_frags(vi);
+ virtnet_del_vqs(vi);
+}
+
static void remove_vq_common(struct virtnet_info *vi)
{
vi->vdev->config->reset(vi->vdev);
@@ -2382,21 +2483,9 @@ static void virtnet_remove(struct virtio_device *vdev)
static int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
virtnet_cpu_notif_remove(vi);
-
- /* Make sure no work handler is accessing the device */
- flush_work(&vi->config_work);
-
- netif_device_detach(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++)
- napi_disable(&vi->rq[i].napi);
- }
-
+ virtnet_freeze_down(vdev);
remove_vq_common(vi);
return 0;
@@ -2405,28 +2494,12 @@ static int virtnet_freeze(struct virtio_device *vdev)
static int virtnet_restore(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
- err = init_vqs(vi);
+ err = virtnet_restore_up(vdev);
if (err)
return err;
-
- virtio_device_ready(vdev);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
- for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(&vi->rq[i]);
- }
-
- netif_device_attach(vi->dev);
-
- rtnl_lock();
virtnet_set_queues(vi, vi->curr_queue_pairs);
- rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
if (err)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e34b1297c96a..25bc764ae7dc 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget)
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxd_done);
vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxd_done);
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index aabc6ef366b4..f88ffafebfbf 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -113,7 +113,7 @@ vmxnet3_global_stats[] = {
};
-struct rtnl_link_stats64 *
+void
vmxnet3_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *netdev,
stats->rx_dropped += drvRxStats->drop_total;
stats->multicast += devRxStats->mcastPktsRxOK;
}
-
- return stats;
}
static int
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 59e077be8829..ba1c9f93592b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -465,8 +465,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-struct rtnl_link_stats64 *
-vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void vmxnet3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
extern char vmxnet3_driver_name[];
#endif
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 454f907d419a..22379da63400 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -77,8 +77,8 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
kfree_skb(skb);
}
-static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void vrf_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
int i;
@@ -102,7 +102,6 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
stats->rx_bytes += rbytes;
stats->rx_packets += rpkts;
}
- return stats;
}
/* Local traffic destined to local address. Reinsert the packet to rx
@@ -379,7 +378,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
- ret = dst_neigh_output(dst, neigh, skb);
+ sock_confirm_neigh(skb, neigh);
+ ret = neigh_output(neigh, skb);
rcu_read_unlock_bh();
return ret;
}
@@ -575,8 +575,10 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (!IS_ERR(neigh))
- ret = dst_neigh_output(dst, neigh, skb);
+ if (!IS_ERR(neigh)) {
+ sock_confirm_neigh(skb, neigh);
+ ret = neigh_output(neigh, skb);
+ }
rcu_read_unlock_bh();
err:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5723b5..556953f53437 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -75,6 +75,7 @@ struct vxlan_fdb {
struct list_head remotes;
u8 eth_addr[ETH_ALEN];
u16 state; /* see ndm_state */
+ __be32 vni;
u8 flags; /* see ndm_flags */
};
@@ -302,6 +303,10 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
goto nla_put_failure;
+ if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ nla_put_u32(skb, NDA_SRC_VNI,
+ be32_to_cpu(fdb->vni)))
+ goto nla_put_failure;
if (rdst->remote_ifindex &&
nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
goto nla_put_failure;
@@ -400,34 +405,51 @@ static u32 eth_hash(const unsigned char *addr)
return hash_64(value, FDB_HASH_BITS);
}
+static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+{
+ /* use 1 byte of OUI and 3 bytes of NIC */
+ u32 key = get_unaligned((u32 *)(addr + 2));
+
+ return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
+}
+
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
- const u8 *mac)
+ const u8 *mac, __be32 vni)
{
- return &vxlan->fdb_head[eth_hash(mac)];
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA)
+ return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
+ else
+ return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac)
+ const u8 *mac, __be32 vni)
{
- struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
+ struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
struct vxlan_fdb *f;
hlist_for_each_entry_rcu(f, head, hlist) {
- if (ether_addr_equal(mac, f->eth_addr))
- return f;
+ if (ether_addr_equal(mac, f->eth_addr)) {
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (vni == f->vni)
+ return f;
+ } else {
+ return f;
+ }
+ }
}
return NULL;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac)
+ const u8 *mac, __be32 vni)
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac);
+ f = __vxlan_find_mac(vxlan, mac, vni);
if (f)
f->used = jiffies;
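
Editorial note: in collect-metadata mode a single vxlan device serves many VNIs, so forwarding entries are now keyed by (MAC, VNI): the hash mixes the VNI in with jhash_2words(), and lookups compare the VNI only when COLLECT_METADATA is set, leaving traditional per-VNI devices untouched. A condensed sketch of the lookup, restructured with early continues:

/* Sketch: FDB lookup keyed on (mac, vni) in collect-metadata mode,
 * mac-only otherwise.
 */
static struct vxlan_fdb *find_fdb_sketch(struct vxlan_dev *vxlan,
					 const u8 *mac, __be32 vni)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (!ether_addr_equal(mac, f->eth_addr))
			continue;
		if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && vni != f->vni)
			continue;	/* same MAC learned on a different VNI */
		return f;
	}
	return NULL;
}
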
@@ -605,15 +627,15 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
- __be16 port, __be32 vni, __u32 ifindex,
- __u8 ndm_flags)
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u8 ndm_flags)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
int rc;
- f = __vxlan_find_mac(vxlan, mac);
+ f = __vxlan_find_mac(vxlan, mac, src_vni);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
@@ -670,6 +692,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
+ f->vni = src_vni;
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
@@ -681,7 +704,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac));
+ vxlan_fdb_head(vxlan, mac, src_vni));
}
if (notify) {
@@ -718,8 +741,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
- union vxlan_addr *ip, __be16 *port, __be32 *vni,
- u32 *ifindex)
+ union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
+ __be32 *vni, u32 *ifindex)
{
struct net *net = dev_net(vxlan->dev);
int err;
@@ -757,6 +780,14 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
*vni = vxlan->default_dst.remote_vni;
}
+ if (tb[NDA_SRC_VNI]) {
+ if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
+ return -EINVAL;
+ *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
+ } else {
+ *src_vni = vxlan->default_dst.remote_vni;
+ }
+
if (tb[NDA_IFINDEX]) {
struct net_device *tdev;
@@ -782,7 +813,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* struct net *net = dev_net(vxlan->dev); */
union vxlan_addr ip;
__be16 port;
- __be32 vni;
+ __be32 src_vni, vni;
u32 ifindex;
int err;
@@ -795,7 +826,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (tb[NDA_DST] == NULL)
return -EINVAL;
- err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
+ err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
if (err)
return err;
@@ -804,36 +835,24 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
- port, vni, ifindex, ndm->ndm_flags);
+ port, src_vni, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
-/* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
+ u16 vid)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
- union vxlan_addr ip;
- __be16 port;
- __be32 vni;
- u32 ifindex;
- int err;
-
- err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
- if (err)
- return err;
-
- err = -ENOENT;
+ int err = -ENOENT;
- spin_lock_bh(&vxlan->hash_lock);
- f = vxlan_find_mac(vxlan, addr);
+ f = vxlan_find_mac(vxlan, addr, src_vni);
if (!f)
- goto out;
+ return err;
if (!vxlan_addr_any(&ip)) {
rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
@@ -841,8 +860,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
goto out;
}
- err = 0;
-
/* remove a destination if it's not the only one on the list,
* otherwise destroy the fdb entry
*/
@@ -856,6 +873,28 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
vxlan_fdb_destroy(vxlan, f);
out:
+ return 0;
+}
+
+/* Delete entry (via netlink) */
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ union vxlan_addr ip;
+ __be32 src_vni, vni;
+ __be16 port;
+ u32 ifindex;
+ int err;
+
+ err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
+ if (err)
+ return err;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
+ vid);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -901,12 +940,13 @@ out:
* Return true if packet is bogus and should be dropped.
*/
static bool vxlan_snoop(struct net_device *dev,
- union vxlan_addr *src_ip, const u8 *src_mac)
+ union vxlan_addr *src_ip, const u8 *src_mac,
+ __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- f = vxlan_find_mac(vxlan, src_mac);
+ f = vxlan_find_mac(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
@@ -935,6 +975,7 @@ static bool vxlan_snoop(struct net_device *dev,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->cfg.dst_port,
+ vni,
vxlan->default_dst.remote_vni,
0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
@@ -1202,7 +1243,7 @@ static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
- struct sk_buff *skb)
+ struct sk_buff *skb, __be32 vni)
{
union vxlan_addr saddr;
@@ -1226,7 +1267,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
}
if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni))
return false;
return true;
@@ -1268,6 +1309,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
__be16 protocol = htons(ETH_P_TEB);
bool raw_proto = false;
void *oiph;
+ __be32 vni = 0;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1289,7 +1331,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vs)
goto drop;
- vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
+ vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+
+ vxlan = vxlan_vs_find_vni(vs, vni);
if (!vxlan)
goto drop;
@@ -1307,7 +1351,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (vxlan_collect_metadata(vs)) {
- __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
@@ -1345,7 +1388,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
}
if (!raw_proto) {
- if (!vxlan_set_mac(vxlan, vs, skb))
+ if (!vxlan_set_mac(vxlan, vs, skb, vni))
goto drop;
} else {
skb_reset_mac_header(skb);
@@ -1377,7 +1420,7 @@ drop:
return 0;
}
-static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct arphdr *parp;
@@ -1424,7 +1467,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha);
+ f = vxlan_find_mac(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
@@ -1548,12 +1591,12 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
return reply;
}
-static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct nd_msg *msg;
const struct ipv6hdr *iphdr;
- const struct in6_addr *saddr, *daddr;
+ const struct in6_addr *daddr;
struct neighbour *n;
struct inet6_dev *in6_dev;
@@ -1562,7 +1605,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
iphdr = ipv6_hdr(skb);
- saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
msg = (struct nd_msg *)skb_transport_header(skb);
@@ -1585,7 +1627,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha);
+ f = vxlan_find_mac(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
@@ -1798,7 +1840,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
- __be32 daddr, __be32 *saddr,
+ __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1824,6 +1866,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
+ fl4.fl4_dport = dport;
+ fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1895,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
+ __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1877,6 +1922,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = dport;
+ fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
@@ -1901,7 +1948,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
- struct vxlan_dev *dst_vxlan)
+ struct vxlan_dev *dst_vxlan, __be32 vni)
{
struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
@@ -1927,7 +1974,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
}
if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
+ vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -1946,7 +1993,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *vxlan, union vxlan_addr *daddr,
- __be32 dst_port, __be32 vni, struct dst_entry *dst,
+ __be16 dst_port, __be32 vni, struct dst_entry *dst,
u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
@@ -1971,7 +2018,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
return -ENOENT;
}
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
return 1;
}
@@ -1979,7 +2026,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- struct vxlan_rdst *rdst, bool did_rsc)
+ __be32 default_vni, struct vxlan_rdst *rdst,
+ bool did_rsc)
{
struct dst_cache *dst_cache;
struct ip_tunnel_info *info;
@@ -2006,14 +2054,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (vxlan_addr_any(dst)) {
if (did_rsc) {
/* short-circuited back to local bridge */
- vxlan_encap_bypass(skb, vxlan, vxlan);
+ vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
return;
}
goto drop;
}
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
- vni = rdst->remote_vni;
+ vni = (rdst->remote_vni) ? : default_vni;
src = &vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
md->gbp = skb->mark;
@@ -2068,6 +2116,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -2104,6 +2153,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
@@ -2166,23 +2216,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
bool did_rsc = false;
struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
+ __be32 vni = 0;
info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
- if (info && info->mode & IP_TUNNEL_INFO_TX)
- vxlan_xmit_one(skb, dev, NULL, false);
- else
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
+ info->mode & IP_TUNNEL_INFO_TX) {
+ vni = tunnel_id_to_key32(info->key.tun_id);
+ } else {
+ if (info && info->mode & IP_TUNNEL_INFO_TX)
+ vxlan_xmit_one(skb, dev, vni, NULL, false);
+ else
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
}
if (vxlan->flags & VXLAN_F_PROXY) {
eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
- return arp_reduce(dev, skb);
+ return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
pskb_may_pull(skb, sizeof(struct ipv6hdr)
@@ -2193,13 +2249,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code == 0 &&
msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
- return neigh_reduce(dev, skb);
+ return neigh_reduce(dev, skb, vni);
}
#endif
}
eth = eth_hdr(skb);
- f = vxlan_find_mac(vxlan, eth->h_dest);
+ f = vxlan_find_mac(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
@@ -2207,11 +2263,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
- f = vxlan_find_mac(vxlan, eth->h_dest);
+ f = vxlan_find_mac(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
- f = vxlan_find_mac(vxlan, all_zeros_mac);
+ f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
@@ -2232,11 +2288,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
- vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
}
if (fdst)
- vxlan_xmit_one(skb, dev, fdst, did_rsc);
+ vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
kfree_skb(skb);
return NETDEV_TX_OK;
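
[Editor's note on the hunk above: in COLLECT_METADATA mode the per-packet VNI is recovered from the tunnel metadata with tunnel_id_to_key32(), and an fdb destination that carries its own VNI still takes precedence through the GCC "?:" shorthand (x ?: y reads as x ? x : y). A minimal sketch of the conversion helper, paraphrased from include/net/ip_tunnels.h and worth verifying against the tree:]

	/* map the 64-bit metadata tunnel id to the 32-bit VXLAN VNI key */
	static inline __be32 tunnel_id_to_key32(__be64 tun_id)
	{
	#ifdef __BIG_ENDIAN
		/* the key is the low 32 bits of the big-endian value */
		return (__force __be32)tun_id;
	#else
		/* on little endian the key sits in the upper 32 bits */
		return (__force __be32)((__force u64)tun_id >> 32);
	#endif
	}
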
@@ -2261,7 +2317,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2300,12 +2356,12 @@ static int vxlan_init(struct net_device *dev)
return 0;
}
-static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
+static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
struct vxlan_fdb *f;
spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, all_zeros_mac);
+ f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f);
spin_unlock_bh(&vxlan->hash_lock);
@@ -2315,7 +2371,7 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- vxlan_fdb_delete_default(vxlan);
+ vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
free_percpu(dev->tstats);
}
@@ -2347,7 +2403,7 @@ static int vxlan_open(struct net_device *dev)
}
/* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
@@ -2357,6 +2413,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
+ if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+ continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f);
@@ -2378,7 +2436,7 @@ static int vxlan_stop(struct net_device *dev)
del_timer_sync(&vxlan->age_timer);
- vxlan_flush(vxlan);
+ vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
return ret;
@@ -2430,7 +2488,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
- &info->key.u.ipv4.src, NULL, info);
+ &info->key.u.ipv4.src, dport, sport,
+ &info->dst_cache, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -2441,7 +2500,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, NULL, info);
+ &info->key.u.ipv6.src, dport, sport,
+ &info->dst_cache, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
@@ -2774,39 +2834,40 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
}
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf)
+ struct vxlan_config *conf,
+ bool changelink)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
- int err;
bool use_ipv6 = false;
__be16 default_port = vxlan->cfg.dst_port;
struct net_device *lowerdev = NULL;
- if (conf->flags & VXLAN_F_GPE) {
- /* For now, allow GPE only together with COLLECT_METADATA.
- * This can be relaxed later; in such case, the other side
- * of the PtP link will have to be provided.
- */
- if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
- !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
- pr_info("unsupported combination of extensions\n");
- return -EINVAL;
+ if (!changelink) {
+ if (conf->flags & VXLAN_F_GPE) {
+ /* For now, allow GPE only together with
+ * COLLECT_METADATA. This can be relaxed later; in such
+ * case, the other side of the PtP link will have to be
+ * provided.
+ */
+ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ pr_info("unsupported combination of extensions\n");
+ return -EINVAL;
+ }
+ vxlan_raw_setup(dev);
+ } else {
+ vxlan_ether_setup(dev);
}
- vxlan_raw_setup(dev);
- } else {
- vxlan_ether_setup(dev);
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+ vxlan->net = src_net;
}
- /* MTU range: 68 - 65535 */
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
-
- vxlan->net = src_net;
-
dst->remote_vni = conf->vni;
memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
@@ -2828,12 +2889,14 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- if (conf->remote_ifindex) {
+ if (conf->remote_ifindex &&
+ conf->remote_ifindex != vxlan->cfg.remote_ifindex) {
lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
dst->remote_ifindex = conf->remote_ifindex;
if (!lowerdev) {
- pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
+ pr_info("ifindex %d does not exist\n",
+ dst->remote_ifindex);
return -ENODEV;
}
@@ -2852,7 +2915,8 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len;
- } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+ } else if (!conf->remote_ifindex &&
+ vxlan_addr_multicast(&dst->remote_ip)) {
pr_info("multicast destination requires interface to be specified\n");
return -EINVAL;
}
@@ -2883,7 +2947,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
memcpy(&vxlan->cfg, conf, sizeof(*conf));
if (!vxlan->cfg.dst_port) {
if (conf->flags & VXLAN_F_GPE)
- vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+ vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
else
vxlan->cfg.dst_port = default_port;
}
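
[Editor's note: the htons() fix above matters because vxlan->cfg.dst_port is a __be16; assigning the bare literal 4790 stores it in host byte order, so the VXLAN-GPE port would go out byte-swapped on little-endian machines. A small stand-alone illustration of the difference, plain userspace C with hypothetical names:]

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint16_t bare = 4790;         /* host order, 0x12b6 */
		uint16_t wire = htons(4790);  /* network byte order */
		uint8_t *b = (uint8_t *)&bare, *w = (uint8_t *)&wire;

		/* on a little-endian host this prints "b6 12" vs "12 b6" */
		printf("bare literal: %02x %02x\n", b[0], b[1]);
		printf("htons(4790) : %02x %02x\n", w[0], w[1]);
		return 0;
	}
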
@@ -2892,6 +2956,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
if (!vxlan->cfg.age_interval)
vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+ if (changelink)
+ return 0;
+
list_for_each_entry(tmp, &vn->vxlan_list, next) {
if (tmp->cfg.vni == conf->vni &&
(tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
@@ -2904,146 +2971,296 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
}
}
- dev->ethtool_ops = &vxlan_ethtool_ops;
-
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE|NUD_PERMANENT,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
-
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan);
- return err;
- }
-
- list_add(&vxlan->next, &vn->vxlan_list);
-
return 0;
}
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
+ struct net_device *dev, struct vxlan_config *conf,
+ bool changelink)
{
- struct vxlan_config conf;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ memset(conf, 0, sizeof(*conf));
+
+ /* if changelink operation, start with old existing cfg */
+ if (changelink)
+ memcpy(conf, &vxlan->cfg, sizeof(*conf));
- memset(&conf, 0, sizeof(conf));
+ if (data[IFLA_VXLAN_ID]) {
+ __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
- if (data[IFLA_VXLAN_ID])
- conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
+ if (changelink && (vni != conf->vni))
+ return -EOPNOTSUPP;
+ conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
+ }
if (data[IFLA_VXLAN_GROUP]) {
- conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+ conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
- conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
- conf.remote_ip.sa.sa_family = AF_INET6;
+ conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+ conf->remote_ip.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LOCAL]) {
- conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
- conf.saddr.sa.sa_family = AF_INET;
+ conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+ conf->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
/* TODO: respect scope id */
- conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
- conf.saddr.sa.sa_family = AF_INET6;
+ conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+ conf->saddr.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LINK])
- conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+ conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
if (data[IFLA_VXLAN_TOS])
- conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+ conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
if (data[IFLA_VXLAN_TTL])
- conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+ conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
if (data[IFLA_VXLAN_LABEL])
- conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
+ conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
IPV6_FLOWLABEL_MASK;
- if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
- conf.flags |= VXLAN_F_LEARN;
+ if (data[IFLA_VXLAN_LEARNING]) {
+ if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) {
+ conf->flags |= VXLAN_F_LEARN;
+ } else {
+ conf->flags &= ~VXLAN_F_LEARN;
+ vxlan->flags &= ~VXLAN_F_LEARN;
+ }
+ } else if (!changelink) {
+ /* default to learn on a new device */
+ conf->flags |= VXLAN_F_LEARN;
+ }
- if (data[IFLA_VXLAN_AGEING])
- conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+ if (data[IFLA_VXLAN_AGEING]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+ }
- if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
- conf.flags |= VXLAN_F_PROXY;
+ if (data[IFLA_VXLAN_PROXY]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
+ conf->flags |= VXLAN_F_PROXY;
+ }
- if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
- conf.flags |= VXLAN_F_RSC;
+ if (data[IFLA_VXLAN_RSC]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_RSC]))
+ conf->flags |= VXLAN_F_RSC;
+ }
- if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
- conf.flags |= VXLAN_F_L2MISS;
+ if (data[IFLA_VXLAN_L2MISS]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
+ conf->flags |= VXLAN_F_L2MISS;
+ }
- if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
- conf.flags |= VXLAN_F_L3MISS;
+ if (data[IFLA_VXLAN_L3MISS]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
+ conf->flags |= VXLAN_F_L3MISS;
+ }
- if (data[IFLA_VXLAN_LIMIT])
- conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+ if (data[IFLA_VXLAN_LIMIT]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+ }
- if (data[IFLA_VXLAN_COLLECT_METADATA] &&
- nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
- conf.flags |= VXLAN_F_COLLECT_METADATA;
+ if (data[IFLA_VXLAN_COLLECT_METADATA]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+ conf->flags |= VXLAN_F_COLLECT_METADATA;
+ }
if (data[IFLA_VXLAN_PORT_RANGE]) {
- const struct ifla_vxlan_port_range *p
- = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
- conf.port_min = ntohs(p->low);
- conf.port_max = ntohs(p->high);
+ if (!changelink) {
+ const struct ifla_vxlan_port_range *p
+ = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
+ conf->port_min = ntohs(p->low);
+ conf->port_max = ntohs(p->high);
+ } else {
+ return -EOPNOTSUPP;
+ }
}
- if (data[IFLA_VXLAN_PORT])
- conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ if (data[IFLA_VXLAN_PORT]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ }
- if (data[IFLA_VXLAN_UDP_CSUM] &&
- !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
- conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
+ if (data[IFLA_VXLAN_UDP_CSUM]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+ conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
+ }
- if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
- nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
- conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
+ conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ }
- if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
- nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
- conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+ conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ }
- if (data[IFLA_VXLAN_REMCSUM_TX] &&
- nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
- conf.flags |= VXLAN_F_REMCSUM_TX;
+ if (data[IFLA_VXLAN_REMCSUM_TX]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+ conf->flags |= VXLAN_F_REMCSUM_TX;
+ }
- if (data[IFLA_VXLAN_REMCSUM_RX] &&
- nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
- conf.flags |= VXLAN_F_REMCSUM_RX;
+ if (data[IFLA_VXLAN_REMCSUM_RX]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+ conf->flags |= VXLAN_F_REMCSUM_RX;
+ }
- if (data[IFLA_VXLAN_GBP])
- conf.flags |= VXLAN_F_GBP;
+ if (data[IFLA_VXLAN_GBP]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->flags |= VXLAN_F_GBP;
+ }
- if (data[IFLA_VXLAN_GPE])
- conf.flags |= VXLAN_F_GPE;
+ if (data[IFLA_VXLAN_GPE]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->flags |= VXLAN_F_GPE;
+ }
- if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
- conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ }
- if (tb[IFLA_MTU])
- conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+ if (tb[IFLA_MTU]) {
+ if (changelink)
+ return -EOPNOTSUPP;
+ conf->mtu = nla_get_u32(tb[IFLA_MTU]);
+ }
- return vxlan_dev_configure(src_net, dev, &conf);
+ return 0;
+}
+
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_config conf;
+ int err;
+
+ err = vxlan_nl2conf(tb, data, dev, &conf, false);
+ if (err)
+ return err;
+
+ err = vxlan_dev_configure(src_net, dev, &conf, false);
+ if (err)
+ return err;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+
+ return 0;
+}
+
+static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct vxlan_rdst old_dst;
+ struct vxlan_config conf;
+ int err;
+
+ err = vxlan_nl2conf(tb, data, dev, &conf, true);
+ if (err)
+ return err;
+
+ memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
+
+ err = vxlan_dev_configure(vxlan->net, dev, &conf, true);
+ if (err)
+ return err;
+
+ /* handle default dst entry */
+ if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
+ spin_lock_bh(&vxlan->hash_lock);
+ if (!vxlan_addr_any(&old_dst.remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ old_dst.remote_ip,
+ vxlan->cfg.dst_port,
+ old_dst.remote_vni,
+ old_dst.remote_vni,
+ old_dst.remote_ifindex, 0);
+
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_CREATE | NLM_F_APPEND,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ NTF_SELF);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock);
+ return err;
+ }
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+ }
+
+ return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -3051,6 +3268,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ vxlan_flush(vxlan, true);
+
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
hlist_del_rcu(&vxlan->hlist);
@@ -3197,6 +3416,7 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.setup = vxlan_setup,
.validate = vxlan_validate,
.newlink = vxlan_newlink,
+ .changelink = vxlan_changelink,
.dellink = vxlan_dellink,
.get_size = vxlan_get_size,
.fill_info = vxlan_fill_info,
@@ -3218,7 +3438,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = vxlan_dev_configure(net, dev, conf);
+ err = vxlan_dev_configure(net, dev, conf, false);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index e38ce4da3efb..a5045b5279d7 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
howmany += hdlc_rx_done(priv, budget - howmany);
if (howmany < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, howmany);
qe_setbits32(priv->uccf->p_uccm,
(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
@@ -1175,3 +1175,4 @@ static struct platform_driver ucc_hdlc_driver = {
};
module_platform_driver(ucc_hdlc_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 7ef49dab6855..cff0cfadd650 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
received = sca_rx_done(port, budget);
if (received < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, received);
enable_intr(port);
}
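
[Editor's note: both WAN drivers above move from napi_complete() to napi_complete_done(), which additionally reports how many packets the poll actually processed so the core can drive its adaptive polling / gro_flush_timeout logic. The usual shape of such a poll handler is roughly the following sketch, with hypothetical helper names, not code from either driver:]

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv =
			container_of(napi, struct example_priv, napi);
		int work = example_rx(priv, budget);	/* hypothetical rx routine */

		if (work < budget) {
			/* done for now: report the real amount of work and
			 * re-enable the device interrupt
			 */
			napi_complete_done(napi, work);
			example_enable_irq(priv);	/* hypothetical */
		}
		return work;
	}
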
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 9d9b4e0def2a..1f6bc8791d51 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -241,7 +241,6 @@ static struct spi_driver slic_ds26522_driver = {
.driver = {
.name = "ds26522",
.bus = &spi_bus_type,
- .owner = THIS_MODULE,
.of_match_table = slic_ds26522_match,
},
.probe = slic_ds26522_probe,
@@ -249,15 +248,4 @@ static struct spi_driver slic_ds26522_driver = {
.id_table = slic_ds26522_id,
};
-static int __init slic_ds26522_init(void)
-{
- return spi_register_driver(&slic_ds26522_driver);
-}
-
-static void __exit slic_ds26522_exit(void)
-{
- spi_unregister_driver(&slic_ds26522_driver);
-}
-
-module_init(slic_ds26522_init);
-module_exit(slic_ds26522_exit);
+module_spi_driver(slic_ds26522_driver);
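
[Editor's note: the removed init/exit pair is exactly the boilerplate that module_spi_driver() generates; its expansion via module_driver() in <linux/spi/spi.h> is roughly:]

	/* approximate expansion of module_spi_driver(slic_ds26522_driver) */
	static int __init slic_ds26522_driver_init(void)
	{
		return spi_register_driver(&slic_ds26522_driver);
	}
	module_init(slic_ds26522_driver_init);

	static void __exit slic_ds26522_driver_exit(void)
	{
		spi_unregister_driver(&slic_ds26522_driver);
	}
	module_exit(slic_ds26522_driver_exit);
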
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 70ecd82d674d..098c814e22c8 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
skb_tail_pointer(newskb),
RX_PKT_SIZE,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(priv->pdev,
+ priv->rx_buffers[entry].mapping)) {
+ priv->rx_buffers[entry].skb = NULL;
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ /* TODO: update rx dropped stats */
+ }
} else {
skb = NULL;
/* TODO: update rx dropped stats */
@@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
skb_tail_pointer(rx_info->skb),
RX_PKT_SIZE,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+ dev_kfree_skb(rx_info->skb);
+ rx_info->skb = NULL;
+ break;
+ }
+
desc->buffer1 = cpu_to_le32(rx_info->mapping);
desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
}
@@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
}
/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
+static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
u16 plcp_signal,
size_t hdrlen)
{
@@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+ return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
@@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
/* Trigger transmit poll */
ADM8211_CSR_WRITE(TDR, 0);
+
+ return 0;
}
/* Put adm8211_tx_hdr on skb and transmit */
@@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev,
txhdr->retry_limit = info->control.rates[0].count;
- adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
+ if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
+ /* Drop packet */
+ ieee80211_free_txskb(dev, skb);
+ }
}
static int adm8211_alloc_rings(struct ieee80211_hw *dev)
@@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev,
priv->rx_ring_size = rx_ring_size;
priv->tx_ring_size = tx_ring_size;
- if (adm8211_alloc_rings(dev)) {
+ err = adm8211_alloc_rings(dev);
+ if (err) {
printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
pci_name(pdev));
goto err_iounmap;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index db1ca629cbd6..b4241cf9b7ed 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -3,6 +3,7 @@ config ATH10K
depends on MAC80211 && HAS_DMA
select ATH_COMMON
select CRC32
+ select WANT_DEV_COREDUMP
---help---
This module adds support for wireless adapters based on
Atheros IEEE 802.11ac family of chipsets.
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 766c63bf05c4..45226dbee5ce 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 KiB */
+
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -699,6 +702,25 @@ out:
return ret;
}
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
+
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ret = ath10k_pci_setup_resource(ar);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 0b4d79659884..4045657e0a6e 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -958,10 +958,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- dma_alloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr, GFP_KERNEL);
+ dma_zalloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
@@ -969,13 +969,6 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
dest_ring->base_addr_ce_space_unaligned = base_addr;
- /*
- * Correctly initialize memory to 0 to prevent garbage
- * data crashing system when download firmware
- */
- memset(dest_ring->base_addr_owner_space_unaligned, 0,
- nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
-
dest_ring->base_addr_owner_space = PTR_ALIGN(
dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
@@ -1130,3 +1123,42 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->src_ring = NULL;
ce_state->dest_ring = NULL;
}
+
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_crash_data ce;
+ u32 addr, id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_err(ar, "Copy Engine register dump:\n");
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ for (id = 0; id < CE_COUNT; id++) {
+ addr = ath10k_ce_base_address(ar, id);
+ ce.base_addr = cpu_to_le32(addr);
+
+ ce.src_wr_idx =
+ cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
+ ce.src_r_idx =
+ cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
+ ce.dst_wr_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
+ ce.dst_r_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
+
+ if (crash_data)
+ crash_data->ce_crash_data[id] = ce;
+
+ ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
+ le32_to_cpu(ce.base_addr),
+ le32_to_cpu(ce.src_wr_idx),
+ le32_to_cpu(ce.src_r_idx),
+ le32_to_cpu(ce.dst_wr_idx),
+ le32_to_cpu(ce.dst_r_idx));
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dfc098606bee..e76a98242b98 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -20,8 +20,6 @@
#include "hif.h"
-/* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 12
#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
/* Descriptor rings must be aligned to this boundary */
@@ -228,6 +226,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 749e381edd38..dd902b43f8f7 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -349,7 +349,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t buf_len)
{
- unsigned int len = 0;
+ size_t len = 0;
int i;
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
@@ -454,7 +454,10 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
dir = ".";
snprintf(filename, sizeof(filename), "%s/%s", dir, file);
- ret = request_firmware(&fw, filename, ar->dev);
+ ret = request_firmware_direct(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+
if (ret)
return ERR_PTR(ret);
@@ -694,8 +697,12 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
"boot get otp board id result 0x%08x board_id %d chip_id %d\n",
result, board_id, chip_id);
- if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+ (board_id == 0)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "board id does not exist in otp, ignore it\n");
return -EOPNOTSUPP;
+ }
ar->id.bmi_ids_valid = true;
ar->id.bmi_board_id = board_id;
@@ -1091,7 +1098,8 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar)
ar->bd_api = 1;
ret = ath10k_core_fetch_board_data_api_1(ar);
if (ret) {
- ath10k_err(ar, "failed to fetch board data\n");
+ ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
+ ar->hw_params.fw.dir);
return ret;
}
@@ -1112,12 +1120,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
/* first fetch the firmware file (firmware-*.bin) */
fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
name);
- if (IS_ERR(fw_file->firmware)) {
- ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
- ar->hw_params.fw.dir, name,
- PTR_ERR(fw_file->firmware));
+ if (IS_ERR(fw_file->firmware))
return PTR_ERR(fw_file->firmware);
- }
data = fw_file->firmware->data;
len = fw_file->firmware->size;
@@ -1281,44 +1285,39 @@ err:
return ret;
}
+static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
+ size_t fw_name_len, int fw_api)
+{
+ scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api);
+}
+
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
- int ret;
+ int ret, i;
+ char fw_name[100];
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
- ar->fw_api = 5;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) {
+ ar->fw_api = i;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n",
+ ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
- &ar->normal_mode_fw.fw_file);
- if (ret == 0)
- goto success;
-
- ar->fw_api = 4;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
-
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
- &ar->normal_mode_fw.fw_file);
- if (ret == 0)
- goto success;
+ ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api);
+ ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
+ &ar->normal_mode_fw.fw_file);
+ if (!ret)
+ goto success;
+ }
- ar->fw_api = 3;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ /* we end up here if we couldn't fetch any firmware */
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
- &ar->normal_mode_fw.fw_file);
- if (ret == 0)
- goto success;
-
- ar->fw_api = 2;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d",
+ ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir,
+ ret);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
- &ar->normal_mode_fw.fw_file);
- if (ret)
- return ret;
+ return ret;
success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
@@ -1510,6 +1509,7 @@ static int ath10k_init_hw_params(struct ath10k *ar)
static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ int ret;
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
@@ -1561,6 +1561,11 @@ static void ath10k_core_restart(struct work_struct *work)
}
mutex_unlock(&ar->conf_mutex);
+
+ ret = ath10k_debug_fw_devcoredump(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+ ret);
}
static void ath10k_core_set_coverage_class_work(struct work_struct *work)
@@ -1913,7 +1918,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
- if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
val = 0;
if (ath10k_peer_stats_enabled(ar))
val = WMI_10_4_PEER_STATS;
@@ -1966,10 +1972,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* possible to implicitly make it correct by creating a dummy vdev and
* then deleting it.
*/
- status = ath10k_core_reset_rx_filter(ar);
- if (status) {
- ath10k_err(ar, "failed to reset rx filter: %d\n", status);
- goto err_hif_stop;
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar,
+ "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
}
/* If firmware indicates Full Rx Reorder support it must be used in a
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 09ff8b8a6441..757242ef52ac 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -46,7 +46,7 @@
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH10K_NUM_CHANS 39
+#define ATH10K_NUM_CHANS 40
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@@ -314,6 +314,7 @@ struct ath10k_peer {
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
+ bool removed;
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -419,6 +420,21 @@ struct ath10k_vif_iter {
struct ath10k_vif *arvif;
};
+/* Copy Engine register dump, protected by ce-lock */
+struct ath10k_ce_crash_data {
+ __le32 base_addr;
+ __le32 src_wr_idx;
+ __le32 src_r_idx;
+ __le32 dst_wr_idx;
+ __le32 dst_r_idx;
+};
+
+struct ath10k_ce_crash_hdr {
+ __le32 ce_count;
+ __le32 reserved[3]; /* for future use */
+ struct ath10k_ce_crash_data entries[];
+};
+
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
bool crashed_since_read;
@@ -426,6 +442,7 @@ struct ath10k_fw_crash_data {
uuid_le uuid;
struct timespec timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
+ struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
};
struct ath10k_debug {
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 82a4c67f3672..fb0ade3adb07 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,7 @@
#include <linux/utsname.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/devcoredump.h>
#include "core.h"
#include "debug.h"
@@ -40,6 +41,7 @@
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
ATH10K_FW_CRASH_DUMP_MAX,
};
@@ -235,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
{
struct ath10k *ar = file->private_data;
char *buf;
- unsigned int len = 0, buf_len = 4096;
+ size_t len = 0, buf_len = 4096;
const char *name;
ssize_t ret_cnt;
bool enabled;
@@ -399,6 +401,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* prevent firmware from DoS-ing the host.
*/
ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
ath10k_warn(ar, "dropping fw peer stats\n");
goto free;
}
@@ -409,10 +412,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
goto free;
}
+ if (!list_empty(&stats.peers))
+ list_splice_tail_init(&stats.peers_extd,
+ &ar->debug.fw_stats.peers_extd);
+
list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
- list_splice_tail_init(&stats.peers_extd,
- &ar->debug.fw_stats.peers_extd);
}
complete(&ar->debug.fw_stats_complete);
@@ -524,7 +529,7 @@ static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
- unsigned int len = strlen(buf);
+ size_t len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -542,17 +547,16 @@ static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- int ret, len, buf_len;
+ int ret;
+ size_t len = 0, buf_len = 500;
char *buf;
- buf_len = 500;
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
spin_lock_bh(&ar->data_lock);
- len = 0;
len += scnprintf(buf + len, buf_len - len,
"fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
len += scnprintf(buf + len, buf_len - len,
@@ -691,7 +695,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- unsigned int len;
+ size_t len;
char buf[50];
len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
@@ -721,17 +725,21 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
+static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
+ bool mark_read)
{
struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
struct ath10k_dump_file_data *dump_data;
struct ath10k_tlv_dump_data *dump_tlv;
- int hdr_len = sizeof(*dump_data);
- unsigned int len, sofar = 0;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
unsigned char *buf;
len = hdr_len;
len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
sofar += hdr_len;
@@ -790,19 +798,66 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
sizeof(crash_data->registers));
sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
- ar->debug.fw_crash_data->crashed_since_read = false;
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ ar->debug.fw_crash_data->crashed_since_read = !mark_read;
spin_unlock_bh(&ar->data_lock);
return dump_data;
}
+int ath10k_debug_fw_devcoredump(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+ void *dump_ptr;
+ u32 dump_len;
+
+ /* To keep the dump file available also for debugfs, don't mark the
+ * file as read; only debugfs should do that.
+ */
+ dump = ath10k_build_dump_file(ar, false);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ /* Make a copy of the dump file for dev_coredumpv() as during the
+ * transition period we need to own the original file. Once
+ * fw_crash_dump debugfs file is removed no need to have a copy
+ * anymore.
+ */
+ dump_len = le32_to_cpu(dump->len);
+ dump_ptr = vzalloc(dump_len);
+
+ if (!dump_ptr)
+ return -ENOMEM;
+
+ memcpy(dump_ptr, dump, dump_len);
+
+ dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
+
+ return 0;
+}
+
static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
struct ath10k_dump_file_data *dump;
- dump = ath10k_build_dump_file(ar);
+ ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
+
+ dump = ath10k_build_dump_file(ar, true);
if (!dump)
return -ENODATA;
@@ -844,7 +899,7 @@ static ssize_t ath10k_reg_addr_read(struct file *file,
{
struct ath10k *ar = file->private_data;
u8 buf[32];
- unsigned int len = 0;
+ size_t len = 0;
u32 reg_addr;
mutex_lock(&ar->conf_mutex);
@@ -892,7 +947,7 @@ static ssize_t ath10k_reg_value_read(struct file *file,
{
struct ath10k *ar = file->private_data;
u8 buf[48];
- unsigned int len;
+ size_t len;
u32 reg_addr, reg_val;
int ret;
@@ -1115,7 +1170,7 @@ static ssize_t ath10k_read_htt_stats_mask(struct file *file,
{
struct ath10k *ar = file->private_data;
char buf[32];
- unsigned int len;
+ size_t len;
len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
@@ -1169,7 +1224,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
struct ath10k *ar = file->private_data;
char buf[64];
u8 amsdu, ampdu;
- unsigned int len;
+ size_t len;
mutex_lock(&ar->conf_mutex);
@@ -1229,7 +1284,7 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- unsigned int len;
+ size_t len;
char buf[96];
len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
@@ -1555,11 +1610,10 @@ static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- int len = 0;
+ size_t len;
char buf[32];
- len = scnprintf(buf, sizeof(buf) - len, "%d\n",
- ar->ani_enabled);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1584,11 +1638,10 @@ static ssize_t ath10k_read_nf_cal_period(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- unsigned int len;
+ size_t len;
char buf[32];
- len = scnprintf(buf, sizeof(buf), "%d\n",
- ar->debug.nf_cal_period);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1684,9 +1737,10 @@ void ath10k_debug_tpc_stats_process(struct ath10k *ar,
}
static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
- unsigned int j, char *buf, unsigned int *len)
+ unsigned int j, char *buf, size_t *len)
{
- unsigned int i, buf_len;
+ int i;
+ size_t buf_len;
static const char table_str[][5] = { "CDD",
"STBC",
"TXBF" };
@@ -1726,7 +1780,8 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar,
struct ath10k_tpc_stats *tpc_stats,
char *buf)
{
- unsigned int len, j, buf_len;
+ int j;
+ size_t len, buf_len;
len = 0;
buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
@@ -1860,7 +1915,7 @@ static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
- unsigned int len = strlen(buf);
+ size_t len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -2284,7 +2339,7 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- unsigned int len = 0, buf_len = 4096;
+ size_t len = 0, buf_len = 4096;
ssize_t ret_cnt;
char *buf;
@@ -2500,7 +2555,7 @@ void ath10k_dbg_dump(struct ath10k *ar,
const void *buf, size_t len)
{
char linebuf[256];
- unsigned int linebuflen;
+ size_t linebuflen;
const void *ptr;
if (ath10k_debug_mask & mask) {
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 335512b11ca2..2368f47314ae 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -84,6 +84,9 @@ struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+
+int ath10k_debug_fw_devcoredump(struct ath10k *ar);
+
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -166,6 +169,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
+static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
+{
+ return 0;
+}
+
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index fce6f8137d33..7353e7ea88f1 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -306,6 +306,69 @@ static const struct file_operations fops_delba = {
.llseek = default_llseek,
};
+static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[64];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "Write 1 to trigger the debug logs once\n");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u8 peer_debug_trigger;
+ int ret;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
+ return -EINVAL;
+
+ if (peer_debug_trigger != 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
+ WMI_PEER_DEBUG, peer_debug_trigger);
+ if (ret) {
+ ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return count;
+}
+
+static const struct file_operations fops_peer_debug_trigger = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_peer_debug_trigger,
+ .write = ath10k_dbg_sta_write_peer_debug_trigger,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -314,4 +377,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+ debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
+ &fops_peer_debug_trigger);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 175aae38c375..9f6a915f91bf 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -474,33 +474,16 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
}
}
-static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
-{
- struct ath10k_htc_svc_tx_credits *entry;
-
- entry = &htc->service_tx_alloc[0];
-
- /*
- * for PCIE allocate all credists/HTC buffers to WMI.
- * no buffers are used/required for data. data always
- * remains on host.
- */
- entry++;
- entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
- entry->credit_allocation = htc->total_transmit_credits;
-}
-
static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
u16 service_id)
{
u8 allocation = 0;
- int i;
- for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
- if (htc->service_tx_alloc[i].service_id == service_id)
- allocation =
- htc->service_tx_alloc[i].credit_allocation;
- }
+ /* The WMI control service is the only service with flow control.
+ * Let it have all transmit credits.
+ */
+ if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
+ allocation = htc->total_transmit_credits;
return allocation;
}
@@ -574,8 +557,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
return -ECOMM;
}
- ath10k_htc_setup_target_buffer_assignments(htc);
-
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
@@ -726,12 +707,6 @@ setup:
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
- ep->tx_credit_size = htc->target_credit_size;
- ep->tx_credits_per_max_message = ep->max_ep_message_len /
- htc->target_credit_size;
-
- if (ep->max_ep_message_len % htc->target_credit_size)
- ep->tx_credits_per_max_message++;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 0c55cd92a951..6ababa345e2b 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -314,8 +314,6 @@ struct ath10k_htc_ep {
u8 seq_no; /* for debugging */
int tx_credits;
- int tx_credit_size;
- int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
};
@@ -339,7 +337,6 @@ struct ath10k_htc {
struct completion ctl_resp;
int total_transmit_credits;
- struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
};
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 44b25cf00553..90c2f72666b8 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1636,7 +1636,7 @@ struct ath10k_htt {
int size;
/* size - 1 */
- unsigned size_mask;
+ unsigned int size_mask;
/* how many rx buffers to keep in the ring */
int fill_level;
@@ -1657,7 +1657,7 @@ struct ath10k_htt {
/* where HTT SW has processed bufs filled by rx MAC DMA */
struct {
- unsigned msdu_payld;
+ unsigned int msdu_payld;
} sw_rd_idx;
/*
@@ -1820,7 +1820,7 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
-int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
int ath10k_htt_tx(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 86d082cf4eef..02a3fc81fbe3 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -702,6 +702,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
/* 80MHZ */
case 2:
status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ break;
+ case 3:
+ status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ break;
}
status->flag |= RX_FLAG_VHT;
@@ -926,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -940,6 +944,7 @@ static void ath10k_process_rx(struct ath10k *ar,
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+ status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
@@ -2231,6 +2236,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
return;
}
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
@@ -2245,7 +2252,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
rate *= 10;
if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = rate - 5;
- arsta->txrate.legacy = rate * 10;
+ arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
arsta->txrate.mcs = txrate.mcs;
@@ -2451,8 +2458,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);
- ar->tgt_oper_chan =
- __ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt chan change freq %u phymode %s\n",
freq, ath10k_wmi_phymode_str(phymode));
@@ -2486,7 +2492,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
- };
+ }
return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 27e49db4287a..86b427f5e2bc 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -239,6 +239,7 @@ static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
+ htt->txbuf.vaddr = NULL;
}
static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
@@ -268,6 +269,7 @@ static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
size,
htt->frag_desc.vaddr,
htt->frag_desc.paddr);
+ htt->frag_desc.vaddr = NULL;
}
static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 883547f3347c..f0fda0f2b3b4 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+#define ATH10K_FW_FILE_BASE "firmware"
+#define ATH10K_FW_API_MAX 5
+#define ATH10K_FW_API_MIN 2
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -512,7 +516,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
/* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
-#define TARGET_TLV_NUM_PEERS 35
+#define TARGET_TLV_NUM_PEERS 33
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
@@ -578,6 +582,9 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_10_4_IPHDR_PAD_CONFIG 1
#define TARGET_10_4_QWRAP_CONFIG 0
+/* Maximum number of Copy Engine's supported */
+#define CE_COUNT_MAX 12
+
/* Number of Copy Engines supported */
#define CE_COUNT ar->hw_values->ce_count
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index aa545a1dbdc7..3029f257a19a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -569,10 +569,14 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
case NL80211_CHAN_WIDTH_80:
phymode = MODE_11AC_VHT80;
break;
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_11AC_VHT160;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ phymode = MODE_11AC_VHT80_80;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
phymode = MODE_UNKNOWN;
break;
}
@@ -971,6 +975,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
/* TODO setup this dynamically, what in case we
don't have any vifs? */
@@ -1227,6 +1232,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
return ath10k_monitor_stop(ar);
}
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!arvif->is_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+ arvif->vdev_id, arvif->use_cts_prot);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->use_cts_prot ? 1 : 0);
+}
+
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
@@ -1245,6 +1280,9 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
WMI_RTSCTS_PROFILE);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
rts_cts);
}
@@ -1384,6 +1422,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
arg.channel.freq = chandef->chan->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
arg.channel.mode = chan_to_phymode(chandef);
arg.channel.min_power = 0;
@@ -1954,7 +1993,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
{
struct sk_buff *skb = data;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
if (vif->type != NL80211_IFTYPE_STATION)
return;
@@ -1977,7 +2016,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
u32 *vdev_id = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k *ar = arvif->ar;
struct ieee80211_hw *hw = ar->hw;
@@ -2044,7 +2083,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
@@ -2120,7 +2159,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
@@ -2183,7 +2222,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -2407,7 +2446,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2447,6 +2486,9 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= ar->wmi.peer_flags->bw80;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->peer_flags |= ar->wmi.peer_flags->bw160;
+
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->peer_vht_rates.rx_mcs_set =
@@ -2465,7 +2507,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
@@ -2500,12 +2542,39 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
+static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -2546,12 +2615,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
*/
if (sta->vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
- phymode = MODE_11AC_VHT80;
- else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
- phymode = MODE_11AC_VHT40;
- else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
- phymode = MODE_11AC_VHT20;
+ phymode = ath10k_mac_get_phymode_vht(ar, sta);
} else if (sta->ht_cap.ht_supported &&
!ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
@@ -2625,7 +2689,7 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta_vht_cap vht_cap)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
int ret;
u32 param;
u32 value;
@@ -2692,7 +2756,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
struct wmi_peer_assoc_complete_arg peer_arg;
@@ -2785,7 +2849,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ieee80211_sta_vht_cap vht_cap = {};
int ret;
@@ -2818,7 +2882,7 @@ static int ath10k_station_assoc(struct ath10k *ar,
struct ieee80211_sta *sta,
bool reassoc)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
@@ -2885,7 +2949,7 @@ static int ath10k_station_disassoc(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -3111,7 +3175,7 @@ static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k *ar = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
if (arvif->tx_paused)
return;
@@ -3198,7 +3262,7 @@ struct ath10k_mac_tx_pause {
static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_mac_tx_pause *arg = data;
if (arvif->vdev_id != arg->vdev_id)
@@ -3294,7 +3358,7 @@ static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
return false;
if (vif)
- return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+ return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
return true;
}
@@ -3359,7 +3423,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
/* This is case only for P2P_GO */
if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
@@ -3495,7 +3559,6 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
*/
static int ath10k_mac_tx(struct ath10k *ar,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
enum ath10k_hw_txrx_mode txmode,
enum ath10k_mac_tx_path txpath,
struct sk_buff *skb)
@@ -3637,7 +3700,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
@@ -3742,6 +3805,9 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
if (!peer)
return NULL;
+ if (peer->removed)
+ return NULL;
+
if (peer->sta)
return peer->sta->txq[tid];
else if (peer->vif)
@@ -3824,7 +3890,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to push frame: %d\n", ret);
@@ -4105,7 +4171,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
@@ -4279,6 +4345,13 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.cap |= val;
}
+	/* Currently the firmware seems to be buggy; don't enable 80+80
+ * mode until that's resolved.
+ */
+ if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
+ !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+ vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+
mcs_map = 0;
for (i = 0; i < 8; i++) {
if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
@@ -4669,7 +4742,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
- WARN_ON(arvif->txpower < 0);
+ if (arvif->txpower <= 0)
+ continue;
if (txpower == -1)
txpower = arvif->txpower;
@@ -4677,8 +4751,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
txpower = min(txpower, arvif->txpower);
}
- if (WARN_ON(txpower == -1))
- return -EINVAL;
+ if (txpower == -1)
+ return 0;
ret = ath10k_mac_txpower_setup(ar, txpower);
if (ret) {
@@ -4775,7 +4849,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
enum wmi_sta_powersave_param param;
int ret = 0;
@@ -5111,7 +5185,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
int ret;
int i;
@@ -5194,6 +5268,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
}
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+
spin_lock_bh(&ar->htt.tx_lock);
ath10k_mac_vif_tx_unlock_all(arvif);
spin_unlock_bh(&ar->htt.tx_lock);
@@ -5242,7 +5320,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
int ret = 0;
u32 vdev_param, pdev_param, slottime, preamble;
@@ -5328,20 +5406,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
arvif->use_cts_prot = info->use_cts_prot;
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
- arvif->vdev_id, info->use_cts_prot);
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
- vdev_param = ar->wmi.vdev_param->protection_mode;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- info->use_cts_prot ? 1 : 0);
- if (ret)
- ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
- info->use_cts_prot, arvif->vdev_id, ret);
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -5436,7 +5512,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_scan_request *req = &hw_req->req;
struct wmi_start_scan_arg arg;
int ret = 0;
@@ -5568,7 +5644,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
@@ -5707,7 +5783,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
int keyidx)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
int ret;
mutex_lock(&arvif->ar->conf_mutex);
@@ -5888,7 +5964,7 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
int *num_tdls_vifs = data;
if (vif->type != NL80211_IFTYPE_STATION)
@@ -5916,7 +5992,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k_peer *peer;
int ret = 0;
@@ -6151,7 +6227,7 @@ exit:
static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
u16 ac, bool enable)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_sta_uapsd_auto_trig_arg arg = {};
u32 prio = 0, acc = 0;
u32 value = 0;
@@ -6259,7 +6335,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_wmm_params_arg *p = NULL;
int ret;
@@ -6333,7 +6409,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
enum ieee80211_roc_type type)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_start_scan_arg arg;
int ret = 0;
u32 scan_time_msec;
@@ -6833,7 +6909,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
enum nl80211_band band;
@@ -6934,6 +7010,9 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
@@ -6981,7 +7060,7 @@ static void ath10k_offset_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, s64 tsf_offset)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
u32 offset, vdev_param;
int ret;
@@ -7006,7 +7085,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_ampdu_params *params)
{
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
@@ -7104,7 +7183,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
ath10k_monitor_stop(ar);
for (i = 0; i < n_vifs; i++) {
- arvif = ath10k_vif_to_arvif(vifs[i].vif);
+ arvif = (void *)vifs[i].vif->drv_priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
@@ -7137,7 +7216,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
spin_unlock_bh(&ar->data_lock);
for (i = 0; i < n_vifs; i++) {
- arvif = ath10k_vif_to_arvif(vifs[i].vif);
+ arvif = (void *)vifs[i].vif->drv_priv;
if (WARN_ON(!arvif->is_started))
continue;
@@ -7364,6 +7443,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_up = true;
}
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7434,6 +7520,20 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
return 0;
}
+static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar;
+ struct ath10k_peer *peer;
+
+ ar = hw->priv;
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (peer->sta == sta)
+ peer->removed = true;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -7474,6 +7574,7 @@ static const struct ieee80211_ops ath10k_ops = {
.assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
+ .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -7548,6 +7649,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(157, 5785, 0),
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
};
struct ath10k *ath10k_mac_create(size_t priv_size)
@@ -7771,7 +7873,7 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_vif_iter *arvif_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
if (arvif->vdev_id == arvif_iter->vdev_id)
arvif_iter->arvif = arvif;
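
Several of the hunks above key off the two-bit VHT supported-channel-width field: value 1 advertises 160 MHz, value 2 advertises both 160 MHz and 80+80 MHz. That is what ath10k_mac_get_phymode_vht() switches on, and why ath10k_create_vht_cap() only sets the plain 160 MHz capability while 80+80 stays disabled. A small decode sketch using the definitions from linux/ieee80211.h (illustrative, not driver code):

	/* width field: 0 = 80 MHz only, 1 = 160 MHz, 2 = 160 MHz and 80+80 MHz */
	static bool vht_cap_has_160(u32 vht_cap_info)
	{
		u32 width = vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

		return width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
		       width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
	}
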
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 1bd29ecfcdcc..553747bc19ed 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -83,17 +83,12 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
-static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
-{
- return (struct ath10k_vif *)vif->drv_priv;
-}
-
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
if (arvif->tx_seq_no == 0)
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
index c0b6ffaf3ec1..7e621ee194e3 100644
--- a/drivers/net/wireless/ath/ath10k/p2p.c
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -132,7 +132,7 @@ struct ath10k_p2p_noa_arg {
static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_p2p_noa_arg *arg = data;
if (arvif->vdev_id != arg->vdev_id)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index b541a1c74488..6094372307aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -840,31 +840,35 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
-static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
- u32 val = 0;
+ u32 val = 0, region = addr & 0xfffff;
- switch (ar->hw_rev) {
- case ATH10K_HW_QCA988X:
- case ATH10K_HW_QCA9887:
- case ATH10K_HW_QCA6174:
- case ATH10K_HW_QCA9377:
- val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CORE_CTRL_ADDRESS) &
- 0x7ff) << 21;
- break;
- case ATH10K_HW_QCA9888:
- case ATH10K_HW_QCA99X0:
- case ATH10K_HW_QCA9984:
- case ATH10K_HW_QCA4019:
- val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
- break;
- }
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
- val |= 0x100000 | (addr & 0xfffff);
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
return val;
}
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -ENOTSUPP;
+
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@@ -896,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -905,7 +909,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
ret = -ENOMEM;
goto done;
}
- memset(data_buf, 0, alloc_nbytes);
remaining_bytes = nbytes;
ce_data = ce_data_base;
@@ -1474,6 +1477,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
ath10k_print_driver_info(ar);
ath10k_pci_dump_registers(ar, crash_data);
+ ath10k_ce_dump_registers(ar, crash_data);
spin_unlock_bh(&ar->data_lock);
@@ -1590,7 +1594,7 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
*/
- break;
+ break;
}
}
@@ -1647,6 +1651,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+ napi_enable(&ar->napi);
+
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
@@ -1937,7 +1943,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
u32 addr, val;
- addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
val = ath10k_pci_read32(ar, addr);
val |= CORE_CTRL_CPU_INTR_MASK;
ath10k_pci_write32(ar, addr, val);
@@ -1973,7 +1979,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
}
break;
case QCA9377_1_0_DEVICE_ID:
- return 2;
+ return 4;
}
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
@@ -2531,7 +2537,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
- napi_enable(&ar->napi);
return 0;
@@ -2799,7 +2804,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
done = ath10k_htt_txrx_compl_task(ar, budget);
if (done < budget) {
- napi_complete(ctx);
+ napi_complete_done(ctx, done);
/* In case of MSI, it is possible that interrupts are received
* while NAPI poll is inprogress. So pending interrupts that are
* received after processing all copy engine pipes by NAPI poll
@@ -3132,7 +3137,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
- if (QCA_REV_6174(ar))
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
ret = ath10k_pci_alloc_pipes(ar);
@@ -3170,6 +3175,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
@@ -3177,12 +3183,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA9887_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -3190,30 +3198,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9984_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9984;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9888_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9888;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@@ -3240,6 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar_pci->pci_soft_reset = pci_soft_reset;
ar_pci->pci_hard_reset = pci_hard_reset;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
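
Both per-chip helpers build the CE-visible address the same way: keep the low 20 bits of the target CPU address as the offset inside a 1 MB region and OR in a window base that is chip specific (derived from CORE_CTRL_ADDRESS on QCA988x-class chips, from PCIE_BAR_REG_ADDRESS on QCA99X0-class chips). Reduced to its arithmetic, with the window value passed in rather than read from hardware, the translation looks like this (illustrative only):

	/* Compose the CE address from a chip specific window base and a
	 * target CPU address.
	 */
	static u32 ce_addr_compose(u32 window_base, u32 targ_cpu_addr)
	{
		u32 region = targ_cpu_addr & 0xfffff;	/* low 20 bits */

		return window_base | 0x100000 | region;
	}
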
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 9854ad56b2de..c1e08ad63940 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -25,11 +25,6 @@
#include "ahb.h"
/*
- * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
- */
-#define DIAG_TRANSFER_LIMIT 2048
-
-/*
* maximum number of bytes that can be
* handled atomically by DiagRead/DiagWrite
*/
@@ -238,6 +233,11 @@ struct ath10k_pci {
/* Chip specific pci full reset function */
int (*pci_hard_reset)(struct ath10k *ar);
+ /* chip specific methods for converting target CPU virtual address
+ * space to CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 2ffc1fe4923b..c061d6958bd1 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -278,7 +278,7 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
{
struct ath10k *ar = file->private_data;
char *mode = "";
- unsigned int len;
+ size_t len;
enum ath10k_spectral_mode spectral_mode;
mutex_lock(&ar->conf_mutex);
@@ -370,7 +370,7 @@ static ssize_t read_file_spectral_count(struct file *file,
{
struct ath10k *ar = file->private_data;
char buf[32];
- unsigned int len;
+ size_t len;
u8 spectral_count;
mutex_lock(&ar->conf_mutex);
@@ -422,7 +422,8 @@ static ssize_t read_file_spectral_bins(struct file *file,
{
struct ath10k *ar = file->private_data;
char buf[32];
- unsigned int len, bins, fft_size, bin_scale;
+ unsigned int bins, fft_size, bin_scale;
+ size_t len;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index ed85f938e3c0..8bb36c18a749 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -150,7 +150,10 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
- ret = request_firmware(&fw_file->firmware, filename, ar->dev);
+ ret = request_firmware_direct(&fw_file->firmware, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n",
+ filename, ret);
+
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
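
request_firmware_direct() takes the same arguments as request_firmware() but only searches the firmware paths, skips the user-mode helper fallback and does not print an error when the file is absent, which suits an optional image like utf.bin; the added debug print records the result either way. A hedged usage sketch (the path is just an example, not taken from this patch):

	static int load_utf_example(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware_direct(&fw, "ath10k/QCA988X/hw2.0/utf.bin",
					      dev);
		if (ret)
			return ret;	/* testmode simply stays unavailable */

		/* ... consume fw->data / fw->size ... */

		release_firmware(fw);
		return 0;
	}
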
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index f304f6632c4f..f9188027a6f6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_pdev *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_vdev *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_peer *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -3631,6 +3637,7 @@ static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
.vht = WMI_TLV_PEER_VHT,
.bw80 = WMI_TLV_PEER_80MHZ,
.pmf = WMI_TLV_PEER_PMF,
+ .bw160 = WMI_TLV_PEER_160MHZ,
};
/************/
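
The three hunks above plug the same leak: tb, the TLV pointer array allocated earlier in the function, was not freed on the short-buffer error paths. An alternative shape for the same fix is a single exit label, sketched below under the assumption that all early failures can share one cleanup point (illustrative, not the committed code):

	static int pull_stats_sketch(void **tb, const void *data, size_t data_len,
				     size_t rec_len)
	{
		int ret = 0;

		if (data_len < rec_len) {	/* stands in for sizeof(*src) */
			ret = -EPROTO;
			goto out;
		}

		/* ... walk the remaining records ... */

	out:
		kfree(tb);
		return ret;
	}
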
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index b8aa6000573c..22cf011e839a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -543,6 +543,7 @@ enum wmi_tlv_peer_flags {
WMI_TLV_PEER_VHT = 0x02000000,
WMI_TLV_PEER_80MHZ = 0x04000000,
WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_TLV_PEER_160MHZ = 0x20000000,
};
enum wmi_tlv_tag {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 50d6ee6afe26..2f1743e60fa1 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -28,6 +28,7 @@
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
+#include "hif.h"
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -1574,6 +1575,7 @@ static const struct wmi_peer_flags_map wmi_peer_flags_map = {
.bw80 = WMI_PEER_80MHZ,
.vht_2g = WMI_PEER_VHT_2G,
.pmf = WMI_PEER_PMF,
+ .bw160 = WMI_PEER_160MHZ,
};
static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
@@ -1591,6 +1593,7 @@ static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
.vht = WMI_10X_PEER_VHT,
.bw80 = WMI_10X_PEER_80MHZ,
+ .bw160 = WMI_10X_PEER_160MHZ,
};
static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
@@ -1610,6 +1613,7 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
.bw80 = WMI_10_2_PEER_80MHZ,
.vht_2g = WMI_10_2_PEER_VHT_2G,
.pmf = WMI_10_2_PEER_PMF,
+ .bw160 = WMI_10_2_PEER_160MHZ,
};
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1634,7 +1638,10 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
ch->mhz = __cpu_to_le32(arg->freq);
ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
- ch->band_center_freq2 = 0;
+ if (arg->mode == MODE_11AC_VHT80_80)
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
+ else
+ ch->band_center_freq2 = 0;
ch->min_power = arg->min_power;
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
@@ -1772,7 +1779,7 @@ unlock:
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
ath10k_wmi_tx_beacon_nowait(arvif);
}
@@ -2319,7 +2326,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
*/
if (channel >= 1 && channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (channel >= 36 && channel <= 165) {
+ } else if (channel >= 36 && channel <= 169) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
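
Accepting channels up to 169 here matches the new CHAN5G(169, 5845, 0) entry in mac.c: in the 5 GHz band the frequency follows freq = 5000 + 5 * channel, so channel 169 is 5845 MHz (cfg80211's ieee80211_channel_to_frequency() applies the same rule). As a one-liner:

	static inline u32 chan5g_to_freq_mhz(u32 chan)
	{
		return 5000 + 5 * chan;		/* chan5g_to_freq_mhz(169) == 5845 */
	}
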
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 5d3dff95b2e5..386aa51435f1 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -75,7 +75,7 @@ struct wmi_cmd_hdr {
/*
* There is no signed version of __le32, so for a temporary solution come
- * up with our own version. The idea is from fs/ntfs/types.h.
+ * up with our own version. The idea is from fs/ntfs/endian.h.
*
* Use a_ prefix so that it doesn't conflict if we get proper support to
* linux/types.h.
@@ -1728,8 +1728,10 @@ enum wmi_phy_mode {
MODE_11AC_VHT20_2G = 11,
MODE_11AC_VHT40_2G = 12,
MODE_11AC_VHT80_2G = 13,
- MODE_UNKNOWN = 14,
- MODE_MAX = 14
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_UNKNOWN = 16,
+ MODE_MAX = 16
};
static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
@@ -1757,6 +1759,10 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
return "11ac-vht40";
case MODE_11AC_VHT80:
return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
case MODE_11AC_VHT20_2G:
return "11ac-vht20-2g";
case MODE_11AC_VHT40_2G:
@@ -1811,6 +1817,7 @@ struct wmi_channel {
struct wmi_channel_arg {
u32 freq;
u32 band_center_freq1;
+ u32 band_center_freq2;
bool passive;
bool allow_ibss;
bool allow_ht;
@@ -1875,9 +1882,18 @@ enum wmi_channel_change_cause {
#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
#define WMI_VHT_CAP_RX_LDPC 0x00000010
#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
#define WMI_VHT_CAP_TX_STBC 0x00000080
#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
@@ -1926,6 +1942,8 @@ enum {
REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
+ REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */
+ REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */
REGDMN_MODE_ALL = 0xffffffff
};
@@ -5783,6 +5801,7 @@ enum wmi_peer_chwidth {
WMI_PEER_CHWIDTH_20MHZ = 0,
WMI_PEER_CHWIDTH_40MHZ = 1,
WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
};
enum wmi_peer_param {
@@ -5792,6 +5811,7 @@ enum wmi_peer_param {
WMI_PEER_CHAN_WIDTH = 0x4,
WMI_PEER_NSS = 0x5,
WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_DEBUG = 0xa,
WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
};
@@ -5873,6 +5893,7 @@ struct wmi_peer_flags_map {
u32 bw80;
u32 vht_2g;
u32 pmf;
+ u32 bw160;
};
enum wmi_peer_flags {
@@ -5892,6 +5913,7 @@ enum wmi_peer_flags {
WMI_PEER_80MHZ = 0x04000000,
WMI_PEER_VHT_2G = 0x08000000,
WMI_PEER_PMF = 0x10000000,
+ WMI_PEER_160MHZ = 0x20000000
};
enum wmi_10x_peer_flags {
@@ -5909,6 +5931,7 @@ enum wmi_10x_peer_flags {
WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
WMI_10X_PEER_VHT = 0x02000000,
WMI_10X_PEER_80MHZ = 0x04000000,
+ WMI_10X_PEER_160MHZ = 0x20000000
};
enum wmi_10_2_peer_flags {
@@ -5928,6 +5951,7 @@ enum wmi_10_2_peer_flags {
WMI_10_2_PEER_80MHZ = 0x04000000,
WMI_10_2_PEER_VHT_2G = 0x08000000,
WMI_10_2_PEER_PMF = 0x10000000,
+ WMI_10_2_PEER_160MHZ = 0x20000000
};
/*
@@ -6581,7 +6605,7 @@ struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
u32 cmd_id);
-void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
struct ath10k_fw_stats_pdev *dst);
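
band_center_freq2 only matters for MODE_11AC_VHT80_80, where the firmware needs the centre of both 80 MHz segments: freq1 for the segment holding the primary channel, freq2 for the secondary segment, which is exactly the condition ath10k_wmi_put_wmi_channel() now checks. A worked example for primary channel 36 paired with a second segment on channels 149-161 (values illustrative):

	struct wmi_channel_arg arg = {
		.freq		   = 5180,	/* primary channel 36 */
		.band_center_freq1 = 5210,	/* centre of segment 36-48 */
		.band_center_freq2 = 5775,	/* centre of segment 149-161 */
		.mode		   = MODE_11AC_VHT80_80,
	};
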
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 2ca88b593e4c..c0794f5988b3 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -16,10 +16,10 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/module.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
-#include <linux/export.h>
#include <ath25_platform.h>
#include "ath5k.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index dc44cfef7517..16e052d02c94 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
return -EOPNOTSUPP;
default:
- WARN_ON(1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
mutex_lock(&ah->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index ac25f1781b42..87e99c12d4ba 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -641,7 +641,6 @@ struct ath6kl_vif {
u32 txe_intvl;
u16 bg_scan_period;
u8 assoc_bss_dtim_period;
- struct net_device_stats net_stats;
struct target_stats target_stats;
struct wmi_connect_cmd profile;
u16 rsn_capab;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 1af3fed5a72c..91ee542de3d7 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1113,13 +1113,6 @@ static int ath6kl_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
-{
- struct ath6kl_vif *vif = netdev_priv(dev);
-
- return &vif->net_stats;
-}
-
static int ath6kl_set_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1285,7 +1278,6 @@ static const struct net_device_ops ath6kl_netdev_ops = {
.ndo_open = ath6kl_open,
.ndo_stop = ath6kl_close,
.ndo_start_xmit = ath6kl_data_tx,
- .ndo_get_stats = ath6kl_get_stats,
.ndo_set_features = ath6kl_set_features,
.ndo_set_rx_mode = ath6kl_set_multicast_list,
};
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 8ec66e74d06d..2195b1b7a8a6 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -713,7 +713,7 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
* that the packet is properly freed?
*/
if (s_req->busrequest) {
- s_req->busrequest->scat_req = 0;
+ s_req->busrequest->scat_req = NULL;
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
}
kfree(s_req->virt_dma_buf);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 9df41d5e3249..a531e0c5c1e2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -405,7 +405,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb = skb_realloc_headroom(skb, dev->needed_headroom);
kfree_skb(tmp_skb);
if (skb == NULL) {
- vif->net_stats.tx_dropped++;
+ dev->stats.tx_dropped++;
return 0;
}
}
@@ -520,8 +520,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
fail_tx:
dev_kfree_skb(skb);
- vif->net_stats.tx_dropped++;
- vif->net_stats.tx_aborted_errors++;
+ dev->stats.tx_dropped++;
+ dev->stats.tx_aborted_errors++;
return 0;
}
@@ -767,7 +767,7 @@ void ath6kl_tx_complete(struct htc_target *target,
/* a packet was flushed */
flushing[if_idx] = true;
- vif->net_stats.tx_errors++;
+ vif->ndev->stats.tx_errors++;
if (status != -ENOSPC && status != -ECANCELED)
ath6kl_warn("tx complete error: %d\n", status);
@@ -783,8 +783,8 @@ void ath6kl_tx_complete(struct htc_target *target,
eid, "OK");
flushing[if_idx] = false;
- vif->net_stats.tx_packets++;
- vif->net_stats.tx_bytes += skb->len;
+ vif->ndev->stats.tx_packets++;
+ vif->ndev->stats.tx_bytes += skb->len;
}
ath6kl_tx_clear_node_map(vif, eid, map_no);
@@ -1365,8 +1365,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
*/
spin_lock_bh(&vif->if_lock);
- vif->net_stats.rx_packets++;
- vif->net_stats.rx_bytes += packet->act_len;
+ vif->ndev->stats.rx_packets++;
+ vif->ndev->stats.rx_bytes += packet->act_len;
spin_unlock_bh(&vif->if_lock);
@@ -1395,8 +1395,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
((packet->act_len < min_hdr_len) ||
(packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
ath6kl_info("frame len is too short or too long\n");
- vif->net_stats.rx_errors++;
- vif->net_stats.rx_length_errors++;
+ vif->ndev->stats.rx_errors++;
+ vif->ndev->stats.rx_length_errors++;
dev_kfree_skb(skb);
return;
}
@@ -1619,7 +1619,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return;
}
} else if (!is_broadcast_ether_addr(datap->h_dest)) {
- vif->net_stats.multicast++;
+ vif->ndev->stats.multicast++;
}
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
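
The ath6kl changes above all rely on the same fallback: when a driver registers neither .ndo_get_stats nor .ndo_get_stats64, dev_get_stats() reports the counters embedded in struct net_device, so updating dev->stats (reached here through vif->ndev) is sufficient and the private net_stats copy plus ath6kl_get_stats() can go. The pattern in isolation:

	/* Count a successfully transmitted frame using the counters that
	 * the core falls back to when no ndo_get_stats* hook is set.
	 */
	static void count_tx_ok(struct net_device *ndev, unsigned int len)
	{
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += len;
	}
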
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 8f231c67dd51..783a38f1a626 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -3,8 +3,8 @@ config ATH9K_HW
config ATH9K_COMMON
tristate
select ATH_COMMON
- select DEBUG_FS
- select RELAY
+config ATH9K_COMMON_DEBUG
+ bool
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -60,12 +60,14 @@ config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
+ select ATH9K_COMMON_DEBUG
select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
Also required for changing debug message flags at run time.
+	  It also provides access to the FFT/spectral data and TX99.
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
@@ -174,8 +176,11 @@ config ATH9K_HTC
config ATH9K_HTC_DEBUGFS
bool "Atheros ath9k_htc debugging"
depends on ATH9K_HTC && DEBUG_FS
+ select ATH9K_COMMON_DEBUG
+ select RELAY
---help---
Say Y, if you need access to ath9k_htc's statistics.
+	  It also provides access to the FFT/spectral data.
config ATH9K_HWRNG
bool "Random number generator support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 76f9dc37500b..36a40ffdce15 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -60,8 +60,9 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o \
common-beacon.o \
- common-debug.o \
- common-spectral.o
+
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
+ common-spectral.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8eea8d22e72e..7922550c2159 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -524,7 +524,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
return true;
/* Setup rf parameters */
- eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+ eepMinorRev = ah->eep_ops->get_eeprom_rev(ah);
for (i = 0; i < ah->iniBank6.ia_rows; i++)
ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d480d2f3e185..ae68f674829b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -108,8 +108,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
{
u32 rxgain_type;
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
- AR5416_EEP_MINOR_VER_17) {
+ if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_17) {
rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
@@ -129,8 +128,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
- AR5416_EEP_MINOR_VER_19) {
+ if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) {
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9280Modes_high_power_tx_gain_9280_2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index f816909d9474..4b3c9b108197 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
- ACCESS_ONCE(ads->ds_link) = i->link;
- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+ WRITE_ONCE(ads->ds_link, i->link);
+ WRITE_ONCE(ads->ds_data, i->buf_addr[0]);
ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
ctl6 = SM(i->keytype, AR_EncrType);
@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
if ((i->is_first || i->is_last) &&
i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ WRITE_ONCE(ads->ds_ctl2, set11nTries(i->rates, 0)
| set11nTries(i->rates, 1)
| set11nTries(i->rates, 2)
| set11nTries(i->rates, 3)
| (i->dur_update ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
+ | SM(0, AR_BurstDur));
- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ WRITE_ONCE(ads->ds_ctl3, set11nRate(i->rates, 0)
| set11nRate(i->rates, 1)
| set11nRate(i->rates, 2)
- | set11nRate(i->rates, 3);
+ | set11nRate(i->rates, 3));
} else {
- ACCESS_ONCE(ads->ds_ctl2) = 0;
- ACCESS_ONCE(ads->ds_ctl3) = 0;
+ WRITE_ONCE(ads->ds_ctl2, 0);
+ WRITE_ONCE(ads->ds_ctl3, 0);
}
if (!i->is_first) {
- ACCESS_ONCE(ads->ds_ctl0) = 0;
- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+ WRITE_ONCE(ads->ds_ctl0, 0);
+ WRITE_ONCE(ads->ds_ctl1, ctl1);
+ WRITE_ONCE(ads->ds_ctl6, ctl6);
return;
}
@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
break;
}
- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ WRITE_ONCE(ads->ds_ctl0, (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
| SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -287,29 +287,29 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
| (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
- (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)));
- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+ WRITE_ONCE(ads->ds_ctl1, ctl1);
+ WRITE_ONCE(ads->ds_ctl6, ctl6);
if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
return;
- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
- | set11nPktDurRTSCTS(i->rates, 1);
+ WRITE_ONCE(ads->ds_ctl4, set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1));
- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
- | set11nPktDurRTSCTS(i->rates, 3);
+ WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3));
- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0)
| set11nRateFlags(i->rates, 1)
| set11nRateFlags(i->rates, 2)
| set11nRateFlags(i->rates, 3)
- | SM(i->rtscts_rate, AR_RTSCTSRate);
+ | SM(i->rtscts_rate, AR_RTSCTSRate));
- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
+ WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1));
+ WRITE_ONCE(ads->ds_ctl10, SM(i->txpower[2], AR_XmitPower2));
+ WRITE_ONCE(ads->ds_ctl11, SM(i->txpower[3], AR_XmitPower3));
}
static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
@@ -318,7 +318,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ar5416_desc *ads = AR5416DESC(ds);
u32 status;
- status = ACCESS_ONCE(ads->ds_txstatus9);
+ status = READ_ONCE(ads->ds_txstatus9);
if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
@@ -332,7 +332,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_rateindex = MS(status, AR_FinalTxIdx);
ts->ts_seqnum = MS(status, AR_SeqNum);
- status = ACCESS_ONCE(ads->ds_txstatus0);
+ status = READ_ONCE(ads->ds_txstatus0);
ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
@@ -342,7 +342,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ba_high = ads->AR_BaBitmapHigh;
}
- status = ACCESS_ONCE(ads->ds_txstatus1);
+ status = READ_ONCE(ads->ds_txstatus1);
if (status & AR_FrmXmitOK)
ts->ts_status |= ATH9K_TX_ACKED;
else {
@@ -371,7 +371,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_longretry = MS(status, AR_DataFailCnt);
ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
- status = ACCESS_ONCE(ads->ds_txstatus5);
+ status = READ_ONCE(ads->ds_txstatus5);
ts->ts_rssi = MS(status, AR_TxRSSICombined);
ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
@@ -390,13 +390,13 @@ static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
switch (index) {
case 0:
- return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0);
+ return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur0);
case 1:
- return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1);
+ return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur1);
case 2:
- return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2);
+ return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur2);
case 3:
- return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3);
+ return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur3);
default:
return -1;
}
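
The descriptor accessors above are a mechanical conversion: ACCESS_ONCE() could be used as an lvalue and was being phased out of the kernel, while WRITE_ONCE()/READ_ONCE() take the value as a second argument, which is why every multi-line assignment gains a closing parenthesis. The idiom on its own, for a descriptor shared with DMA hardware:

	struct demo_desc {
		u32 ctl;
		u32 status;
	};

	static void demo_store(struct demo_desc *d, u32 flags)
	{
		/* was: ACCESS_ONCE(d->ctl) = flags; */
		WRITE_ONCE(d->ctl, flags);	/* single, non-torn store */
	}

	static u32 demo_load(struct demo_desc *d)
	{
		/* was: val = ACCESS_ONCE(d->status); */
		return READ_ONCE(d->status);	/* single, non-torn load */
	}
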
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 08607d7fdb56..3dbfd86ebe36 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -53,7 +53,7 @@ static const struct ar9300_eeprom ar9300_default = {
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
- .eepMisc = 0,
+ .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
},
.rfSilent = 0,
.blueToothOptions = 0,
@@ -631,7 +631,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
.opFlags = AR5416_OPFLAGS_11A,
- .eepMisc = 0,
+ .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
},
.rfSilent = 0,
.blueToothOptions = 0,
@@ -1210,7 +1210,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
- .eepMisc = 0,
+ .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
},
.rfSilent = 0,
.blueToothOptions = 0,
@@ -1789,7 +1789,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
- .eepMisc = 0,
+ .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
},
.rfSilent = 0,
.blueToothOptions = 0,
@@ -2367,7 +2367,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.txrxMask = 0x33, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
- .eepMisc = 0,
+ .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
},
.rfSilent = 0,
.blueToothOptions = 0,
@@ -3468,7 +3468,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
AR5416_OPFLAGS_N_5G_HT20));
PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags.opFlags &
AR5416_OPFLAGS_N_5G_HT40));
- PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
+ PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc &
+ AR5416_EEPMISC_BIG_ENDIAN));
PR_EEP("RF Silent", pBase->rfSilent);
PR_EEP("BT option", pBase->blueToothOptions);
PR_EEP("Device Cap", pBase->deviceCap);
@@ -5497,6 +5498,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
}
}
+static u8 ar9003_get_eepmisc(struct ath_hw *ah)
+{
+ return ah->eeprom.map4k.baseEepHeader.eepMisc;
+}
+
const struct eeprom_ops eep_ar9300_ops = {
.check_eeprom = ath9k_hw_ar9300_check_eeprom,
.get_eeprom = ath9k_hw_ar9300_get_eeprom,
@@ -5507,5 +5513,6 @@ const struct eeprom_ops eep_ar9300_ops = {
.set_board_values = ath9k_hw_ar9300_set_board_values,
.set_addac = ath9k_hw_ar9300_set_addac,
.set_txpower = ath9k_hw_ar9300_set_txpower,
- .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel,
+ .get_eepmisc = ar9003_get_eepmisc
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 107bcfbbe0fb..bd2269c7de6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -38,7 +38,6 @@
#define AR9300_NUM_CTLS_2G 12
#define AR9300_NUM_BAND_EDGES_5G 8
#define AR9300_NUM_BAND_EDGES_2G 4
-#define AR9300_EEPMISC_BIG_ENDIAN 0x01
#define AR9300_EEPMISC_WOW 0x02
#define AR9300_CUSTOMER_DATA_SIZE 20
@@ -70,16 +69,19 @@
#define AR9300_BASE_ADDR 0x3ff
#define AR9300_BASE_ADDR_512 0x1ff
+/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */
+#define AR9300_EEPMISC_LITTLE_ENDIAN 0
+
#define AR9300_OTP_BASE \
((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
#define AR9300_OTP_STATUS \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
#define AR9300_OTP_READ_DATA \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
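
With the default templates now spelling out AR9300_EEPMISC_LITTLE_ENDIAN and the dump using the shared AR5416_EEPMISC_BIG_ENDIAN bit, the endianness of an EEPROM image can be decided from eepMisc alone. A hedged sketch of how a caller might combine the new get_eepmisc op with the host endianness to decide whether multi-byte fields need swapping (helper name and exact op usage are assumptions, not lifted from this patch):

	static bool eeprom_needs_swap(struct ath_hw *ah)
	{
		bool data_is_be = ah->eep_ops->get_eepmisc(ah) &
				  AR5416_EEPMISC_BIG_ENDIAN;

		/* swap when the image endianness differs from the host's */
		return data_is_be != IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	}
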
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index da84b705cbcd..cc5bb0a76baf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
(i->qcu << AR_TxQcuNum_S) | desc_len;
checksum += val;
- ACCESS_ONCE(ads->info) = val;
+ WRITE_ONCE(ads->info, val);
checksum += i->link;
- ACCESS_ONCE(ads->link) = i->link;
+ WRITE_ONCE(ads->link, i->link);
checksum += i->buf_addr[0];
- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+ WRITE_ONCE(ads->data0, i->buf_addr[0]);
checksum += i->buf_addr[1];
- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+ WRITE_ONCE(ads->data1, i->buf_addr[1]);
checksum += i->buf_addr[2];
- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+ WRITE_ONCE(ads->data2, i->buf_addr[2]);
checksum += i->buf_addr[3];
- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+ WRITE_ONCE(ads->data3, i->buf_addr[3]);
checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
- ACCESS_ONCE(ads->ctl3) = val;
+ WRITE_ONCE(ads->ctl3, val);
checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
- ACCESS_ONCE(ads->ctl5) = val;
+ WRITE_ONCE(ads->ctl5, val);
checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
- ACCESS_ONCE(ads->ctl7) = val;
+ WRITE_ONCE(ads->ctl7, val);
checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
- ACCESS_ONCE(ads->ctl9) = val;
+ WRITE_ONCE(ads->ctl9, val);
checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
- ACCESS_ONCE(ads->ctl10) = checksum;
+ WRITE_ONCE(ads->ctl10, checksum);
if (i->is_first || i->is_last) {
- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+ WRITE_ONCE(ads->ctl13, set11nTries(i->rates, 0)
| set11nTries(i->rates, 1)
| set11nTries(i->rates, 2)
| set11nTries(i->rates, 3)
| (i->dur_update ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
+ | SM(0, AR_BurstDur));
- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+ WRITE_ONCE(ads->ctl14, set11nRate(i->rates, 0)
| set11nRate(i->rates, 1)
| set11nRate(i->rates, 2)
- | set11nRate(i->rates, 3);
+ | set11nRate(i->rates, 3));
} else {
- ACCESS_ONCE(ads->ctl13) = 0;
- ACCESS_ONCE(ads->ctl14) = 0;
+ WRITE_ONCE(ads->ctl13, 0);
+ WRITE_ONCE(ads->ctl14, 0);
}
ads->ctl20 = 0;
@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ctl17 = SM(i->keytype, AR_EncrType);
if (!i->is_first) {
- ACCESS_ONCE(ads->ctl11) = 0;
- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
- ACCESS_ONCE(ads->ctl15) = 0;
- ACCESS_ONCE(ads->ctl16) = 0;
- ACCESS_ONCE(ads->ctl17) = ctl17;
- ACCESS_ONCE(ads->ctl18) = 0;
- ACCESS_ONCE(ads->ctl19) = 0;
+ WRITE_ONCE(ads->ctl11, 0);
+ WRITE_ONCE(ads->ctl12, i->is_last ? 0 : AR_TxMore);
+ WRITE_ONCE(ads->ctl15, 0);
+ WRITE_ONCE(ads->ctl16, 0);
+ WRITE_ONCE(ads->ctl17, ctl17);
+ WRITE_ONCE(ads->ctl18, 0);
+ WRITE_ONCE(ads->ctl19, 0);
return;
}
- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ WRITE_ONCE(ads->ctl11, (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
| SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -107,7 +107,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
| (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
| (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
- (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)));
ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ?
SM(i->keyix, AR_DestIdx) : 0)
@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
ctl12 |= SM(val, AR_PAPRDChainMask);
- ACCESS_ONCE(ads->ctl12) = ctl12;
- ACCESS_ONCE(ads->ctl17) = ctl17;
+ WRITE_ONCE(ads->ctl12, ctl12);
+ WRITE_ONCE(ads->ctl17, ctl17);
- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
- | set11nPktDurRTSCTS(i->rates, 1);
+ WRITE_ONCE(ads->ctl15, set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1));
- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
- | set11nPktDurRTSCTS(i->rates, 3);
+ WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3));
- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0)
| set11nRateFlags(i->rates, 1)
| set11nRateFlags(i->rates, 2)
| set11nRateFlags(i->rates, 3)
- | SM(i->rtscts_rate, AR_RTSCTSRate);
+ | SM(i->rtscts_rate, AR_RTSCTSRate));
- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+ WRITE_ONCE(ads->ctl19, AR_Not_Sounding);
- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
+ WRITE_ONCE(ads->ctl20, SM(i->txpower[1], AR_XmitPower1));
+ WRITE_ONCE(ads->ctl21, SM(i->txpower[2], AR_XmitPower2));
+ WRITE_ONCE(ads->ctl22, SM(i->txpower[3], AR_XmitPower3));
}
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
@@ -359,7 +359,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ads = &ah->ts_ring[ah->ts_tail];
- status = ACCESS_ONCE(ads->status8);
+ status = READ_ONCE(ads->status8);
if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
@@ -385,7 +385,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if (status & AR_TxOpExceeded)
ts->ts_status |= ATH9K_TXERR_XTXOP;
- status = ACCESS_ONCE(ads->status2);
+ status = READ_ONCE(ads->status2);
ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
@@ -395,7 +395,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ba_high = ads->status6;
}
- status = ACCESS_ONCE(ads->status3);
+ status = READ_ONCE(ads->status3);
if (status & AR_ExcessiveRetries)
ts->ts_status |= ATH9K_TXERR_XRETRY;
if (status & AR_Filtered)
@@ -420,7 +420,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_longretry = MS(status, AR_DataFailCnt);
ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
- status = ACCESS_ONCE(ads->status7);
+ status = READ_ONCE(ads->status7);
ts->ts_rssi = MS(status, AR_TxRSSICombined);
ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
@@ -437,13 +437,13 @@ static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
switch (index) {
case 0:
- return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0);
+ return MS(READ_ONCE(adc->ctl15), AR_PacketDur0);
case 1:
- return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1);
+ return MS(READ_ONCE(adc->ctl15), AR_PacketDur1);
case 2:
- return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2);
+ return MS(READ_ONCE(adc->ctl16), AR_PacketDur2);
case 3:
- return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3);
+ return MS(READ_ONCE(adc->ctl16), AR_PacketDur3);
default:
return 0;
}
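Note: the ar9003_mac.c hunks are a mechanical conversion from the old ACCESS_ONCE() lvalue idiom to WRITE_ONCE()/READ_ONCE(); as a side effect the closing parenthesis of each multi-line expression moves inside the macro call. A self-contained illustration of the shape of that conversion (MY_WRITE_ONCE/MY_READ_ONCE are simplified stand-ins for the kernel macros, shown only to make the before/after pattern explicit):

#include <stdint.h>
#include <stdio.h>

#define MY_WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define MY_READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct demo_desc {
        uint32_t info;
        uint32_t status8;
};

int main(void)
{
        struct demo_desc d = { 0, 0xdead0001 };
        uint32_t status;

        MY_WRITE_ONCE(d.info, 0x1234);    /* was: ACCESS_ONCE(d.info) = 0x1234; */
        status = MY_READ_ONCE(d.status8); /* was: status = ACCESS_ONCE(d.status8); */
        printf("info=%#x status=%#x\n", d.info, status);
        return 0;
}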
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 378d3458fddb..cf076719c27e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -108,10 +108,12 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_MIN_QDEPTH 2
/* minimum h/w qdepth for non-aggregated traffic */
#define ATH_NON_AGGR_MIN_QDEPTH 8
-#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_HW_CHECK_POLL_INT 1000
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
+#define ATH_AIRTIME_QUANTUM 300 /* usec */
+
/* Stop tx traffic 1ms before the GO goes away */
#define ATH_P2P_PS_STOP_TIME 1000
@@ -247,6 +249,9 @@ struct ath_atx_tid {
bool has_queued;
};
+void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
+void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
+
struct ath_node {
struct ath_softc *sc;
struct ieee80211_sta *sta; /* station struct we're part of */
@@ -258,9 +263,12 @@ struct ath_node {
bool sleeping;
bool no_ps_filter;
+ s64 airtime_deficit[IEEE80211_NUM_ACS];
+ u32 airtime_rx_start;
#ifdef CONFIG_ATH9K_STATION_STATISTICS
struct ath_rx_rate_stats rx_rate_stats;
+ struct ath_airtime_stats airtime_stats;
#endif
u8 key_idx[4];
@@ -317,10 +325,16 @@ struct ath_rx {
/* Channel Context */
/*******************/
+struct ath_acq {
+ struct list_head acq_new;
+ struct list_head acq_old;
+ spinlock_t lock;
+};
+
struct ath_chanctx {
struct cfg80211_chan_def chandef;
struct list_head vifs;
- struct list_head acq[IEEE80211_NUM_ACS];
+ struct ath_acq acq[IEEE80211_NUM_ACS];
int hw_queue_base;
/* do not dereference, use for comparison only */
@@ -555,6 +569,15 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+static inline void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+{
+ spin_lock_bh(&txq->axq_lock);
+}
+static inline void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+{
+ spin_unlock_bh(&txq->axq_lock);
+}
+
void ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
@@ -562,8 +585,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
-void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
-void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
bool ath_drain_all_txq(struct ath_softc *sc);
@@ -575,6 +596,8 @@ void ath_txq_schedule_all(struct ath_softc *sc);
int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
+u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+ int width, int half_gi, bool shortPreamble);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -722,7 +745,7 @@ void ath9k_csa_update(struct ath_softc *sc);
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
#define ATH_PLL_WORK_INTERVAL 100
-void ath_tx_complete_poll_work(struct work_struct *work);
+void ath_hw_check_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
bool ath_hw_check(struct ath_softc *sc);
void ath_hw_pll_work(struct work_struct *work);
@@ -963,6 +986,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */
+#define AIRTIME_USE_TX BIT(0)
+#define AIRTIME_USE_RX BIT(1)
+#define AIRTIME_USE_NEW_QUEUES BIT(2)
+#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX)))
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -970,6 +998,7 @@ struct ath_softc {
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
+ spinlock_t intr_lock;
struct tasklet_struct intr_tq;
struct tasklet_struct bcon_tasklet;
struct ath_hw *sc_ah;
@@ -1005,6 +1034,8 @@ struct ath_softc {
short nbcnvifs;
unsigned long ps_usecount;
+ u16 airtime_flags; /* AIRTIME_* */
+
struct ath_rx rx;
struct ath_tx tx;
struct ath_beacon beacon;
@@ -1023,7 +1054,7 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
#endif
- struct delayed_work tx_complete_work;
+ struct delayed_work hw_check_work;
struct delayed_work hw_pll_work;
struct timer_list sleep_timer;
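Note: ath9k.h introduces the per-station airtime accounting fields (airtime_deficit per AC, the AIRTIME_* flags, ATH_AIRTIME_QUANTUM) and splits each per-AC queue into acq_new/acq_old lists. A minimal, self-contained sketch of deficit accounting under those assumptions; the scheduler logic elsewhere in the patch is more involved:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUANTUM_US 300 /* mirrors ATH_AIRTIME_QUANTUM */

struct demo_node {
        int64_t deficit; /* mirrors airtime_deficit[ac] */
};

/* charge measured TX/RX airtime against the node */
static void demo_charge_airtime(struct demo_node *n, uint32_t airtime_us)
{
        n->deficit -= airtime_us;
}

/* a node may be scheduled while its deficit is positive; otherwise it is
 * refilled by one quantum and has to wait for a later round */
static bool demo_may_schedule(struct demo_node *n)
{
        if (n->deficit > 0)
                return true;
        n->deficit += QUANTUM_US;
        return false;
}

int main(void)
{
        struct demo_node n = { .deficit = QUANTUM_US };

        demo_charge_airtime(&n, 450); /* used 450 us against a 300 us quantum */
        printf("deficit=%lld schedulable=%d\n",
               (long long)n.deficit, demo_may_schedule(&n));
        return 0;
}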
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 929dd70f48eb..b84539d89f1a 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -118,8 +118,11 @@ void ath_chanctx_init(struct ath_softc *sc)
INIT_LIST_HEAD(&ctx->vifs);
ctx->txpower = ATH_TXPOWER_MAX;
ctx->flush_timeout = HZ / 5; /* 200ms */
- for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
- INIT_LIST_HEAD(&ctx->acq[j]);
+ for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) {
+ INIT_LIST_HEAD(&ctx->acq[j].acq_new);
+ INIT_LIST_HEAD(&ctx->acq[j].acq_old);
+ spin_lock_init(&ctx->acq[j].lock);
+ }
}
}
@@ -1345,8 +1348,11 @@ void ath9k_offchannel_init(struct ath_softc *sc)
ctx->txpower = ATH_TXPOWER_MAX;
cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
- for (i = 0; i < ARRAY_SIZE(ctx->acq); i++)
- INIT_LIST_HEAD(&ctx->acq[i]);
+ for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) {
+ INIT_LIST_HEAD(&ctx->acq[i].acq_new);
+ INIT_LIST_HEAD(&ctx->acq[i].acq_old);
+ spin_lock_init(&ctx->acq[i].lock);
+ }
sc->offchannel.chan.offchannel = true;
}
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
index 7c9788490f7f..3376990d3a24 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.h
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -60,6 +60,7 @@ struct ath_rx_stats {
u32 rx_spectral;
};
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
struct ath_hw *ah);
void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
@@ -70,3 +71,29 @@ void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
struct ath_rx_stats *rxstats);
void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
struct ath_rx_stats *rxstats);
+#else
+static inline void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+}
+
+static inline void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+}
+
+static inline void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs)
+{
+}
+
+static inline void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+}
+
+static inline void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+}
+#endif /* CONFIG_ATH9K_COMMON_DEBUG */
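Note: common-debug.h (and common-spectral.h below) now provide empty static inline stubs when CONFIG_ATH9K_COMMON_DEBUG is disabled, so callers need no #ifdef guards of their own. The pattern in isolation, with CONFIG_FOO_DEBUG as a placeholder symbol rather than the real Kconfig option:

#ifdef CONFIG_FOO_DEBUG
void foo_debug_hook(int value);
#else
static inline void foo_debug_hook(int value)
{
}
#endif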
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index eedf86b67cf5..0ffa23a61568 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -482,7 +482,7 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
struct rchan *rc = spec_priv->rfs_chan_spec_scan;
for_each_online_cpu(i)
- ret += relay_buf_full(rc->buf[i]);
+ ret += relay_buf_full(*per_cpu_ptr(rc->buf, i));
i = num_online_cpus();
@@ -1075,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+ if (spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 998743be9c67..5d1a51d83aa6 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -151,6 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
@@ -161,5 +162,27 @@ int ath9k_cmn_spectral_scan_config(struct ath_common *common,
enum spectral_mode spectral_mode);
int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf);
+#else
+static inline void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+ struct dentry *debugfs_phy)
+{
+}
+
+static inline void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
+{
+}
+
+static inline void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv)
+{
+}
+
+static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_COMMON_DEBUG */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 89a94dd5f2cb..43930c336987 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1399,5 +1399,8 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_tpc);
+ debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, &sc->airtime_flags);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a078cdd3170d..249f8141cd00 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -221,6 +221,11 @@ struct ath_rx_rate_stats {
} cck_stats[4];
};
+struct ath_airtime_stats {
+ u32 rx_airtime;
+ u32 tx_airtime;
+};
+
#define ANT_MAIN 0
#define ANT_ALT 1
@@ -314,12 +319,20 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
void ath_debug_rate_stats(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb);
+void ath_debug_airtime(struct ath_softc *sc,
+ struct ath_node *an,
+ u32 rx, u32 tx);
#else
static inline void ath_debug_rate_stats(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb)
{
}
+static inline void ath_debug_airtime(struct ath_softc *sc,
+ struct ath_node *an,
+ u32 rx, u32 tx)
+{
+}
#endif /* CONFIG_ATH9K_STATION_STATISTICS */
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index 2a3a3c4671bc..524cbf13ca9c 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -242,6 +242,59 @@ static const struct file_operations fops_node_recv = {
.llseek = default_llseek,
};
+void ath_debug_airtime(struct ath_softc *sc,
+ struct ath_node *an,
+ u32 rx,
+ u32 tx)
+{
+ struct ath_airtime_stats *astats = &an->airtime_stats;
+
+ astats->rx_airtime += rx;
+ astats->tx_airtime += tx;
+}
+
+static ssize_t read_airtime(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_airtime_stats *astats;
+ static const char *qname[4] = {
+ "VO", "VI", "BE", "BK"
+ };
+ u32 len = 0, size = 256;
+ char *buf;
+ size_t retval;
+ int i;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ astats = &an->airtime_stats;
+
+ len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime);
+ len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime);
+ len += scnprintf(buf + len, size - len, "Deficit: ");
+ for (i = 0; i < 4; i++)
+ len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]);
+ if (len < size)
+ buf[len++] = '\n';
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+
+static const struct file_operations fops_airtime = {
+ .read = read_airtime,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -251,4 +304,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+ debugfs_create_file("airtime", S_IRUGO, dir, an, &fops_airtime);
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a449588a8009..fb80ec86e53d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -160,6 +160,7 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
u16 magic;
u16 *eepdata;
int i;
+ bool needs_byteswap = false;
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
@@ -167,31 +168,40 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
return -EIO;
}
- *swap_needed = false;
if (swab16(magic) == AR5416_EEPROM_MAGIC) {
+ needs_byteswap = true;
+ ath_dbg(common, EEPROM,
+ "EEPROM needs byte-swapping to correct endianness.\n");
+ } else if (magic != AR5416_EEPROM_MAGIC) {
+ if (ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, EEPROM,
+ "Ignoring invalid EEPROM magic (0x%04x).\n",
+ magic);
+ } else {
+ ath_err(common,
+ "Invalid EEPROM magic (0x%04x).\n", magic);
+ return -EINVAL;
+ }
+ }
+
+ if (needs_byteswap) {
if (ah->ah_flags & AH_NO_EEP_SWAP) {
ath_info(common,
"Ignoring endianness difference in EEPROM magic bytes.\n");
} else {
- *swap_needed = true;
- }
- } else if (magic != AR5416_EEPROM_MAGIC) {
- if (ath9k_hw_use_flash(ah))
- return 0;
+ eepdata = (u16 *)(&ah->eeprom);
- ath_err(common,
- "Invalid EEPROM Magic (0x%04x).\n", magic);
- return -EINVAL;
+ for (i = 0; i < size; i++)
+ eepdata[i] = swab16(eepdata[i]);
+ }
}
- eepdata = (u16 *)(&ah->eeprom);
-
- if (*swap_needed) {
+ if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) {
+ *swap_needed = true;
ath_dbg(common, EEPROM,
- "EEPROM Endianness is not native.. Changing.\n");
-
- for (i = 0; i < size; i++)
- eepdata[i] = swab16(eepdata[i]);
+ "Big Endian EEPROM detected according to EEPMISC register.\n");
+ } else {
+ *swap_needed = false;
}
return 0;
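Note: the eeprom.c rework splits endianness handling into two stages: the EEPROM magic decides whether the whole image must be byte-swapped in place, and the EEPMISC big-endian bit (obtained through the new get_eepmisc callback) decides whether individual fields still need swab'ing afterwards. A self-contained sketch of that decision flow, using illustrative constants and omitting the AH_NO_EEP_SWAP and flash special cases:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_EEPROM_MAGIC    0xa55a /* illustrative, not the driver's value */
#define DEMO_EEPMISC_BIG_END 0x01   /* mirrors AR5416_EEPMISC_BIG_ENDIAN */

static uint16_t demo_swab16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

static int demo_swap_data(uint16_t *eep, size_t words, uint16_t magic,
                          uint8_t eepmisc, bool *swap_needed)
{
        size_t i;

        if (demo_swab16(magic) == DEMO_EEPROM_MAGIC) {
                /* stage 1: image stored in the opposite byte order */
                for (i = 0; i < words; i++)
                        eep[i] = demo_swab16(eep[i]);
        } else if (magic != DEMO_EEPROM_MAGIC) {
                return -1; /* invalid magic */
        }

        /* stage 2: field-level swapping is driven by EEPMISC alone */
        *swap_needed = !!(eepmisc & DEMO_EEPMISC_BIG_END);
        return 0;
}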
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 4465c6566f20..30bf722e33ed 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -23,6 +23,17 @@
#include <net/cfg80211.h>
#include "ar9003_eeprom.h"
+/* helpers to swap EEPROM fields, which are stored as __le16 or __le32. Since
+ * we are 100% sure about it we __force these to u16/u32 for the swab calls to
+ * silence the sparse checks. These macros are used when we have a Big Endian
+ * EEPROM (according to AR5416_EEPMISC_BIG_ENDIAN) and need to convert the
+ * fields to __le16/__le32.
+ */
+#define EEPROM_FIELD_SWAB16(field) \
+ (field = (__force __le16)swab16((__force u16)field))
+#define EEPROM_FIELD_SWAB32(field) \
+ (field = (__force __le32)swab32((__force u32)field))
+
#ifdef __BIG_ENDIAN
#define AR5416_EEPROM_MAGIC 0x5aa5
#else
@@ -99,7 +110,6 @@
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
-#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
@@ -121,6 +131,8 @@
#define AR5416_EEP_NO_BACK_VER 0x1
#define AR5416_EEP_VER 0xE
+#define AR5416_EEP_VER_MAJOR_SHIFT 12
+#define AR5416_EEP_VER_MAJOR_MASK 0xF000
#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
#define AR5416_EEP_MINOR_VER_2 0x2
#define AR5416_EEP_MINOR_VER_3 0x3
@@ -161,6 +173,9 @@
#define AR5416_EEP_TXGAIN_ORIGINAL 0
#define AR5416_EEP_TXGAIN_HIGH_POWER 1
+/* Endianness of EEPROM content */
+#define AR5416_EEPMISC_BIG_ENDIAN 0x01
+
#define AR5416_EEP4K_START_LOC 64
#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3
#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
@@ -174,7 +189,6 @@
#define AR9280_TX_GAIN_TABLE_SIZE 22
#define AR9287_EEP_VER 0xE
-#define AR9287_EEP_VER_MINOR_MASK 0xFFF
#define AR9287_EEP_MINOR_VER_1 0x1
#define AR9287_EEP_MINOR_VER_2 0x2
#define AR9287_EEP_MINOR_VER_3 0x3
@@ -191,7 +205,6 @@
#define AR9287_NUM_CTLS 12
#define AR9287_NUM_BAND_EDGES 4
#define AR9287_PD_GAIN_ICEPTS 1
-#define AR9287_EEPMISC_BIG_ENDIAN 0x01
#define AR9287_EEPMISC_WOW 0x02
#define AR9287_MAX_CHAINS 2
#define AR9287_ANT_16S 32
@@ -228,7 +241,6 @@ enum eeprom_param {
EEP_DB_5,
EEP_OB_2,
EEP_DB_2,
- EEP_MINOR_REV,
EEP_TX_MASK,
EEP_RX_MASK,
EEP_FSTCLK_5G,
@@ -269,19 +281,19 @@ enum ath9k_hal_freq_band {
};
struct base_eep_header {
- u16 length;
- u16 checksum;
- u16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 version;
u8 opCapFlags;
u8 eepMisc;
- u16 regDmn[2];
+ __le16 regDmn[2];
u8 macAddr[6];
u8 rxMask;
u8 txMask;
- u16 rfSilent;
- u16 blueToothOptions;
- u16 deviceCap;
- u32 binBuildNumber;
+ __le16 rfSilent;
+ __le16 blueToothOptions;
+ __le16 deviceCap;
+ __le32 binBuildNumber;
u8 deviceType;
u8 pwdclkind;
u8 fastClk5g;
@@ -299,33 +311,33 @@ struct base_eep_header {
} __packed;
struct base_eep_header_4k {
- u16 length;
- u16 checksum;
- u16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 version;
u8 opCapFlags;
u8 eepMisc;
- u16 regDmn[2];
+ __le16 regDmn[2];
u8 macAddr[6];
u8 rxMask;
u8 txMask;
- u16 rfSilent;
- u16 blueToothOptions;
- u16 deviceCap;
- u32 binBuildNumber;
+ __le16 rfSilent;
+ __le16 blueToothOptions;
+ __le16 deviceCap;
+ __le32 binBuildNumber;
u8 deviceType;
u8 txGainType;
} __packed;
struct spur_chan {
- u16 spurChan;
+ __le16 spurChan;
u8 spurRangeLow;
u8 spurRangeHigh;
} __packed;
struct modal_eep_header {
- u32 antCtrlChain[AR5416_MAX_CHAINS];
- u32 antCtrlCommon;
+ __le32 antCtrlChain[AR5416_MAX_CHAINS];
+ __le32 antCtrlCommon;
u8 antennaGainCh[AR5416_MAX_CHAINS];
u8 switchSettling;
u8 txRxAttenCh[AR5416_MAX_CHAINS];
@@ -360,7 +372,7 @@ struct modal_eep_header {
u8 db_ch1;
u8 lna_ctl;
u8 miscBits;
- u16 xpaBiasLvlFreq[3];
+ __le16 xpaBiasLvlFreq[3];
u8 futureModal[6];
struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
@@ -374,8 +386,8 @@ struct calDataPerFreqOpLoop {
} __packed;
struct modal_eep_4k_header {
- u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
- u32 antCtrlCommon;
+ __le32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
+ __le32 antCtrlCommon;
u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
u8 switchSettling;
u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
@@ -439,19 +451,19 @@ struct modal_eep_4k_header {
} __packed;
struct base_eep_ar9287_header {
- u16 length;
- u16 checksum;
- u16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 version;
u8 opCapFlags;
u8 eepMisc;
- u16 regDmn[2];
+ __le16 regDmn[2];
u8 macAddr[6];
u8 rxMask;
u8 txMask;
- u16 rfSilent;
- u16 blueToothOptions;
- u16 deviceCap;
- u32 binBuildNumber;
+ __le16 rfSilent;
+ __le16 blueToothOptions;
+ __le16 deviceCap;
+ __le32 binBuildNumber;
u8 deviceType;
u8 openLoopPwrCntl;
int8_t pwrTableOffset;
@@ -461,8 +473,8 @@ struct base_eep_ar9287_header {
} __packed;
struct modal_eep_ar9287_header {
- u32 antCtrlChain[AR9287_MAX_CHAINS];
- u32 antCtrlCommon;
+ __le32 antCtrlChain[AR9287_MAX_CHAINS];
+ __le32 antCtrlCommon;
int8_t antennaGainCh[AR9287_MAX_CHAINS];
u8 switchSettling;
u8 txRxAttenCh[AR9287_MAX_CHAINS];
@@ -653,6 +665,7 @@ struct eeprom_ops {
u16 cfgCtl, u8 twiceAntennaReduction,
u8 powerLimit, bool test);
u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
+ u8 (*get_eepmisc)(struct ath_hw *ah);
};
void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
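Note: with the base/modal header fields retyped as __le16/__le32, the repeated swab-and-store sequences in the check_eeprom paths collapse into the EEPROM_FIELD_SWAB16/32 macros. A reduced userspace approximation of what those macros do; plain uint16_t/uint32_t stand in for the kernel's __le16/__le32 and the __force casts:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SWAB16(field) ((field) = (uint16_t)__builtin_bswap16(field))
#define FIELD_SWAB32(field) ((field) = __builtin_bswap32(field))

struct demo_base_header {
        uint16_t length;
        uint32_t antCtrlCommon;
};

int main(void)
{
        struct demo_base_header h = {
                .length = 0x3412,
                .antCtrlCommon = 0x78563412,
        };

        FIELD_SWAB16(h.length);        /* 0x3412 -> 0x1234 */
        FIELD_SWAB32(h.antCtrlCommon); /* 0x78563412 -> 0x12345678 */
        printf("length=%#x antCtrlCommon=%#x\n", h.length, h.antCtrlCommon);
        return 0;
}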
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 5da0826bf1be..b8c0a08066a0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -20,12 +20,17 @@
static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
{
- return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF);
+ u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version);
+
+ return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+ AR5416_EEP_VER_MAJOR_SHIFT;
}
static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
{
- return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
+ u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version);
+
+ return version & AR5416_EEP_VER_MINOR_MASK;
}
#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
@@ -67,12 +72,12 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
return __ath9k_hw_4k_fill_eeprom(ah);
}
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_4k_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
- PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Switch Settle", modal_hdr->switchSettling);
PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
@@ -127,6 +132,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
{
struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+ u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
if (!dump_base_hdr) {
len += scnprintf(buf + len, size - len,
@@ -136,12 +142,12 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
goto out;
}
- PR_EEP("Major Version", pBase->version >> 12);
- PR_EEP("Minor Version", pBase->version & 0xFFF);
- PR_EEP("Checksum", pBase->checksum);
- PR_EEP("Length", pBase->length);
- PR_EEP("RegDomain1", pBase->regDmn[0]);
- PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("Major Version", ath9k_hw_4k_get_eeprom_ver(ah));
+ PR_EEP("Minor Version", ath9k_hw_4k_get_eeprom_rev(ah));
+ PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+ PR_EEP("Length", le16_to_cpu(pBase->length));
+ PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+ PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
PR_EEP("TX Mask", pBase->txMask);
PR_EEP("RX Mask", pBase->rxMask);
PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -154,10 +160,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
AR5416_OPFLAGS_N_5G_HT20));
PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
AR5416_OPFLAGS_N_5G_HT40));
- PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
- PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
- PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
- PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+ PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
@@ -189,54 +195,31 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
return err;
if (need_swap)
- el = swab16(eep->baseEepHeader.length);
+ el = swab16((__force u16)eep->baseEepHeader.length);
else
- el = eep->baseEepHeader.length;
+ el = le16_to_cpu(eep->baseEepHeader.length);
el = min(el / sizeof(u16), SIZE_EEPROM_4K);
if (!ath9k_hw_nvram_validate_checksum(ah, el))
return -EINVAL;
if (need_swap) {
- u32 integer;
- u16 word;
-
- word = swab16(eep->baseEepHeader.length);
- eep->baseEepHeader.length = word;
-
- word = swab16(eep->baseEepHeader.checksum);
- eep->baseEepHeader.checksum = word;
-
- word = swab16(eep->baseEepHeader.version);
- eep->baseEepHeader.version = word;
-
- word = swab16(eep->baseEepHeader.regDmn[0]);
- eep->baseEepHeader.regDmn[0] = word;
-
- word = swab16(eep->baseEepHeader.regDmn[1]);
- eep->baseEepHeader.regDmn[1] = word;
-
- word = swab16(eep->baseEepHeader.rfSilent);
- eep->baseEepHeader.rfSilent = word;
-
- word = swab16(eep->baseEepHeader.blueToothOptions);
- eep->baseEepHeader.blueToothOptions = word;
-
- word = swab16(eep->baseEepHeader.deviceCap);
- eep->baseEepHeader.deviceCap = word;
-
- integer = swab32(eep->modalHeader.antCtrlCommon);
- eep->modalHeader.antCtrlCommon = integer;
-
- for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
- integer = swab32(eep->modalHeader.antCtrlChain[i]);
- eep->modalHeader.antCtrlChain[i] = integer;
- }
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- word = swab16(eep->modalHeader.spurChans[i].spurChan);
- eep->modalHeader.spurChans[i].spurChan = word;
- }
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
+ EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon);
+
+ for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++)
+ EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+ EEPROM_FIELD_SWAB16(
+ eep->modalHeader.spurChans[i].spurChan);
}
if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER,
@@ -254,9 +237,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
struct modal_eep_4k_header *pModal = &eep->modalHeader;
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
- u16 ver_minor;
-
- ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK;
switch (param) {
case EEP_NFTHRESH_2:
@@ -268,19 +248,17 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
case EEP_MAC_MSW:
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
- return pBase->regDmn[0];
+ return le16_to_cpu(pBase->regDmn[0]);
case EEP_OP_CAP:
- return pBase->deviceCap;
+ return le16_to_cpu(pBase->deviceCap);
case EEP_OP_MODE:
return pBase->opCapFlags;
case EEP_RF_SILENT:
- return pBase->rfSilent;
+ return le16_to_cpu(pBase->rfSilent);
case EEP_OB_2:
return pModal->ob_0;
case EEP_DB_2:
return pModal->db1_1;
- case EEP_MINOR_REV:
- return ver_minor;
case EEP_TX_MASK:
return pBase->txMask;
case EEP_RX_MASK:
@@ -319,14 +297,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
xpdMask = pEepData->modalHeader.xpdGain;
- if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
pdGainOverlap_t2 =
pEepData->modalHeader.pdGainOverlap;
- } else {
+ else
pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
- }
pCalBChans = pEepData->calFreqPier2G;
numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS;
@@ -612,10 +588,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
memset(ratesArray, 0, sizeof(ratesArray));
- if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
- }
ath9k_hw_set_4k_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
@@ -728,15 +702,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
{
ENABLE_REG_RMW_BUFFER(ah);
REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
- pModal->antCtrlChain[0], 0);
+ le32_to_cpu(pModal->antCtrlChain[0]), 0);
REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
- if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_3) {
+ if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
txRxAttenLocal = pModal->txRxAttenCh[0];
REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
@@ -795,7 +768,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
pModal = &eep->modalHeader;
txRxAttenLocal = 23;
- REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon));
/* Single chain for 4K EEPROM*/
ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);
@@ -1014,16 +987,14 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
pModal->thresh62);
- if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
pModal->txFrameToDataStart);
REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
pModal->txFrameToPaOn);
}
- if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_3) {
+ if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
if (IS_CHAN_HT40(chan))
REG_RMW_FIELD(ah, AR_PHY_SETTLING,
AR_PHY_SETTLING_SWITCH,
@@ -1061,7 +1032,12 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
- return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
+ return le16_to_cpu(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan);
+}
+
+static u8 ath9k_hw_4k_get_eepmisc(struct ath_hw *ah)
+{
+ return ah->eeprom.map4k.baseEepHeader.eepMisc;
}
const struct eeprom_ops eep_4k_ops = {
@@ -1073,5 +1049,6 @@ const struct eeprom_ops eep_4k_ops = {
.get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
.set_board_values = ath9k_hw_4k_set_board_values,
.set_txpower = ath9k_hw_4k_set_txpower,
- .get_spur_channel = ath9k_hw_4k_get_spur_channel
+ .get_spur_channel = ath9k_hw_4k_get_spur_channel,
+ .get_eepmisc = ath9k_hw_4k_get_eepmisc
};
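Note: the get_eeprom_ver/get_eeprom_rev helpers now extract the major and minor numbers from the little-endian version word via the new AR5416_EEP_VER_MAJOR_MASK/SHIFT defines instead of open-coded shifts. Standalone illustration of that split (0xE00E is a hypothetical version value, not taken from any device):

#include <stdint.h>
#include <stdio.h>

#define EEP_VER_MAJOR_SHIFT 12
#define EEP_VER_MAJOR_MASK  0xF000
#define EEP_VER_MINOR_MASK  0x0FFF

int main(void)
{
        uint16_t version = 0xE00E; /* hypothetical: major 0xE, minor 0x00E */

        printf("major=%u minor=%u\n",
               (unsigned)((version & EEP_VER_MAJOR_MASK) >> EEP_VER_MAJOR_SHIFT),
               (unsigned)(version & EEP_VER_MINOR_MASK));
        return 0;
}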
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 1a019a39eda1..3caa149b1013 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -22,12 +22,17 @@
static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
{
- return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
+ u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version);
+
+ return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+ AR5416_EEP_VER_MAJOR_SHIFT;
}
static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
{
- return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
+ u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version);
+
+ return version & AR5416_EEP_VER_MINOR_MASK;
}
static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
@@ -70,13 +75,13 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
return __ath9k_hw_ar9287_fill_eeprom(ah);
}
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_ar9287_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
- PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
PR_EEP("Switch Settle", modal_hdr->switchSettling);
@@ -123,6 +128,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
+ u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
if (!dump_base_hdr) {
len += scnprintf(buf + len, size - len,
@@ -132,12 +138,12 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
goto out;
}
- PR_EEP("Major Version", pBase->version >> 12);
- PR_EEP("Minor Version", pBase->version & 0xFFF);
- PR_EEP("Checksum", pBase->checksum);
- PR_EEP("Length", pBase->length);
- PR_EEP("RegDomain1", pBase->regDmn[0]);
- PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("Major Version", ath9k_hw_ar9287_get_eeprom_ver(ah));
+ PR_EEP("Minor Version", ath9k_hw_ar9287_get_eeprom_rev(ah));
+ PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+ PR_EEP("Length", le16_to_cpu(pBase->length));
+ PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+ PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
PR_EEP("TX Mask", pBase->txMask);
PR_EEP("RX Mask", pBase->rxMask);
PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -150,10 +156,10 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
AR5416_OPFLAGS_N_5G_HT20));
PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
AR5416_OPFLAGS_N_5G_HT40));
- PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
- PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
- PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
- PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+ PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
@@ -177,8 +183,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
- u32 el, integer;
- u16 word;
+ u32 el;
int i, err;
bool need_swap;
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -188,51 +193,31 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
return err;
if (need_swap)
- el = swab16(eep->baseEepHeader.length);
+ el = swab16((__force u16)eep->baseEepHeader.length);
else
- el = eep->baseEepHeader.length;
+ el = le16_to_cpu(eep->baseEepHeader.length);
el = min(el / sizeof(u16), SIZE_EEPROM_AR9287);
if (!ath9k_hw_nvram_validate_checksum(ah, el))
return -EINVAL;
if (need_swap) {
- word = swab16(eep->baseEepHeader.length);
- eep->baseEepHeader.length = word;
-
- word = swab16(eep->baseEepHeader.checksum);
- eep->baseEepHeader.checksum = word;
-
- word = swab16(eep->baseEepHeader.version);
- eep->baseEepHeader.version = word;
-
- word = swab16(eep->baseEepHeader.regDmn[0]);
- eep->baseEepHeader.regDmn[0] = word;
-
- word = swab16(eep->baseEepHeader.regDmn[1]);
- eep->baseEepHeader.regDmn[1] = word;
-
- word = swab16(eep->baseEepHeader.rfSilent);
- eep->baseEepHeader.rfSilent = word;
-
- word = swab16(eep->baseEepHeader.blueToothOptions);
- eep->baseEepHeader.blueToothOptions = word;
-
- word = swab16(eep->baseEepHeader.deviceCap);
- eep->baseEepHeader.deviceCap = word;
-
- integer = swab32(eep->modalHeader.antCtrlCommon);
- eep->modalHeader.antCtrlCommon = integer;
-
- for (i = 0; i < AR9287_MAX_CHAINS; i++) {
- integer = swab32(eep->modalHeader.antCtrlChain[i]);
- eep->modalHeader.antCtrlChain[i] = integer;
- }
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- word = swab16(eep->modalHeader.spurChans[i].spurChan);
- eep->modalHeader.spurChans[i].spurChan = word;
- }
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
+ EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon);
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++)
+ EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+ EEPROM_FIELD_SWAB16(
+ eep->modalHeader.spurChans[i].spurChan);
}
if (!ath9k_hw_nvram_check_version(ah, AR9287_EEP_VER,
@@ -250,9 +235,7 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
- u16 ver_minor;
-
- ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
+ u16 ver_minor = ath9k_hw_ar9287_get_eeprom_rev(ah);
switch (param) {
case EEP_NFTHRESH_2:
@@ -264,15 +247,13 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
case EEP_MAC_MSW:
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
- return pBase->regDmn[0];
+ return le16_to_cpu(pBase->regDmn[0]);
case EEP_OP_CAP:
- return pBase->deviceCap;
+ return le16_to_cpu(pBase->deviceCap);
case EEP_OP_MODE:
return pBase->opCapFlags;
case EEP_RF_SILENT:
- return pBase->rfSilent;
- case EEP_MINOR_REV:
- return ver_minor;
+ return le16_to_cpu(pBase->rfSilent);
case EEP_TX_MASK:
return pBase->txMask;
case EEP_RX_MASK:
@@ -387,8 +368,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
xpdMask = pEepData->modalHeader.xpdGain;
- if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
- AR9287_EEP_MINOR_VER_2)
+ if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2)
pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
else
pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
@@ -737,8 +717,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
memset(ratesArray, 0, sizeof(ratesArray));
- if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
- AR9287_EEP_MINOR_VER_2)
+ if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2)
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
@@ -879,13 +858,13 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
pModal = &eep->modalHeader;
- REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon));
for (i = 0; i < AR9287_MAX_CHAINS; i++) {
regChainOffset = i * 0x1000;
REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
- pModal->antCtrlChain[i]);
+ le32_to_cpu(pModal->antCtrlChain[i]));
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
(REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset)
@@ -983,7 +962,14 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
{
- return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
+ __le16 spur_ch = ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
+
+ return le16_to_cpu(spur_ch);
+}
+
+static u8 ath9k_hw_ar9287_get_eepmisc(struct ath_hw *ah)
+{
+ return ah->eeprom.map9287.baseEepHeader.eepMisc;
}
const struct eeprom_ops eep_ar9287_ops = {
@@ -995,5 +981,6 @@ const struct eeprom_ops eep_ar9287_ops = {
.get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
.set_board_values = ath9k_hw_ar9287_set_board_values,
.set_txpower = ath9k_hw_ar9287_set_txpower,
- .get_spur_channel = ath9k_hw_ar9287_get_spur_channel
+ .get_spur_channel = ath9k_hw_ar9287_get_spur_channel,
+ .get_eepmisc = ath9k_hw_ar9287_get_eepmisc
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 959682f7909c..56b44fc7a8e6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -79,12 +79,17 @@ static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah)
{
- return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF);
+ u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version);
+
+ return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+ AR5416_EEP_VER_MAJOR_SHIFT;
}
static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
{
- return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
+ u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version);
+
+ return version & AR5416_EEP_VER_MINOR_MASK;
}
#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
@@ -126,14 +131,14 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
return __ath9k_hw_def_fill_eeprom(ah);
}
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
- PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]);
- PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+ PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
PR_EEP("Chain2 Ant. Gain", modal_hdr->antennaGainCh[2]);
@@ -189,9 +194,9 @@ static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1);
PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1);
PR_EEP("LNA Control", modal_hdr->lna_ctl);
- PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]);
- PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]);
- PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]);
+ PR_EEP("XPA Bias Freq0", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[0]));
+ PR_EEP("XPA Bias Freq1", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[1]));
+ PR_EEP("XPA Bias Freq2", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[2]));
return len;
}
@@ -201,6 +206,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
{
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct base_eep_header *pBase = &eep->baseEepHeader;
+ u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
if (!dump_base_hdr) {
len += scnprintf(buf + len, size - len,
@@ -214,12 +220,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
goto out;
}
- PR_EEP("Major Version", pBase->version >> 12);
- PR_EEP("Minor Version", pBase->version & 0xFFF);
- PR_EEP("Checksum", pBase->checksum);
- PR_EEP("Length", pBase->length);
- PR_EEP("RegDomain1", pBase->regDmn[0]);
- PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("Major Version", ath9k_hw_def_get_eeprom_ver(ah));
+ PR_EEP("Minor Version", ath9k_hw_def_get_eeprom_rev(ah));
+ PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+ PR_EEP("Length", le16_to_cpu(pBase->length));
+ PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+ PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
PR_EEP("TX Mask", pBase->txMask);
PR_EEP("RX Mask", pBase->rxMask);
PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -232,10 +238,10 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
AR5416_OPFLAGS_N_5G_HT20));
PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
AR5416_OPFLAGS_N_5G_HT40));
- PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
- PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
- PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
- PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+ PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
@@ -268,61 +274,40 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return err;
if (need_swap)
- el = swab16(eep->baseEepHeader.length);
+ el = swab16((__force u16)eep->baseEepHeader.length);
else
- el = eep->baseEepHeader.length;
+ el = le16_to_cpu(eep->baseEepHeader.length);
el = min(el / sizeof(u16), SIZE_EEPROM_DEF);
if (!ath9k_hw_nvram_validate_checksum(ah, el))
return -EINVAL;
if (need_swap) {
- u32 integer, j;
- u16 word;
-
- word = swab16(eep->baseEepHeader.length);
- eep->baseEepHeader.length = word;
-
- word = swab16(eep->baseEepHeader.checksum);
- eep->baseEepHeader.checksum = word;
-
- word = swab16(eep->baseEepHeader.version);
- eep->baseEepHeader.version = word;
-
- word = swab16(eep->baseEepHeader.regDmn[0]);
- eep->baseEepHeader.regDmn[0] = word;
-
- word = swab16(eep->baseEepHeader.regDmn[1]);
- eep->baseEepHeader.regDmn[1] = word;
-
- word = swab16(eep->baseEepHeader.rfSilent);
- eep->baseEepHeader.rfSilent = word;
-
- word = swab16(eep->baseEepHeader.blueToothOptions);
- eep->baseEepHeader.blueToothOptions = word;
+ u32 j;
- word = swab16(eep->baseEepHeader.deviceCap);
- eep->baseEepHeader.deviceCap = word;
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+ EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
struct modal_eep_header *pModal =
&eep->modalHeader[j];
- integer = swab32(pModal->antCtrlCommon);
- pModal->antCtrlCommon = integer;
+ EEPROM_FIELD_SWAB32(pModal->antCtrlCommon);
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- integer = swab32(pModal->antCtrlChain[i]);
- pModal->antCtrlChain[i] = integer;
- }
- for (i = 0; i < 3; i++) {
- word = swab16(pModal->xpaBiasLvlFreq[i]);
- pModal->xpaBiasLvlFreq[i] = word;
- }
+ for (i = 0; i < AR5416_MAX_CHAINS; i++)
+ EEPROM_FIELD_SWAB32(pModal->antCtrlChain[i]);
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- word = swab16(pModal->spurChans[i].spurChan);
- pModal->spurChans[i].spurChan = word;
- }
+ for (i = 0; i < 3; i++)
+ EEPROM_FIELD_SWAB16(pModal->xpaBiasLvlFreq[i]);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+ EEPROM_FIELD_SWAB16(
+ pModal->spurChans[i].spurChan);
}
}
@@ -332,7 +317,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
/* Enable fixup for AR_AN_TOP2 if necessary */
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
- ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+ ((le16_to_cpu(eep->baseEepHeader.version) & 0xff) > 0x0a) &&
(eep->baseEepHeader.pwdclkind == 0))
ah->need_an_top2_fixup = true;
@@ -365,13 +350,13 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
case EEP_MAC_MSW:
return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
- return pBase->regDmn[0];
+ return le16_to_cpu(pBase->regDmn[0]);
case EEP_OP_CAP:
- return pBase->deviceCap;
+ return le16_to_cpu(pBase->deviceCap);
case EEP_OP_MODE:
return pBase->opCapFlags;
case EEP_RF_SILENT:
- return pBase->rfSilent;
+ return le16_to_cpu(pBase->rfSilent);
case EEP_OB_5:
return pModal[0].ob;
case EEP_DB_5:
@@ -380,8 +365,6 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pModal[1].ob;
case EEP_DB_2:
return pModal[1].db;
- case EEP_MINOR_REV:
- return AR5416_VER_MASK;
case EEP_TX_MASK:
return pBase->txMask;
case EEP_RX_MASK:
@@ -393,27 +376,27 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
case EEP_TXGAIN_TYPE:
return pBase->txGainType;
case EEP_OL_PWRCTRL:
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
return pBase->openLoopPwrCntl ? true : false;
else
return false;
case EEP_RC_CHAIN_MASK:
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
return pBase->rcChainMask;
else
return 0;
case EEP_DAC_HPWR_5G:
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20)
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20)
return pBase->dacHiPwrMode_5G;
else
return 0;
case EEP_FRAC_N_5G:
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22)
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_22)
return pBase->frac_n_5g;
else
return 0;
case EEP_PWR_TABLE_OFFSET:
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_21)
return pBase->pwr_table_offset;
else
return AR5416_PWR_TABLE_OFFSET_DB;
@@ -436,7 +419,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
u8 txRxAttenLocal, int regChainOffset, int i)
{
ENABLE_REG_RMW_BUFFER(ah);
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
txRxAttenLocal = pModal->txRxAttenCh[i];
if (AR_SREV_9280_20_OR_LATER(ah)) {
@@ -487,11 +470,13 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
int i, regChainOffset;
u8 txRxAttenLocal;
+ u32 antCtrlCommon;
pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
+ antCtrlCommon = le32_to_cpu(pModal->antCtrlCommon);
- REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff);
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, antCtrlCommon & 0xffff);
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
if (AR_SREV_9280(ah)) {
@@ -505,7 +490,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
regChainOffset = i * 0x1000;
REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
- pModal->antCtrlChain[i]);
+ le32_to_cpu(pModal->antCtrlChain[i]));
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
(REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
@@ -605,7 +590,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
pModal->thresh62);
}
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
AR_PHY_TX_END_DATA_START,
pModal->txFrameToDataStart);
@@ -613,7 +598,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
pModal->txFrameToPaOn);
}
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
if (IS_CHAN_HT40(chan))
REG_RMW_FIELD(ah, AR_PHY_SETTLING,
AR_PHY_SETTLING_SWITCH,
@@ -621,13 +606,14 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
}
if (AR_SREV_9280_20_OR_LATER(ah) &&
- AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
pModal->miscBits);
- if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
+ if (AR_SREV_9280_20(ah) &&
+ ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) {
if (IS_CHAN_2GHZ(chan))
REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
eep->baseEepHeader.dacLpMode);
@@ -651,7 +637,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
static void ath9k_hw_def_set_addac(struct ath_hw *ah,
struct ath9k_channel *chan)
{
-#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt])
+#define XPA_LVL_FREQ(cnt) (le16_to_cpu(pModal->xpaBiasLvlFreq[cnt]))
struct modal_eep_header *pModal;
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
u8 biaslevel;
@@ -798,8 +784,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
- if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
pdGainOverlap_t2 =
pEepData->modalHeader[modalIdx].pdGainOverlap;
} else {
@@ -1171,10 +1156,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
memset(ratesArray, 0, sizeof(ratesArray));
- if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
- AR5416_EEP_MINOR_VER_2) {
+ if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
- }
ath9k_hw_set_def_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
@@ -1314,7 +1297,14 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
- return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
+ __le16 spch = ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
+
+ return le16_to_cpu(spch);
+}
+
+static u8 ath9k_hw_def_get_eepmisc(struct ath_hw *ah)
+{
+ return ah->eeprom.def.baseEepHeader.eepMisc;
}
const struct eeprom_ops eep_def_ops = {
@@ -1327,5 +1317,6 @@ const struct eeprom_ops eep_def_ops = {
.set_board_values = ath9k_hw_def_set_board_values,
.set_addac = ath9k_hw_def_set_addac,
.set_txpower = ath9k_hw_def_set_txpower,
- .get_spur_channel = ath9k_hw_def_get_spur_channel
+ .get_spur_channel = ath9k_hw_def_get_spur_channel,
+ .get_eepmisc = ath9k_hw_def_get_eepmisc
};
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a35f78be8dec..8c5c2dd8fa7f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -731,7 +731,7 @@ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
udelay(100);
if (WARN_ON_ONCE(i >= 100)) {
- ath_err(common, "PLL4 meaurement not done\n");
+ ath_err(common, "PLL4 measurement not done\n");
break;
}
@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 20794660d6ae..fa4b3cc1ba22 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -620,6 +620,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
/* Will be cleared in ath9k_start() */
set_bit(ATH_OP_INVALID, &common->op_flags);
+ sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX |
+ AIRTIME_USE_NEW_QUEUES);
sc->sc_ah = ah;
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
@@ -667,6 +669,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
+ spin_lock_init(&sc->intr_lock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
spin_lock_init(&sc->chan_lock);
@@ -679,6 +682,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+ INIT_DELAYED_WORK(&sc->hw_check_work, ath_hw_check_work);
ath9k_init_channel_context(sc);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 5ad0feeebc86..27c50562dc47 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -20,20 +20,13 @@
* TX polling - checks if the TX engine is stuck somewhere
* and issues a chip reset if so.
*/
-void ath_tx_complete_poll_work(struct work_struct *work)
+static bool ath_tx_complete_check(struct ath_softc *sc)
{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- tx_complete_work.work);
struct ath_txq *txq;
int i;
- bool needreset = false;
-
- if (sc->tx99_state) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
- "skip tx hung detection on tx99\n");
- return;
- }
+ if (sc->tx99_state)
+ return true;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
@@ -41,25 +34,36 @@ void ath_tx_complete_poll_work(struct work_struct *work)
ath_txq_lock(sc, txq);
if (txq->axq_depth) {
if (txq->axq_tx_inprogress) {
- needreset = true;
ath_txq_unlock(sc, txq);
- break;
- } else {
- txq->axq_tx_inprogress = true;
+ goto reset;
}
+
+ txq->axq_tx_inprogress = true;
}
ath_txq_unlock(sc, txq);
}
- if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
- "tx hung, resetting the chip\n");
- ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+ return true;
+
+reset:
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "tx hung, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+ return false;
+}
+
+void ath_hw_check_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_check_work.work);
+
+ if (!ath_hw_check(sc) ||
+ !ath_tx_complete_check(sc))
return;
- }
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
- msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+ msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index bba85d1a6cd1..d937c39b3a0b 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
-void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
u32 async_mask;
- if (!(ah->imask & ATH9K_INT_GLOBAL))
- return;
-
- if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
- ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
- atomic_read(&ah->intr_ref_cnt));
- return;
- }
-
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
+
+void ath9k_hw_resume_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ if (atomic_read(&ah->intr_ref_cnt) != 0) {
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
+ __ath9k_hw_enable_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
+ __ath9k_hw_enable_interrupts(ah);
+}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 3bab01435a86..770fc11b41d1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
void ath9k_hw_enable_interrupts(struct ath_hw *ah);
void ath9k_hw_disable_interrupts(struct ath_hw *ah);
void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+void ath9k_hw_resume_interrupts(struct ath_hw *ah);
void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 59e3bd0f4c20..9e65d14e7b1e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -70,10 +70,10 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
goto out;
if (txq->mac80211_qnum >= 0) {
- struct list_head *list;
+ struct ath_acq *acq;
- list = &sc->cur_chan->acq[txq->mac80211_qnum];
- if (!list_empty(list))
+ acq = &sc->cur_chan->acq[txq->mac80211_qnum];
+ if (!list_empty(&acq->acq_new) || !list_empty(&acq->acq_old))
pending = true;
}
out:
@@ -181,7 +181,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
- cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -198,7 +198,8 @@ void ath_cancel_work(struct ath_softc *sc)
void ath_restart_work(struct ath_softc *sc)
{
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+ ATH_HW_CHECK_POLL_INT);
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
@@ -373,21 +374,20 @@ void ath9k_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
enum ath_reset_type type;
unsigned long flags;
- u32 status = sc->intrstatus;
+ u32 status;
u32 rxmask;
+ spin_lock_irqsave(&sc->intr_lock, flags);
+ status = sc->intrstatus;
+ sc->intrstatus = 0;
+ spin_unlock_irqrestore(&sc->intr_lock, flags);
+
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
if (status & ATH9K_INT_FATAL) {
type = RESET_TYPE_FATAL_INT;
ath9k_queue_reset(sc, type);
-
- /*
- * Increment the ref. counter here so that
- * interrupts are enabled in the reset routine.
- */
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -403,11 +403,6 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
- /*
- * Increment the ref. counter here so that
- * interrupts are enabled in the reset routine.
- */
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"BB_WATCHDOG: Skipping interrupts\n");
goto out;
@@ -420,7 +415,6 @@ void ath9k_tasklet(unsigned long data)
if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
type = RESET_TYPE_TX_GTT;
ath9k_queue_reset(sc, type);
- atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"GTT: Skipping interrupts\n");
goto out;
@@ -477,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_resume_interrupts(ah);
out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
@@ -541,7 +535,9 @@ irqreturn_t ath_isr(int irq, void *dev)
return IRQ_NONE;
/* Cache the status */
- sc->intrstatus = status;
+ spin_lock(&sc->intr_lock);
+ sc->intrstatus |= status;
+ spin_unlock(&sc->intr_lock);
if (status & SCHED_INTR)
sched = true;
@@ -587,7 +583,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt */
- ath9k_hw_disable_interrupts(ah);
+ ath9k_hw_kill_interrupts(ah);
tasklet_schedule(&sc->intr_tq);
}
@@ -2091,7 +2087,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
int timeout;
bool drain_txq;
- cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_check_work);
if (ah->ah_flags & AH_UNPLUGGED) {
ath_dbg(common, ANY, "Device has been unplugged!\n");
@@ -2129,7 +2125,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
ath9k_ps_restore(sc);
}
- ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
+ ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
+ ATH_HW_CHECK_POLL_INT);
}
static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fb4ba27d92b7..d79837fe333f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1002,6 +1002,70 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
}
}
+static void ath_rx_count_airtime(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+ struct ath_node *an;
+ struct ath_acq *acq;
+ struct ath_vif *avp;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_sta *sta;
+ struct ieee80211_rx_status *rxs;
+ const struct ieee80211_rate *rate;
+ bool is_sgi, is_40, is_sp;
+ int phy;
+ u16 len = rs->rs_datalen;
+ u32 airtime = 0;
+ u8 tidno, acno;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+ an = (struct ath_node *) sta->drv_priv;
+ avp = (struct ath_vif *) an->vif->drv_priv;
+ tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ acno = TID_TO_WME_AC(tidno);
+ acq = &avp->chanctx->acq[acno];
+
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI);
+ is_40 = !!(rxs->flag & RX_FLAG_40MHZ);
+ is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE);
+
+ if (!!(rxs->flag & RX_FLAG_HT)) {
+ /* MCS rates */
+
+ airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
+ is_40, is_sgi, is_sp);
+ } else {
+
+ phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
+ rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx];
+ airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100,
+ len, rxs->rate_idx, is_sp);
+ }
+
+ if (!!(sc->airtime_flags & AIRTIME_USE_RX)) {
+ spin_lock_bh(&acq->lock);
+ an->airtime_deficit[acno] -= airtime;
+ if (an->airtime_deficit[acno] <= 0)
+ __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno));
+ spin_unlock_bh(&acq->lock);
+ }
+ ath_debug_airtime(sc, an, airtime, 0);
+exit:
+ rcu_read_unlock();
+}
+
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_rxbuf *bf;
@@ -1148,6 +1212,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath9k_antenna_check(sc, &rs);
ath9k_apply_ampdu_details(sc, &rs, rxs);
ath_debug_rate_stats(sc, &rs, skb);
+ ath_rx_count_airtime(sc, &rs, skb);
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_ack(hdr->frame_control))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4e2f3ac266c3..396bf05c6bf6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -97,18 +97,6 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
- __acquires(&txq->axq_lock)
-{
- spin_lock_bh(&txq->axq_lock);
-}
-
-void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
- __releases(&txq->axq_lock)
-{
- spin_unlock_bh(&txq->axq_lock);
-}
-
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
@@ -124,21 +112,44 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_status(hw, skb);
}
-static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct list_head *list;
struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
struct ath_chanctx *ctx = avp->chanctx;
+ struct ath_acq *acq;
+ struct list_head *tid_list;
+ u8 acno = TID_TO_WME_AC(tid->tidno);
- if (!ctx)
+ if (!ctx || !list_empty(&tid->list))
return;
- list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
- if (list_empty(&tid->list))
- list_add_tail(&tid->list, list);
+
+ acq = &ctx->acq[acno];
+ if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) &&
+ tid->an->airtime_deficit[acno] > 0)
+ tid_list = &acq->acq_new;
+ else
+ tid_list = &acq->acq_old;
+
+ list_add_tail(&tid->list, tid_list);
}
+void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+ struct ath_chanctx *ctx = avp->chanctx;
+ struct ath_acq *acq;
+
+ if (!ctx || !list_empty(&tid->list))
+ return;
+
+ acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+ spin_lock_bh(&acq->lock);
+ __ath_tx_queue_tid(sc, tid);
+ spin_unlock_bh(&acq->lock);
+}
+
void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
{
struct ath_softc *sc = hw->priv;
@@ -153,7 +164,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
ath_txq_lock(sc, txq);
tid->has_queued = true;
- ath_tx_queue_tid(sc, txq, tid);
+ ath_tx_queue_tid(sc, tid);
ath_txq_schedule(sc, txq);
ath_txq_unlock(sc, txq);
@@ -660,7 +671,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
- ath_tx_queue_tid(sc, txq, tid);
+ ath_tx_queue_tid(sc, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
@@ -688,6 +699,33 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}
+static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an,
+ struct ath_atx_tid *tid, struct ath_buf *bf,
+ struct ath_tx_status *ts)
+{
+ struct ath_txq *txq = tid->txq;
+ u32 airtime = 0;
+ int i;
+
+ airtime += ts->duration * (ts->ts_longretry + 1);
+ for (i = 0; i < ts->ts_rateindex; i++) {
+ int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i);
+ airtime += rate_dur * bf->rates[i].count;
+ }
+
+ if (sc->airtime_flags & AIRTIME_USE_TX) {
+ int q = txq->mac80211_qnum;
+ struct ath_acq *acq = &sc->cur_chan->acq[q];
+
+ spin_lock_bh(&acq->lock);
+ an->airtime_deficit[q] -= airtime;
+ if (an->airtime_deficit[q] <= 0)
+ __ath_tx_queue_tid(sc, tid);
+ spin_unlock_bh(&acq->lock);
+ }
+ ath_debug_airtime(sc, an, 0, airtime);
+}
+
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
@@ -715,6 +753,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+ ath_tx_count_airtime(sc, an, tid, bf, ts);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
@@ -1068,8 +1107,8 @@ finish:
* width - 0 for 20 MHz, 1 for 40 MHz
* half_gi - to use 4us v/s 3.6 us for symbol time
*/
-static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
- int width, int half_gi, bool shortPreamble)
+u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+ int width, int half_gi, bool shortPreamble)
{
u32 nbits, nsymbits, duration, nsymbols;
int streams;
@@ -1151,8 +1190,9 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
if (is_40) {
u8 power_ht40delta;
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ u16 eeprom_rev = ah->eep_ops->get_eeprom_rev(ah);
- if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+ if (eeprom_rev >= AR5416_EEP_MINOR_VER_2) {
bool is_2ghz;
struct modal_eep_header *pmodal;
@@ -1467,7 +1507,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
}
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, bool *stop)
+ struct ath_atx_tid *tid)
{
struct ath_buf *bf;
struct ieee80211_tx_info *tx_info;
@@ -1489,7 +1529,6 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
__skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
- *stop = true;
return false;
}
@@ -1613,7 +1652,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_lock(sc, txq);
tid->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(sc, txq, tid);
+ ath_tx_queue_tid(sc, tid);
ath_txq_schedule(sc, txq);
}
ath_txq_unlock_complete(sc, txq);
@@ -1912,9 +1951,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_atx_tid *tid, *last_tid;
+ struct ath_atx_tid *tid;
struct list_head *tid_list;
- bool sent = false;
+ struct ath_acq *acq;
+ bool active = AIRTIME_ACTIVE(sc->airtime_flags);
if (txq->mac80211_qnum < 0)
return;
@@ -1923,48 +1963,55 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
return;
spin_lock_bh(&sc->chan_lock);
- tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
-
- if (list_empty(tid_list)) {
- spin_unlock_bh(&sc->chan_lock);
- return;
- }
-
rcu_read_lock();
+ acq = &sc->cur_chan->acq[txq->mac80211_qnum];
- last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
- while (!list_empty(tid_list)) {
- bool stop = false;
-
- if (sc->cur_chan->stopped)
- break;
-
- tid = list_first_entry(tid_list, struct ath_atx_tid, list);
- list_del_init(&tid->list);
+ if (sc->cur_chan->stopped)
+ goto out;
- if (ath_tx_sched_aggr(sc, txq, tid, &stop))
- sent = true;
+begin:
+ tid_list = &acq->acq_new;
+ if (list_empty(tid_list)) {
+ tid_list = &acq->acq_old;
+ if (list_empty(tid_list))
+ goto out;
+ }
+ tid = list_first_entry(tid_list, struct ath_atx_tid, list);
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (ath_tid_has_buffered(tid))
- ath_tx_queue_tid(sc, txq, tid);
+ if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) {
+ spin_lock_bh(&acq->lock);
+ tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM;
+ list_move_tail(&tid->list, &acq->acq_old);
+ spin_unlock_bh(&acq->lock);
+ goto begin;
+ }
- if (stop)
- break;
+ if (!ath_tid_has_buffered(tid)) {
+ spin_lock_bh(&acq->lock);
+ if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old))
+ list_move_tail(&tid->list, &acq->acq_old);
+ else {
+ list_del_init(&tid->list);
+ }
+ spin_unlock_bh(&acq->lock);
+ goto begin;
+ }
- if (tid == last_tid) {
- if (!sent)
- break;
- sent = false;
- last_tid = list_entry(tid_list->prev,
- struct ath_atx_tid, list);
+ /*
+ * If we succeed in scheduling something, immediately restart to make
+ * sure we keep the HW busy.
+ */
+ if (ath_tx_sched_aggr(sc, txq, tid)) {
+ if (!active) {
+ spin_lock_bh(&acq->lock);
+ list_move_tail(&tid->list, &acq->acq_old);
+ spin_unlock_bh(&acq->lock);
}
+ goto begin;
}
+out:
rcu_read_unlock();
spin_unlock_bh(&sc->chan_lock);
}
@@ -2805,8 +2852,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
return error;
}
- INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
error = ath_tx_edma_init(sc);
@@ -2818,6 +2863,9 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
struct ath_atx_tid *tid;
int tidno, acno;
+ for (acno = 0; acno < IEEE80211_NUM_ACS; acno++)
+ an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM;
+
for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
tid = ath_node_to_tid(an, tidno);
tid->an = an;
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
index 591ebaea8265..4b83e87f0b94 100644
--- a/drivers/net/wireless/ath/wcn36xx/Kconfig
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -1,6 +1,8 @@
config WCN36XX
tristate "Qualcomm Atheros WCN3660/3680 support"
depends on MAC80211 && HAS_DMA
+ depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n
+ depends on QCOM_SMD || QCOM_SMD=n
---help---
This module adds support for wireless adapters based on
Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 231fd022f0f5..87dfdaf9044c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
+#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"
@@ -151,9 +152,12 @@ int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
goto out_err;
/* Initialize SMSM state: clear TX Enable, set RING EMPTY state */
- ret = wcn->ctrl_ops->smsm_change_state(
- WCN36XX_SMSM_WLAN_TX_ENABLE,
- WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+ ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
+ WCN36XX_SMSM_WLAN_TX_ENABLE |
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+ if (ret)
+ goto out_err;
return 0;
@@ -678,9 +682,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
* notify chip about new frame through SMSM bus.
*/
if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
- wcn->ctrl_ops->smsm_change_state(
- 0,
- WCN36XX_SMSM_WLAN_TX_ENABLE);
+ qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
} else {
/* indicate End Of Packet and generate interrupt on descriptor
* done.
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 4f87ef1e1eb8..b765c647319d 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -350,6 +350,8 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
+ WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
+
WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
};
@@ -4703,4 +4705,18 @@ struct stats_class_b_ind {
u32 rx_time_total;
};
+/* WCN36XX_HAL_PRINT_REG_INFO_IND */
+struct wcn36xx_hal_print_reg_info_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 count;
+ u32 scenario;
+ u32 reason;
+
+ struct {
+ u32 addr;
+ u32 value;
+ } regs[];
+} __packed;
+
#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e1d59da2ad20..7a0c2e7da7f6 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -21,6 +21,10 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
#include "wcn36xx.h"
unsigned int wcn36xx_dbg_mask;
@@ -564,23 +568,81 @@ out:
return ret;
}
-static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *mac_addr)
+static void wcn36xx_hw_scan_worker(struct work_struct *work)
{
- struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work);
+ struct cfg80211_scan_request *req = wcn->scan_req;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+ struct cfg80211_scan_info scan_info = {};
+ bool aborted = false;
+ int i;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
+
+ for (i = 0; i < req->n_channels; i++)
+ channels[i] = req->channels[i]->hw_value;
+
+ wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels);
wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
- wcn36xx_smd_start_scan(wcn);
+ for (i = 0; i < req->n_channels; i++) {
+ mutex_lock(&wcn->scan_lock);
+ aborted = wcn->scan_aborted;
+ mutex_unlock(&wcn->scan_lock);
+
+ if (aborted)
+ break;
+
+ wcn->scan_freq = req->channels[i]->center_freq;
+ wcn->scan_band = req->channels[i]->band;
+
+ wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
+ msleep(30);
+ wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
+
+ wcn->scan_freq = 0;
+ }
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+
+ scan_info.aborted = aborted;
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+
+ mutex_lock(&wcn->scan_lock);
+ wcn->scan_req = NULL;
+ mutex_unlock(&wcn->scan_lock);
}
-static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
- wcn36xx_smd_end_scan(wcn);
- wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+ mutex_lock(&wcn->scan_lock);
+ if (wcn->scan_req) {
+ mutex_unlock(&wcn->scan_lock);
+ return -EBUSY;
+ }
+
+ wcn->scan_aborted = false;
+ wcn->scan_req = &hw_req->req;
+ mutex_unlock(&wcn->scan_lock);
+
+ schedule_work(&wcn->scan_work);
+
+ return 0;
+}
+
+static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ mutex_lock(&wcn->scan_lock);
+ wcn->scan_aborted = true;
+ mutex_unlock(&wcn->scan_lock);
+
+ cancel_work_sync(&wcn->scan_work);
}
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
@@ -993,8 +1055,8 @@ static const struct ieee80211_ops wcn36xx_ops = {
.configure_filter = wcn36xx_configure_filter,
.tx = wcn36xx_tx,
.set_key = wcn36xx_set_key,
- .sw_scan_start = wcn36xx_sw_scan_start,
- .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .hw_scan = wcn36xx_hw_scan,
+ .cancel_hw_scan = wcn36xx_cancel_hw_scan,
.bss_info_changed = wcn36xx_bss_info_changed,
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
@@ -1019,6 +1081,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1028,6 +1091,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
+ wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
+ wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
+
wcn->hw->wiphy->cipher_suites = cipher_suites;
wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -1058,8 +1124,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
int ret;
/* Set TX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "wcnss_wlantx_irq");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
if (!res) {
wcn36xx_err("failed to get tx_irq\n");
return -ENOENT;
@@ -1067,14 +1132,29 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
wcn->tx_irq = res->start;
/* Set RX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "wcnss_wlanrx_irq");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
if (!res) {
wcn36xx_err("failed to get rx_irq\n");
return -ENOENT;
}
wcn->rx_irq = res->start;
+ /* Acquire SMSM tx enable handle */
+ wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
+ "tx-enable", &wcn->tx_enable_state_bit);
+ if (IS_ERR(wcn->tx_enable_state)) {
+ wcn36xx_err("failed to get tx-enable state\n");
+ return PTR_ERR(wcn->tx_enable_state);
+ }
+
+ /* Acquire SMSM tx rings empty handle */
+ wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev,
+ "tx-rings-empty", &wcn->tx_rings_empty_state_bit);
+ if (IS_ERR(wcn->tx_rings_empty_state)) {
+ wcn36xx_err("failed to get tx-rings-empty state\n");
+ return PTR_ERR(wcn->tx_rings_empty_state);
+ }
+
mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0);
if (!mmio_node) {
wcn36xx_err("failed to acquire qcom,mmio reference\n");
@@ -1115,11 +1195,14 @@ static int wcn36xx_probe(struct platform_device *pdev)
{
struct ieee80211_hw *hw;
struct wcn36xx *wcn;
+ void *wcnss;
int ret;
- u8 addr[ETH_ALEN];
+ const u8 *addr;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
if (!hw) {
wcn36xx_err("failed to alloc hw\n");
@@ -1130,11 +1213,26 @@ static int wcn36xx_probe(struct platform_device *pdev)
wcn = hw->priv;
wcn->hw = hw;
wcn->dev = &pdev->dev;
- wcn->ctrl_ops = pdev->dev.platform_data;
-
mutex_init(&wcn->hal_mutex);
+ mutex_init(&wcn->scan_lock);
- if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
+
+ wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process);
+ if (IS_ERR(wcn->smd_channel)) {
+ wcn36xx_err("failed to open WLAN_CTRL channel\n");
+ ret = PTR_ERR(wcn->smd_channel);
+ goto out_wq;
+ }
+
+ qcom_smd_set_drvdata(wcn->smd_channel, hw);
+
+ addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret);
+ if (addr && ret != ETH_ALEN) {
+ wcn36xx_err("invalid local-mac-address\n");
+ ret = -EINVAL;
+ goto out_wq;
+ } else if (addr) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
}
@@ -1158,6 +1256,7 @@ out_wq:
out_err:
return ret;
}
+
static int wcn36xx_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -1165,45 +1264,37 @@ static int wcn36xx_remove(struct platform_device *pdev)
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
release_firmware(wcn->nv);
- mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
+
+ qcom_smem_state_put(wcn->tx_enable_state);
+ qcom_smem_state_put(wcn->tx_rings_empty_state);
+
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
+
+ mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
return 0;
}
-static const struct platform_device_id wcn36xx_platform_id_table[] = {
- {
- .name = "wcn36xx",
- .driver_data = 0
- },
+
+static const struct of_device_id wcn36xx_of_match[] = {
+ { .compatible = "qcom,wcnss-wlan" },
{}
};
-MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+MODULE_DEVICE_TABLE(of, wcn36xx_of_match);
static struct platform_driver wcn36xx_driver = {
.probe = wcn36xx_probe,
.remove = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
+ .of_match_table = wcn36xx_of_match,
},
- .id_table = wcn36xx_platform_id_table,
};
-static int __init wcn36xx_init(void)
-{
- platform_driver_register(&wcn36xx_driver);
- return 0;
-}
-module_init(wcn36xx_init);
-
-static void __exit wcn36xx_exit(void)
-{
- platform_driver_unregister(&wcn36xx_driver);
-}
-module_exit(wcn36xx_exit);
+module_platform_driver(wcn36xx_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index a443992320f2..1c2966f7db7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
+#include <linux/soc/qcom/smd.h>
#include "smd.h"
struct wcn36xx_cfg_val {
@@ -253,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
- ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
goto out;
@@ -521,7 +522,7 @@ out:
return ret;
}
-int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
int ret = 0;
@@ -529,7 +530,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
- msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+ msg_body.scan_channel = scan_channel;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -551,7 +552,7 @@ out:
return ret;
}
-int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
int ret = 0;
@@ -559,7 +560,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
- msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+ msg_body.scan_channel = scan_channel;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2108,6 +2109,30 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
return -ENOENT;
}
+static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_print_reg_info_ind *rsp = buf;
+ int i;
+
+ if (len < sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted print reg info indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "reginfo indication, scenario: 0x%x reason: 0x%x\n",
+ rsp->scenario, rsp->reason);
+
+ for (i = 0; i < rsp->count; i++) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n",
+ rsp->regs[i].addr, rsp->regs[i].value);
+ }
+
+ return 0;
+}
+
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
@@ -2180,9 +2205,12 @@ out:
return ret;
}
-static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
+ const void *buf, size_t len)
{
- struct wcn36xx_hal_msg_header *msg_header = buf;
+ const struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel);
+ struct wcn36xx *wcn = hw->priv;
struct wcn36xx_hal_ind_msg *msg_ind;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
@@ -2233,15 +2261,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
- msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL);
+ case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
if (!msg_ind) {
- /*
- * FIXME: Do something smarter then just
- * printing an error.
- */
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);
- break;
+ return -ENOMEM;
}
msg_ind->msg_len = len;
@@ -2257,6 +2282,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
}
+
+ return 0;
}
static void wcn36xx_ind_smd_work(struct work_struct *work)
{
@@ -2294,6 +2321,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
+ case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ wcn36xx_smd_print_reg_info_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
@@ -2315,22 +2347,13 @@ int wcn36xx_smd_open(struct wcn36xx *wcn)
INIT_LIST_HEAD(&wcn->hal_ind_queue);
spin_lock_init(&wcn->hal_ind_lock);
- ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
- if (ret) {
- wcn36xx_err("failed to open control channel\n");
- goto free_wq;
- }
-
- return ret;
+ return 0;
-free_wq:
- destroy_workqueue(wcn->hal_ind_wq);
out:
return ret;
}
void wcn36xx_smd_close(struct wcn36xx *wcn)
{
- wcn->ctrl_ops->close();
destroy_workqueue(wcn->hal_ind_wq);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index df80cbbd9d1b..8892ccd67b14 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -51,6 +51,7 @@ struct wcn36xx_hal_ind_msg {
};
struct wcn36xx;
+struct qcom_smd_channel;
int wcn36xx_smd_open(struct wcn36xx *wcn);
void wcn36xx_smd_close(struct wcn36xx *wcn);
@@ -59,8 +60,8 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
int wcn36xx_smd_start(struct wcn36xx *wcn);
int wcn36xx_smd_stop(struct wcn36xx *wcn);
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
-int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
-int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode);
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
@@ -127,6 +128,10 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+
+int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
+ const void *buf, size_t len);
+
int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 1f34c2e912d7..8c387a0a3c09 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -45,9 +45,20 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_header_off);
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ /* When scanning, associate beacons with the scanned channel */
+ if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) {
+ status.freq = wcn->scan_freq;
+ status.band = wcn->scan_band;
+ } else {
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ }
+
status.mactime = 10;
- status.freq = WCN36XX_CENTER_FREQ(wcn);
- status.band = WCN36XX_BAND(wcn);
status.signal = -get_rssi0(bd);
status.antenna = 1;
status.rate_idx = 1;
@@ -61,10 +72,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = __le16_to_cpu(hdr->frame_control);
- sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
-
if (ieee80211_is_beacon(hdr->frame_control)) {
wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
skb, skb->len, fc, sn);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 22242d18e1fe..7423998ddeb4 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -35,6 +35,9 @@
/* How many frames until we start a-mpdu TX session */
#define WCN36XX_AMPDU_START_THRESH 20
+#define WCN36XX_MAX_SCAN_SSIDS 9
+#define WCN36XX_MAX_SCAN_IE_LEN 500
+
extern unsigned int wcn36xx_dbg_mask;
enum wcn36xx_debug_mask {
@@ -103,19 +106,6 @@ struct nv_data {
u8 table;
};
-/* Interface for platform control path
- *
- * @open: hook must be called when wcn36xx wants to open control channel.
- * @tx: sends a buffer.
- */
-struct wcn36xx_platform_ctrl_ops {
- int (*open)(void *drv_priv, void *rsp_cb);
- void (*close)(void);
- int (*tx)(char *buf, size_t len);
- int (*get_hw_mac)(u8 *addr);
- int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
-};
-
/**
* struct wcn36xx_vif - holds VIF related fields
*
@@ -205,7 +195,13 @@ struct wcn36xx {
void __iomem *ccu_base;
void __iomem *dxe_base;
- struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ struct qcom_smd_channel *smd_channel;
+
+ struct qcom_smem_state *tx_enable_state;
+ unsigned tx_enable_state_bit;
+ struct qcom_smem_state *tx_rings_empty_state;
+ unsigned tx_rings_empty_state_bit;
+
/*
* smd_buf must be protected with smd_mutex to guarantee
* that all messages are sent one after another
@@ -219,6 +215,13 @@ struct wcn36xx {
spinlock_t hal_ind_lock;
struct list_head hal_ind_queue;
+ struct work_struct scan_work;
+ struct cfg80211_scan_request *scan_req;
+ int scan_freq;
+ int scan_band;
+ struct mutex scan_lock;
+ bool scan_aborted;
+
/* DXE channels */
struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6aa3ff4240a9..83155b5ddbfb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,11 +15,16 @@
*/
#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
#include "wil6210.h"
#include "wmi.h"
#define WIL_MAX_ROC_DURATION_MS 5000
+bool disable_ap_sme;
+module_param(disable_ap_sme, bool, 0444);
+MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+
#define CHAN60G(_channel, _flags) { \
.band = NL80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
@@ -62,9 +67,16 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
},
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
@@ -194,7 +206,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
int cid = wil_find_cid(wil, mac);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid);
if (cid < 0)
return cid;
@@ -233,7 +245,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -250,16 +262,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *p2p_wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "add_iface\n");
if (type != NL80211_IFTYPE_P2P_DEVICE) {
- wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+ wil_err(wil, "unsupported iftype %d\n", type);
return ERR_PTR(-EINVAL);
}
if (wil->p2p_wdev) {
- wil_err(wil, "%s: P2P_DEVICE interface already created\n",
- __func__);
+ wil_err(wil, "P2P_DEVICE interface already created\n");
return ERR_PTR(-EINVAL);
}
@@ -282,11 +293,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "del_iface\n");
if (wdev != wil->p2p_wdev) {
- wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
- __func__, wdev);
+ wil_err(wil, "delete of incorrect interface 0x%p\n", wdev);
return -EINVAL;
}
@@ -304,7 +314,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct wireless_dev *wdev = wil_to_wdev(wil);
int rc;
- wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+ wil_dbg_misc(wil, "change_iface: type=%d\n", type);
if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
@@ -351,8 +361,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
uint i, n;
int rc;
- wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
- __func__, wdev, wdev->iftype);
+ wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
/* check we are client side */
switch (wdev->iftype) {
@@ -557,7 +566,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int rc = 0;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "connect\n");
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -593,6 +602,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
wil->privacy = sme->privacy;
+ wil->pbss = sme->pbss;
if (wil->privacy) {
/* For secure assoc, remove old keys */
@@ -689,12 +699,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+ wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code);
if (!(test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))) {
- wil_err(wil, "%s: Disconnect was called while disconnected\n",
- __func__);
+ wil_err(wil, "Disconnect was called while disconnected\n");
return 0;
}
@@ -702,7 +711,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
if (rc)
- wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
+ wil_err(wil, "disconnect error %d\n", rc);
return rc;
}
@@ -750,7 +759,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* different from currently "listened" channel and fail if it is.
*/
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "mgmt_tx\n");
print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
@@ -811,7 +820,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
break;
}
}
- wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+ wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
return rc;
}
@@ -916,13 +925,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
return -EINVAL;
}
- wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+ wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cs)) {
- wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
- __func__, mac_addr, key_usage_str[key_usage], key_index,
+ wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
@@ -931,8 +940,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
- "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
- params->seq_len, __func__, mac_addr,
+ "Wrong PN len %d, %pM %s[%d] PN %*phN\n",
+ params->seq_len, mac_addr,
key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
@@ -956,11 +965,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
mac_addr);
- wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+ wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cs))
- wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+ wil_info(wil, "Not connected, %pM %s[%d]\n",
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cs))
@@ -977,7 +986,7 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "set_default_key: entered\n");
return 0;
}
@@ -990,8 +999,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
- wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
- __func__, chan->center_freq, duration, wdev->iftype);
+ wil_dbg_misc(wil,
+ "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
+ chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
return rc;
@@ -1003,7 +1013,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "cancel_remain_on_channel\n");
return wil_p2p_cancel_listen(wil, cookie);
}
@@ -1159,9 +1169,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
- wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+ wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go);
if (is_go && !pbss) {
- wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+ wil_err(wil, "P2P GO must be in PBSS\n");
return -ENOTSUPP;
}
@@ -1216,7 +1226,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
int rc;
u32 privacy = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "change_beacon\n");
wil_print_bcon_data(bcon);
if (bcon->tail &&
@@ -1255,7 +1265,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 hidden_ssid;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "start_ap\n");
if (!channel) {
wil_err(wil, "AP: No channel???\n");
@@ -1306,7 +1316,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop_ap\n");
netif_carrier_off(ndev);
wil_set_recovery_state(wil, fw_recovery_idle);
@@ -1322,13 +1332,35 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
return 0;
}
+static int wil_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid);
+
+ if (!disable_ap_sme) {
+ wil_err(wil, "not supported with AP SME enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (params->aid > WIL_MAX_DMG_AID) {
+ wil_err(wil, "invalid aid\n");
+ return -EINVAL;
+ }
+
+ return wmi_new_sta(wil, mac, params->aid);
+}
+
static int wil_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *dev,
struct station_del_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+ wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
@@ -1338,6 +1370,52 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
return 0;
}
+static int wil_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int authorize;
+ int cid, i;
+ struct vring_tx_data *txdata = NULL;
+
+ wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac,
+ params->sta_flags_mask, params->sta_flags_set);
+
+ if (!disable_ap_sme) {
+ wil_dbg_misc(wil, "not supported with AP SME enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return 0;
+
+ cid = wil_find_cid(wil, mac);
+ if (cid < 0) {
+ wil_err(wil, "station not found\n");
+ return -ENOLINK;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
+ if (wil->vring2cid_tid[i][0] == cid) {
+ txdata = &wil->vring_tx_data[i];
+ break;
+ }
+
+ if (!txdata) {
+ wil_err(wil, "vring data not found\n");
+ return -ENOLINK;
+ }
+
+ authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
+ txdata->dot1x_open = authorize ? 1 : 0;
+ wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+ txdata->dot1x_open);
+
+ return 0;
+}
+
/* probe_client handling */
static void wil_probe_client_handle(struct wil6210_priv *wil,
struct wil_probe_client_req *req)
@@ -1387,7 +1465,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil)
{
struct wil_probe_client_req *req, *t;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "probe_client_flush\n");
mutex_lock(&wil->probe_client_mutex);
@@ -1407,7 +1485,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
struct wil_probe_client_req *req;
int cid = wil_find_cid(wil, peer);
- wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+ wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid);
if (cid < 0)
return -ENOLINK;
@@ -1435,7 +1513,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (params->ap_isolate >= 0) {
- wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+ wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n",
wil->ap_isolate, params->ap_isolate);
wil->ap_isolate = params->ap_isolate;
}
@@ -1448,7 +1526,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "start_p2p_device: entered\n");
wil->p2p.p2p_dev_started = 1;
return 0;
}
@@ -1462,7 +1540,7 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
if (!p2p->p2p_dev_started)
return;
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "stop_p2p_device: entered\n");
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
wil_p2p_stop_radio_operations(wil);
@@ -1499,7 +1577,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return rc;
}
-static struct cfg80211_ops wil_cfg80211_ops = {
+static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
@@ -1521,7 +1599,9 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.change_beacon = wil_cfg80211_change_beacon,
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
+ .add_station = wil_cfg80211_add_station,
.del_station = wil_cfg80211_del_station,
+ .change_station = wil_cfg80211_change_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
/* P2P device */
@@ -1542,10 +1622,11 @@ static void wil_wiphy_init(struct wiphy *wiphy)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_MONITOR);
- wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (!disable_ap_sme)
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
wiphy->probe_resp_offload =
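The conditional WIPHY_FLAG_HAVE_AP_SME above, like the new add_station/change_station handlers, is keyed off a disable_ap_sme module parameter whose definition is not part of the hunks shown here (wil6210.h below only declares it extern). A minimal sketch of how such a parameter would be declared, assuming it lives in cfg80211.c next to the driver's other parameters, is:

/* illustrative sketch only; the real definition is outside the hunks shown */
bool disable_ap_sme; /* = false; */
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");

Left at its default, AP SME stays in firmware and the add_station/change_station callbacks return -EOPNOTSUPP, as seen in the handlers earlier in this file.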
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5e4058a4037b..3e8cdf12feda 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
}
static const struct dbg_off isr_off[] = {
- {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32},
- {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32},
- {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32},
- {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32},
- {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32},
- {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32},
- {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32},
+ {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
+ {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
+ {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
+ {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
+ {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
+ {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
+ {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
{},
};
@@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off pseudo_isr_off[] = {
- {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
- {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
- {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+ {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+ {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+ {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
{},
};
@@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off lgc_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
{},
};
static const struct dbg_off tx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
doff_io32},
{},
};
static const struct dbg_off rx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
doff_io32},
{},
};
@@ -813,7 +813,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
kfree(frame);
- wil_info(wil, "%s() -> %d\n", __func__, rc);
+ wil_info(wil, "-> %d\n", rc);
return len;
}
@@ -855,7 +855,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
kfree(wmi);
- wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+ wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
return rc;
}
@@ -1379,6 +1379,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
+ u8 aid = 0;
switch (p->status) {
case wil_sta_unused:
@@ -1389,9 +1390,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
case wil_sta_connected:
status = "connected";
+ aid = p->aid;
break;
}
- seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
+ seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1622,7 +1624,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
- wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
+ wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
}
}
@@ -1632,29 +1634,29 @@ static const struct {
umode_t mode;
const struct file_operations *fops;
} dbg_files[] = {
- {"mbox", S_IRUGO, &fops_mbox},
- {"vrings", S_IRUGO, &fops_vring},
- {"stations", S_IRUGO, &fops_sta},
- {"desc", S_IRUGO, &fops_txdesc},
- {"bf", S_IRUGO, &fops_bf},
- {"ssid", S_IRUGO | S_IWUSR, &fops_ssid},
- {"mem_val", S_IRUGO, &fops_memread},
- {"reset", S_IWUSR, &fops_reset},
- {"rxon", S_IWUSR, &fops_rxon},
- {"tx_mgmt", S_IWUSR, &fops_txmgmt},
- {"wmi_send", S_IWUSR, &fops_wmi},
- {"back", S_IRUGO | S_IWUSR, &fops_back},
- {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
- {"pmcdata", S_IRUGO, &fops_pmcdata},
- {"temp", S_IRUGO, &fops_temp},
- {"freq", S_IRUGO, &fops_freq},
- {"link", S_IRUGO, &fops_link},
- {"info", S_IRUGO, &fops_info},
- {"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
- {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
- {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
- {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
- {"fw_version", S_IRUGO, &fops_fw_version},
+ {"mbox", 0444, &fops_mbox},
+ {"vrings", 0444, &fops_vring},
+ {"stations", 0444, &fops_sta},
+ {"desc", 0444, &fops_txdesc},
+ {"bf", 0444, &fops_bf},
+ {"ssid", 0644, &fops_ssid},
+ {"mem_val", 0644, &fops_memread},
+ {"reset", 0244, &fops_reset},
+ {"rxon", 0244, &fops_rxon},
+ {"tx_mgmt", 0244, &fops_txmgmt},
+ {"wmi_send", 0244, &fops_wmi},
+ {"back", 0644, &fops_back},
+ {"pmccfg", 0644, &fops_pmccfg},
+ {"pmcdata", 0444, &fops_pmcdata},
+ {"temp", 0444, &fops_temp},
+ {"freq", 0444, &fops_freq},
+ {"link", 0444, &fops_link},
+ {"info", 0444, &fops_info},
+ {"recovery", 0644, &fops_recovery},
+ {"led_cfg", 0644, &fops_led_cfg},
+ {"led_blink_time", 0644, &fops_led_blink_time},
+ {"fw_capabilities", 0444, &fops_fw_capabilities},
+ {"fw_version", 0444, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1693,30 +1695,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
- WIL_FIELD(privacy, S_IRUGO, doff_u32),
- WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(hw_version, S_IRUGO, doff_x32),
- WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
- WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
- WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
+ WIL_FIELD(privacy, 0444, doff_u32),
+ WIL_FIELD(status[0], 0644, doff_ulong),
+ WIL_FIELD(hw_version, 0444, doff_x32),
+ WIL_FIELD(recovery_count, 0444, doff_u32),
+ WIL_FIELD(ap_isolate, 0444, doff_u32),
+ WIL_FIELD(discovery_mode, 0644, doff_u8),
+ WIL_FIELD(chip_revision, 0444, doff_u8),
+ WIL_FIELD(abft_len, 0644, doff_u8),
{},
};
static const struct dbg_off dbg_wil_regs[] = {
- {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+ {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
- {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
{},
};
/* static parameters */
static const struct dbg_off dbg_statics[] = {
- {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
- {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
- {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+ {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
+ {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
+ {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh,
doff_u32},
- {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
+ {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 7053b62ca8d3..adcfef4dabf7 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev)
mutex_lock(&wil->mutex);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_begin\n");
return 0;
}
@@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_complete\n");
mutex_unlock(&wil->mutex);
}
@@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+ wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 82aae2d705b4..540fc20984d8 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,8 +19,9 @@
#include "wil6210.h"
#include "fw.h"
-MODULE_FIRMWARE(WIL_FW_NAME);
-MODULE_FIRMWARE(WIL_FW2_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
+MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
+MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 8f40eb301924..f4901587c005 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -537,3 +537,22 @@ out:
release_firmware(fw);
return rc;
}
+
+/**
+ * wil_fw_verify_file_exists - checks if firmware file exists
+ *
+ * @wil: driver context
+ * @name: firmware file name
+ *
+ * return value - boolean, true if the firmware file exists, false otherwise
+ */
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
+{
+ const struct firmware *fw;
+ int rc;
+
+ rc = request_firmware(&fw, name, wil_to_dev(wil));
+ if (!rc)
+ release_firmware(fw);
+ return rc != -ENOENT;
+}
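wil_fw_verify_file_exists() is consumed later in this patch by wil_set_capabilities() in pcie_bus.c, which keeps the default firmware image unless the Sparrow D0 specific one is actually present. A condensed usage sketch (an illustration distilled from the pcie_bus.c hunk further below, not a verbatim copy) is:

/* condensed illustration of the pcie_bus.c usage further below */
wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
if (chip_revision == REVISION_ID_SPARROW_D0 &&
    wil_fw_verify_file_exists(wil, WIL_FW_NAME_SPARROW_PLUS))
	wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;

Probing with request_firmware() and releasing the blob immediately keeps this check cheap while reusing the same lookup path the firmware loader will later take.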
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 64046e0bd0a2..cab1e5c0e374 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
- wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
mask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
@@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
void wil6210_mask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil)
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq_pseudo\n");
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
@@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
- wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
unmask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
@@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil)
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq_pseudo\n");
set_bit(wil_status_irqen, wil->status);
@@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
void wil_mask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
@@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
void wil_unmask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq\n");
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
@@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil)
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "configure_interrupt_moderation\n");
/* disable interrupt moderation for monitor
* to get better timestamp precision
@@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
- wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
complete(&wil->halp.comp);
@@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "set_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "clear_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
- wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
@@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "fini_irq:\n");
wil_mask_irq(wil);
free_irq(irq, wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e2e021bcaa03..efb1f59aafd9 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,23 +27,23 @@
#define WAIT_FOR_SCAN_ABORT_MS 1000
bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
+module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
static bool oob_mode;
-module_param(oob_mode, bool, S_IRUGO);
+module_param(oob_mode, bool, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
bool no_fw_recovery;
-module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+module_param(no_fw_recovery, bool, 0644);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
/* if not set via modparam, will be set to default value of 1/8 of
* rx ring size during init flow
*/
unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
-module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+module_param(rx_ring_overflow_thrsh, ushort, 0444);
MODULE_PARM_DESC(rx_ring_overflow_thrsh,
" RX ring overflow threshold in descriptors.");
@@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = {
.get = param_get_uint,
};
-module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
@@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = {
.get = param_get_uint,
};
-module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
-module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
-module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO);
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
@@ -172,12 +172,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct wil_sta_info *sta = &wil->sta[cid];
might_sleep();
- wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
- sta->status);
+ wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n",
+ cid, sta->status);
/* inform upper/lower layers */
if (sta->status != wil_sta_unused) {
- if (!from_event)
- wmi_disconnect_sta(wil, sta->addr, reason_code, true);
+ if (!from_event) {
+ bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ?
+ disable_ap_sme : false;
+ wmi_disconnect_sta(wil, sta->addr, reason_code,
+ true, del_sta);
+ }
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
@@ -237,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
return;
might_sleep();
- wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
reason_code, from_event ? "+" : "-");
/* Cases are:
@@ -347,7 +351,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil)
void wil_set_recovery_state(struct wil6210_priv *wil, int state)
{
- wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__,
+ wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
wil->recovery_state, state);
wil->recovery_state = state;
@@ -489,7 +493,7 @@ int wil_priv_init(struct wil6210_priv *wil)
{
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_init\n");
memset(wil->sta, 0, sizeof(wil->sta));
for (i = 0; i < WIL6210_MAX_CID; i++)
@@ -564,7 +568,7 @@ out_wmi_wq:
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "disconnect\n");
del_timer_sync(&wil->connect_timer);
_wil6210_disconnect(wil, bssid, reason_code, from_event);
@@ -572,7 +576,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
void wil_priv_deinit(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_deinit\n");
wil_set_recovery_state(wil, fw_recovery_idle);
del_timer_sync(&wil->scan_timer);
@@ -605,7 +609,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
{
- wil_info(wil, "%s: enable=%d\n", __func__, enable);
+ wil_info(wil, "enable=%d\n", enable);
if (enable)
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
else
@@ -861,7 +865,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "reset\n");
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
@@ -884,9 +888,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
if (rc)
- wil_err(wil,
- "%s: PRE_RESET platform notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
+ rc);
}
set_bit(wil_status_resetting, wil->status);
@@ -915,7 +918,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wmi_wq);
wil_bl_crash_info(wil, false);
+ wil_disable_irq(wil);
rc = wil_target_reset(wil);
+ wil6210_clear_irq(wil);
+ wil_enable_irq(wil);
wil_rx_fini(wil);
if (rc) {
wil_bl_crash_info(wil, true);
@@ -930,16 +936,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
- wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
- WIL_FW2_NAME);
+ wil_info(wil, "Use firmware <%s> + board <%s>\n",
+ wil->wil_fw_name, WIL_BOARD_FILE_NAME);
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
- rc = wil_request_firmware(wil, WIL_FW_NAME, true);
+ rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
return rc;
- rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
+ rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
if (rc)
return rc;
@@ -976,8 +982,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* check FW is responsive */
rc = wmi_echo(wil);
if (rc) {
- wil_err(wil, "%s: wmi_echo failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "wmi_echo failed, rc %d\n", rc);
return rc;
}
@@ -987,9 +992,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
if (rc) {
- wil_err(wil,
- "%s: FW_RDY notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "FW_RDY notify failed, rc %d\n",
+ rc);
rc = 0;
}
}
@@ -1073,7 +1077,7 @@ int wil_up(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "up\n");
mutex_lock(&wil->mutex);
rc = __wil_up(wil);
@@ -1113,7 +1117,7 @@ int wil_down(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "down\n");
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
@@ -1146,25 +1150,24 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
- wil_err(wil, "%s: HALP vote timed out\n", __func__);
+ wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
+ "halp_vote: HALP vote completed after %d ms\n",
jiffies_to_msecs(to_jiffies - rc));
}
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
@@ -1176,15 +1179,15 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "HALP unvote\n");
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 6676001dcbca..708facd5f667 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,10 +22,11 @@ static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "open\n");
- if (debug_fw) {
- wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+ if (debug_fw ||
+ test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+ wil_err(wil, "while in debug_fw or wmi_only mode\n");
return -EINVAL;
}
@@ -36,7 +37,7 @@ static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop\n");
return wil_down(wil);
}
@@ -68,7 +69,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
done = budget - quota;
if (done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, done);
wil6210_unmask_irq_rx(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
@@ -132,7 +133,7 @@ void *wil_if_alloc(struct device *dev)
wil->wdev = wdev;
wil->radio_wdev = wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_alloc\n");
rc = wil_priv_init(wil);
if (rc) {
@@ -179,7 +180,7 @@ void wil_if_free(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_free\n");
if (!ndev)
return;
@@ -234,7 +235,7 @@ void wil_if_remove(struct wil6210_priv *wil)
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil_to_wdev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_remove\n");
unregister_netdev(ndev);
wiphy_unregister(wdev->wiphy);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index fbae99525e01..792484756654 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
- wil_dbg_misc(wil, "%s\n", __func__);
+ wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
schedule_work(&wil->p2p.discovery_expired_work);
}
@@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil,
int rc;
struct wil_p2p_info *p2p = &wil->p2p;
- wil_dbg_misc(wil, "%s: channel %d\n",
- __func__, P2P_DMG_SOCIAL_CHANNEL);
+ wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: search failed. discovery already ongoing\n",
- __func__);
+ wil_err(wil, "search failed. discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
if (rc) {
- wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
- wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
@@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
goto out_stop;
}
@@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
goto out_stop;
}
rc = wmi_start_search(wil);
if (rc) {
- wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+ wil_err(wil, "wmi_start_search failed\n");
goto out_stop;
}
@@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
if (!chan)
return -EINVAL;
- wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+ wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
mutex_lock(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: discovery already ongoing\n", __func__);
+ wil_err(wil, "discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
@@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_lock(&wil->mutex);
if (cookie != p2p->cookie) {
- wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
- __func__, p2p->cookie, cookie);
+ wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ p2p->cookie, cookie);
mutex_unlock(&wil->mutex);
return -ENOENT;
}
@@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_unlock(&wil->mutex);
if (!started) {
- wil_err(wil, "%s: listen not started\n", __func__);
+ wil_err(wil, "listen not started\n");
return -ENOENT;
}
@@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_listen_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
@@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_search_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 44746ca0d2e6..874c787727fe 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -23,7 +23,7 @@
#include <linux/rtnetlink.h>
static bool use_msi = true;
-module_param(use_msi, bool, S_IRUGO);
+module_param(use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
#ifdef CONFIG_PM
@@ -36,18 +36,38 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
- u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
+ u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
+ u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
+ RGF_USER_REVISION_ID_MASK);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
-
- switch (rev_id) {
- case JTAG_DEV_ID_SPARROW_B0:
- wil->hw_name = "Sparrow B0";
- wil->hw_version = HW_VER_SPARROW_B0;
+ wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+ wil->chip_revision = chip_revision;
+
+ switch (jtag_id) {
+ case JTAG_DEV_ID_SPARROW:
+ switch (chip_revision) {
+ case REVISION_ID_SPARROW_D0:
+ wil->hw_name = "Sparrow D0";
+ wil->hw_version = HW_VER_SPARROW_D0;
+ if (wil_fw_verify_file_exists(wil,
+ WIL_FW_NAME_SPARROW_PLUS))
+ wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+ break;
+ case REVISION_ID_SPARROW_B0:
+ wil->hw_name = "Sparrow B0";
+ wil->hw_version = HW_VER_SPARROW_B0;
+ break;
+ default:
+ wil->hw_name = "Unknown";
+ wil->hw_version = HW_VER_UNKNOWN;
+ break;
+ }
break;
default:
- wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id);
+ wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
+ jtag_id, chip_revision);
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
}
@@ -55,7 +75,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
/* extract FW capabilities from file without loading the FW */
- wil_request_firmware(wil, WIL_FW_NAME, false);
+ wil_request_firmware(wil, wil->wil_fw_name, false);
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -79,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
*/
int msi_only = pdev->msi_enabled;
bool _use_msi = use_msi;
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
pdev->msi_enabled = 0;
@@ -103,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto stop_master;
- /* need reset here to obtain MAC */
+ /* need reset here to obtain MAC; in case of WMI-only FW, a full reset
+ * and FW loading take place here
+ */
mutex_lock(&wil->mutex);
- rc = wil_reset(wil, false);
+ rc = wil_reset(wil, wmi_only);
mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
@@ -125,7 +149,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil)
{
struct pci_dev *pdev = wil->pdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_disable\n");
pci_clear_master(pdev);
/* disable and release IRQ */
@@ -289,7 +313,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
struct wil6210_priv *wil = pci_get_drvdata(pdev);
void __iomem *csr = wil->csr;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "pcie_remove\n");
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
@@ -327,8 +351,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
rc = wil_can_suspend(wil, is_runtime);
if (rc)
@@ -354,8 +377,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
/* allow master */
pci_set_master(pdev);
@@ -375,7 +397,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
int rc = 0;
enum wil_platform_event evt;
- wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+ wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
switch (mode) {
case PM_HIBERNATION_PREPARE:
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 11ee24d509e5..a0acb2d0cb79 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
if (!netif_running(wil_to_ndev(wil))) {
/* can always sleep when down */
@@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
}
out:
- wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+ wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
return rc;
@@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
rc = wil->platform_ops.suspend(wil->platform_handle);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
@@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle);
@@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
rc = wil_up(wil);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "resume: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index b9faae0278c9..3ff4f4ce9fef 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
- wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+ wil_err(wil, "ERROR pmc is already allocated\n");
goto no_release_err;
}
if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
@@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
- wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
- __func__, num_descriptors, descriptor_size);
+ wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+ num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
- wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+ wil_err(wil, "ERROR allocating pmc skb list\n");
goto no_release_err;
}
- wil_dbg_misc(wil,
- "%s: allocated descriptors info list %p\n",
- __func__, pmc->descriptors);
+ wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+ pmc->descriptors);
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to power of 2
@@ -116,15 +115,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
GFP_KERNEL);
wil_dbg_misc(wil,
- "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
- __func__,
+ "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
- wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+ wil_err(wil, "ERROR allocating pmc pring\n");
goto release_pmc_skb_list;
}
@@ -143,9 +141,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
- wil_err(wil,
- "%s: ERROR allocating pmc descriptor %d",
- __func__, i);
+ wil_err(wil, "ERROR allocating pmc descriptor %d", i);
goto release_pmc_skbs;
}
@@ -165,21 +161,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*_d = *d;
}
- wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ pmc->last_cmd_status);
goto release_pmc_skbs;
}
@@ -188,7 +184,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
return;
release_pmc_skbs:
- wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
+ wil_err(wil, "exit on error: Releasing skbs...\n");
for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
dma_free_coherent(dev,
descriptor_size,
@@ -197,7 +193,7 @@ release_pmc_skbs:
pmc->descriptors[i].va = NULL;
}
- wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+ wil_err(wil, "exit on error: Releasing pring...\n");
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
@@ -207,8 +203,7 @@ release_pmc_skbs:
pmc->pring_va = NULL;
release_pmc_skb_list:
- wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
- __func__);
+ wil_err(wil, "exit on error: Releasing descriptors info list...\n");
kfree(pmc->descriptors);
pmc->descriptors = NULL;
@@ -232,24 +227,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
- wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
- __func__);
+ wil_dbg_misc(wil,
+ "pmc_free: Error, can't free - not allocated\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
- __func__);
+ wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s WMI_PMC_CMD with RELEASE op failed, status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with RELEASE op failed, status %d",
+ pmc->last_cmd_status);
/* There's nothing we can do with this error.
* Normally, it should never occur.
* Continue to freeing all memory allocated for pmc.
@@ -261,8 +255,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
- wil_dbg_misc(wil, "%s: free pring va %p\n",
- __func__, pmc->pring_va);
+ wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+ pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
@@ -281,11 +275,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
- wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
- __func__, i, pmc->num_descriptors);
+ wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+ pmc->num_descriptors);
wil_dbg_misc(wil,
- "%s: free pmc descriptors info list %p\n",
- __func__, pmc->descriptors);
+ "pmc_free: free pmc descriptors info list %p\n",
+ pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
@@ -301,7 +295,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s: status %d\n", __func__,
+ wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
@@ -324,7 +318,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
- wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+ wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
@@ -333,8 +327,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
wil_dbg_misc(wil,
- "%s: size %u, pos %lld\n",
- __func__, (unsigned)count, *f_pos);
+ "pmc_read: size %u, pos %lld\n",
+ (u32)count, *f_pos);
pmc->last_cmd_status = 0;
@@ -343,15 +337,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
- wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
- __func__, *f_pos, (unsigned)pmc_size);
+ wil_dbg_misc(wil,
+ "pmc_read: reached end of pmc buf: %lld >= %u\n",
+ *f_pos, (u32)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
- "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
- __func__, *f_pos, idx, offset, count);
+ "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ *f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 19ed127d4d05..7404b6f39c6a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
- wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
- __func__, rc, status);
+ wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
+ status);
goto out;
}
@@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
txdata->addba_in_progress = true;
rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
if (rc) {
- wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
+ wil_err(wil, "wmi_addba failed, rc (%d)", rc);
txdata->addba_in_progress = false;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index c1b4bb03e997..072182e527e6 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -29,12 +29,12 @@
#include "trace.h"
static bool rtap_include_phy_info;
-module_param(rtap_include_phy_info, bool, S_IRUGO);
+module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
" Include PHY info in the radiotap header, default - no");
bool rx_align_2;
-module_param(rx_align_2, bool, S_IRUGO);
+module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
static inline uint wil_rx_snaplen(void)
@@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
size_t sz = vring->size * sizeof(vring->va[0]);
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "vring_alloc:\n");
BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
@@ -745,7 +745,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "rx_handle\n");
while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
(*quota)--;
@@ -768,7 +768,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
struct vring *vring = &wil->vring_rx;
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_init\n");
if (vring->va) {
wil_err(wil, "Rx ring already allocated\n");
@@ -799,7 +799,7 @@ void wil_rx_fini(struct wil6210_priv *wil)
{
struct vring *vring = &wil->vring_rx;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
wil_vring_free(wil, vring, 0);
@@ -851,7 +851,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -931,7 +931,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -993,7 +993,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
if (!vring->va)
return;
- wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
@@ -1032,12 +1032,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct vring *v = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
- wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
- __func__, eth->h_dest, i);
+ wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
+ eth->h_dest, i);
if (v->va && txdata->enabled) {
return v;
} else {
- wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ wil_dbg_txrx(wil,
+ "find_tx_ucast: vring[%d] not valid\n",
+ i);
return NULL;
}
}
@@ -1193,17 +1195,6 @@ found:
return v;
}
-static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
- struct sk_buff *skb)
-{
- struct wireless_dev *wdev = wil->wdev;
-
- if (wdev->iftype != NL80211_IFTYPE_AP)
- return wil_find_tx_bcast_2(wil, skb);
-
- return wil_find_tx_bcast_1(wil, skb);
-}
-
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -1373,8 +1364,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
int gso_type;
int rc = -EINVAL;
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1643,8 +1634,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1884,7 +1875,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static bool pr_once_fw;
int rc;
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "start_xmit\n");
if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
@@ -1903,12 +1894,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pr_once_fw = false;
/* find vring */
- if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
- /* in STA mode (ESS), all to same VRING */
+ if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
+ /* in STA mode (ESS), all to same VRING (to AP) */
vring = wil_find_tx_vring_sta(wil, skb);
- } else { /* direct communication, find matching VRING */
- vring = bcast ? wil_find_tx_bcast(wil, skb) :
- wil_find_tx_ucast(wil, skb);
+ } else if (bcast) {
+ if (wil->pbss)
+ /* in pbss, no bcast VRING - duplicate skb in
+ * all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
+ /* AP has a dedicated bcast VRING */
+ vring = wil_find_tx_bcast_1(wil, skb);
+ else
+ /* unexpected combination, fallback to duplicating
+ * the skb in all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ } else {
+ /* unicast, find specific VRING by dest. address */
+ vring = wil_find_tx_ucast(wil, skb);
}
if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1982,7 +1987,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
return 0;
}
- wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+ wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
used_before_complete = wil_vring_used_tx(vring);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 237e1666df2d..085a2dbfa21d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -33,10 +33,12 @@ extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
extern bool debug_fw;
+extern bool disable_ap_sme;
#define WIL_NAME "wil6210"
-#define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
+#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@@ -98,6 +100,9 @@ static inline u32 wil_mtu2macbuf(u32 mtu)
#define WIL6210_RX_HIGH_TRSH_INIT (0)
#define WIL6210_RX_HIGH_TRSH_DEFAULT \
(1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
+#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see
+ * 802.11REVmc/D5.0, section 9.4.1.8)
+ */
/* Hardware definitions begin */
/*
@@ -249,7 +254,12 @@ struct RGF_ICR {
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
- #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
+ #define JTAG_DEV_ID_SPARROW (0x2632072f)
+
+#define RGF_USER_REVISION_ID (0x88afe4)
+#define RGF_USER_REVISION_ID_MASK (3)
+ #define REVISION_ID_SPARROW_B0 (0x0)
+ #define REVISION_ID_SPARROW_D0 (0x3)
/* crash codes for FW/Ucode stored here */
#define RGF_FW_ASSERT_CODE (0x91f020)
@@ -257,7 +267,8 @@ struct RGF_ICR {
enum {
HW_VER_UNKNOWN,
- HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
+ HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
+ HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
};
/* popular locations */
@@ -512,6 +523,7 @@ struct wil_sta_info {
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
struct wil_tid_crypto_rx group_crypto_rx;
+ u8 aid; /* 1-254; 0 if unknown/not reported */
};
enum {
@@ -583,7 +595,9 @@ struct wil6210_priv {
DECLARE_BITMAP(status, wil_status_last);
u8 fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+ u8 chip_revision;
const char *hw_name;
+ const char *wil_fw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
@@ -653,6 +667,7 @@ struct wil6210_priv {
struct dentry *debug;
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
+ u8 abft_len;
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -816,8 +831,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
- bool full_disconnect);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
+ u16 reason, bool full_disconnect, bool del_sta);
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
@@ -827,6 +842,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile);
int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
+int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
@@ -918,6 +934,7 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index d051eea47a54..e53cf0cf7031 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
u32 host_min, dump_size, offset, len;
if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
- wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+ wil_err(wil, "fail to obtain crash dump size\n");
return -EINVAL;
}
if (dump_size > size) {
- wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
- __func__, dump_size, size);
+ wil_err(wil, "not enough space for dump. Need %d have %d\n",
+ dump_size, size);
return -EINVAL;
}
@@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
len = map->to - map->from;
offset = map->host - host_min;
- wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
- __func__, fw_mapping[i].name, len, offset);
+ wil_dbg_misc(wil,
+ "fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
+ fw_mapping[i].name, len, offset);
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
@@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
u32 fw_dump_size;
if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
- wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+ wil_err(wil, "fail to get fw dump size\n");
return;
}
@@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
* after 5 min
*/
dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
- wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
- fw_dump_size);
+ wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 7585003bef67..1f22c19696b1 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,16 +24,16 @@
#include "trace.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
-module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
+module_param(max_assoc_sta, uint, 0644);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
int agg_wsize; /* = 0; */
-module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+module_param(agg_wsize, int, 0644);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
u8 led_id = WIL_LED_INVALID_ID;
-module_param(led_id, byte, S_IRUGO);
+module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
@@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
ch = evt->channel + 1;
- wil_info(wil, "Connect %pM channel [%d] cid %d\n",
- evt->bssid, ch, evt->cid);
+ wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
+ evt->bssid, ch, evt->cid, evt->aid);
wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
@@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (wil->sta[evt->cid].status != wil_sta_unused) {
- wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
- __func__, wil->sta[evt->cid].status, evt->cid);
+ wil_err(wil, "AP: Invalid status %d for CID %d\n",
+ wil->sta[evt->cid].status, evt->cid);
mutex_unlock(&wil->mutex);
return;
}
@@ -553,22 +553,19 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
rc = wil_tx_init(wil, evt->cid);
if (rc) {
- wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n",
- __func__, evt->cid, rc);
+ wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
+ evt->cid, rc);
wmi_disconnect_sta(wil, wil->sta[evt->cid].addr,
- WLAN_REASON_UNSPECIFIED, false);
+ WLAN_REASON_UNSPECIFIED, false, false);
} else {
- wil_info(wil, "%s: successful connection to CID %d\n",
- __func__, evt->cid);
+ wil_info(wil, "successful connection to CID %d\n", evt->cid);
}
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
netif_carrier_off(ndev);
- wil_err(wil,
- "%s: cfg80211_connect_result with failure\n",
- __func__);
+ wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -583,8 +580,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
- if (rc)
+ if (rc) {
+ if (disable_ap_sme)
+ /* notify new_sta has failed */
+ cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
goto out;
+ }
memset(&sinfo, 0, sizeof(sinfo));
@@ -597,12 +598,13 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
} else {
- wil_err(wil, "%s: unhandled iftype %d for CID %d\n",
- __func__, wdev->iftype, evt->cid);
+ wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
+ evt->cid);
goto out;
}
wil->sta[evt->cid].status = wil_sta_connected;
+ wil->sta[evt->cid].aid = evt->aid;
set_bit(wil_status_fwconnected, wil->status);
wil_update_net_queues_bh(wil, NULL, false);
@@ -687,6 +689,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
{
struct wmi_vring_en_event *evt = d;
u8 vri = evt->vring_index;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wil_dbg_wmi(wil, "Enable vring %d\n", vri);
@@ -694,7 +697,12 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
- wil->vring_tx_data[vri].dot1x_open = true;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme)
+ /* in AP mode with disable_ap_sme, this is done by
+ * wil_cfg80211_change_station()
+ */
+ wil->vring_tx_data[vri].dot1x_open = true;
if (vri == wil->bcast_vring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
@@ -919,8 +927,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
if (immed_reply) {
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, wil->reply_id);
+ wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
+ wil->reply_id);
kfree(evt);
num_immed_reply++;
complete(&wil->wmi_call);
@@ -934,7 +942,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
}
}
/* normally, 1 event per IRQ should be processed */
- wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__,
+ wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
n - num_immed_reply, num_immed_reply);
}
@@ -950,6 +958,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
wil->reply_id = reply_id;
wil->reply_buf = reply;
wil->reply_size = reply_size;
+ reinit_completion(&wil->wmi_call);
spin_unlock(&wil->wmi_ev_lock);
rc = __wmi_send(wil, cmdid, buf, len);
@@ -1069,6 +1078,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
.pcp_max_assoc_sta = max_assoc_sta,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
+ .disable_ap_sme = disable_ap_sme,
+ .abft_len = wil->abft_len,
};
struct {
struct wmi_cmd_hdr wmi;
@@ -1086,6 +1097,13 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
cmd.pcp_max_assoc_sta = WIL6210_MAX_CID;
}
+ if (disable_ap_sme &&
+ !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME,
+ wil->fw_capabilities)) {
+ wil_err(wil, "disable_ap_sme not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+
/*
* Processing time may be huge, in case of secure AP it takes about
* 3500ms for FW to start AP
@@ -1352,7 +1370,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
struct wmi_listen_started_event evt;
} __packed reply;
- wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+ wil_info(wil, "(%s)\n", on ? "on" : "off");
if (on) {
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
@@ -1456,12 +1474,15 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
return 0;
}
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
- bool full_disconnect)
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
+ u16 reason, bool full_disconnect, bool del_sta)
{
int rc;
u16 reason_code;
- struct wmi_disconnect_sta_cmd cmd = {
+ struct wmi_disconnect_sta_cmd disc_sta_cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+ struct wmi_del_sta_cmd del_sta_cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
struct {
@@ -1469,12 +1490,19 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
struct wmi_disconnect_event evt;
} __packed reply;
- ether_addr_copy(cmd.dst_mac, mac);
-
- wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+ wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
- rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
- WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+ if (del_sta) {
+ ether_addr_copy(del_sta_cmd.dst_mac, mac);
+ rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
+ sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID,
+ &reply, sizeof(reply), 1000);
+ } else {
+ ether_addr_copy(disc_sta_cmd.dst_mac, mac);
+ rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd,
+ sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID,
+ &reply, sizeof(reply), 1000);
+ }
/* failure to disconnect in reasonable time treated as FW error */
if (rc) {
wil_fw_error_recovery(wil);
@@ -1507,8 +1535,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
.amsdu = 0,
};
- wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
- ringid, size, timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
+ timeout);
return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
}
@@ -1520,8 +1548,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
- ringid, reason);
+ wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
}
@@ -1533,8 +1560,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
- cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+ wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
+ (cidxtid >> 4) & 0xf, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
}
@@ -1686,11 +1713,29 @@ int wmi_abort_scan(struct wil6210_priv *wil)
return rc;
}
+int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid)
+{
+ int rc;
+ struct wmi_new_sta_cmd cmd = {
+ .aid = aid,
+ };
+
+ wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid);
+
+ ether_addr_copy(cmd.dst_mac, mac);
+
+ rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ wil_err(wil, "Failed to send new sta (%d)\n", rc);
+
+ return rc;
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
- wil_dbg_wmi(wil, "%s()\n", __func__);
+ wil_dbg_wmi(wil, "event_flush\n");
list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
list_del(&evt->list);
@@ -1731,8 +1776,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
WARN_ON(wil->reply_buf);
wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi));
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, id);
+ wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
+ id);
complete(&wil->wmi_call);
return;
}
@@ -1779,11 +1824,11 @@ void wmi_event_worker(struct work_struct *work)
struct pending_wmi_event *evt;
struct list_head *lh;
- wil_dbg_wmi(wil, "Start %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Start\n");
while ((lh = next_wmi_ev(wil)) != NULL) {
evt = list_entry(lh, struct pending_wmi_event, list);
wmi_event_handle(wil, &evt->event.hdr);
kfree(evt);
}
- wil_dbg_wmi(wil, "Finished %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Finished\n");
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index d93a4d490d24..7c9fee57aa91 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -56,6 +56,8 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_PS_CONFIG = 1,
WMI_FW_CAPABILITY_RF_SECTORS = 2,
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
+ WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
+ WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_MAX,
};
@@ -185,8 +187,11 @@ enum wmi_command_id {
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
+ WMI_NEW_STA_CMDID = 0x935,
+ WMI_DEL_STA_CMDID = 0x936,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
@@ -543,7 +548,10 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[7];
+ u8 reserved0[5];
+ /* abft_len override if non-0 */
+ u8 abft_len;
+ u8 disable_ap_sme;
u8 network_type;
u8 channel;
u8 disable_sec_offload;
@@ -902,6 +910,18 @@ struct wmi_set_mgmt_retry_limit_cmd {
u8 reserved[3];
} __packed;
+/* WMI_NEW_STA_CMDID */
+struct wmi_new_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 aid;
+} __packed;
+
+/* WMI_DEL_STA_CMDID */
+struct wmi_del_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 disconnect_reason;
+} __packed;
+
enum wmi_tof_burst_duration {
WMI_TOF_BURST_DURATION_250_USEC = 2,
WMI_TOF_BURST_DURATION_500_USEC = 3,
@@ -1067,6 +1087,7 @@ enum wmi_event_id {
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
@@ -1287,12 +1308,13 @@ struct wmi_connect_event {
u8 assoc_req_len;
u8 assoc_resp_len;
u8 cid;
- u8 reserved2[3];
+ u8 aid;
+ u8 reserved2[2];
/* not in use */
u8 assoc_info[0];
} __packed;
-/* WMI_DISCONNECT_EVENTID */
+/* disconnect_reason */
enum wmi_disconnect_reason {
WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01,
/* bmiss */
@@ -1310,6 +1332,7 @@ enum wmi_disconnect_reason {
WMI_DIS_REASON_IBSS_MERGE = 0x0E,
};
+/* WMI_DISCONNECT_EVENTID */
struct wmi_disconnect_event {
/* reason code, see 802.11 spec. */
__le16 protocol_reason_status;
@@ -1759,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* BRP antenna limit mode */
+enum wmi_brp_ant_limit_mode {
+ /* Disable BRP force antenna limit */
+ WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00,
+ /* Define maximal antennas limit. Only effective antennas will be
+ * actually used
+ */
+ WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01,
+ /* Force a specific number of antennas */
+ WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02,
+ /* number of BRP antenna limit modes */
+ WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03,
+};
+
+/* WMI_BRP_SET_ANT_LIMIT_CMDID */
+struct wmi_brp_set_ant_limit_cmd {
+ /* connection id */
+ u8 cid;
+ /* enum wmi_brp_ant_limit_mode */
+ u8 limit_mode;
+ /* antenna limit count, 1-27
+ * disable_mode - ignored
+ * effective_mode - upper limit to number of antennas to be used
+ * force_mode - exact number of antennas to be used
+ */
+ u8 ant_limit;
+ u8 reserved;
+} __packed;
+
+/* WMI_BRP_SET_ANT_LIMIT_EVENTID */
+struct wmi_brp_set_ant_limit_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 72139b579b18..5bc2ba214735 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1104,6 +1104,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index e21f7600122b..76693df34742 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -218,9 +218,6 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
* interface functions from common layer
*/
-bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
- int prec);
-
/* Receive frame for delivery to OS. Callee disposes of rxp. */
void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
/* Receive async event packet from firmware. Callee disposes of rxp. */
@@ -241,13 +238,12 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
/* Configure the "global" bus state used by upper layers */
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
-int brcmf_bus_start(struct device *dev);
+int brcmf_bus_started(struct device *dev);
s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
-void brcmf_sdio_init(void);
void brcmf_sdio_register(void);
#endif
#ifdef CONFIG_BRCMFMAC_USB
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 7ffc4aba5bab..10098b7586f3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -138,7 +138,6 @@ static struct ieee80211_rate __wl_rates[] = {
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
- .flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
@@ -147,7 +146,6 @@ static struct ieee80211_rate __wl_rates[] = {
.band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
- .flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
@@ -328,7 +326,7 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-const struct brcmf_tlv *
+static const struct brcmf_tlv *
brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
const struct brcmf_tlv *elt = buf;
@@ -3332,7 +3330,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
- data += sizeof(struct brcmf_pno_scanresults_le);
netinfo_start = brcmf_get_netinfo_array(pfn_result);
for (i = 0; i < result_count; i++) {
@@ -3480,8 +3477,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
return -EINVAL;
}
- data += sizeof(struct brcmf_pno_scanresults_le);
- netinfo = (struct brcmf_pno_net_info_le *)data;
+ netinfo = brcmf_get_netinfo_array(pfn_result);
memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
cfg->wowl.nd->n_channels = 1;
@@ -3971,7 +3967,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
pval |= AES_ENABLED;
break;
default:
- brcmf_err("Ivalid unicast security info\n");
+ brcmf_err("Invalid unicast security info\n");
}
offset++;
}
@@ -4015,7 +4011,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
wpa_auth |= WPA2_AUTH_1X_SHA256;
break;
default:
- brcmf_err("Ivalid key mgmt info\n");
+ brcmf_err("Invalid key mgmt info\n");
}
offset++;
}
@@ -5071,6 +5067,29 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
return ret;
}
+static int
+brcmf_cfg80211_update_conn_params(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme,
+ u32 changed)
+{
+ struct brcmf_if *ifp;
+ int err;
+
+ if (!(changed & UPDATE_ASSOC_IES))
+ return 0;
+
+ ifp = netdev_priv(ndev);
+ err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+ sme->ie, sme->ie_len);
+ if (err)
+ brcmf_err("Set Assoc REQ IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
+ return err;
+}
+
#ifdef CONFIG_PM
static int
brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev,
@@ -5138,6 +5157,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
.tdls_oper = brcmf_cfg80211_tdls_oper,
+ .update_connect_params = brcmf_cfg80211_update_conn_params,
};
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
@@ -5825,7 +5845,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j;
u32 total;
u32 chaninfo;
- u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5873,33 +5892,39 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
ch.bw == BRCMU_CHAN_BW_80)
continue;
- channel = band->channels;
- index = band->n_channels;
+ channel = NULL;
for (j = 0; j < band->n_channels; j++) {
- if (channel[j].hw_value == ch.control_ch_num) {
- index = j;
+ if (band->channels[j].hw_value == ch.control_ch_num) {
+ channel = &band->channels[j];
break;
}
}
- channel[index].center_freq =
- ieee80211_channel_to_frequency(ch.control_ch_num,
- band->band);
- channel[index].hw_value = ch.control_ch_num;
+ if (!channel) {
+ /* It seems firmware supports some channel we never
+ * considered. Something new in IEEE standard?
+ */
+ brcmf_err("Ignoring unexpected firmware channel %d\n",
+ ch.control_ch_num);
+ continue;
+ }
+
+ if (channel->orig_flags & IEEE80211_CHAN_DISABLED)
+ continue;
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
*/
if (ch.bw == BRCMU_CHAN_BW_80) {
- channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
+ channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
} else if (ch.bw == BRCMU_CHAN_BW_40) {
- brcmf_update_bw40_channel_flag(&channel[index], &ch);
+ brcmf_update_bw40_channel_flag(channel, &ch);
} else {
/* enable the channel and disable other bandwidths
* for now as mentioned order assure they are enabled
* for subsequent chanspecs.
*/
- channel[index].flags = IEEE80211_CHAN_NO_HT40 |
- IEEE80211_CHAN_NO_80MHZ;
+ channel->flags = IEEE80211_CHAN_NO_HT40 |
+ IEEE80211_CHAN_NO_80MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
@@ -5907,11 +5932,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
&chaninfo);
if (!err) {
if (chaninfo & WL_CHAN_RADAR)
- channel[index].flags |=
+ channel->flags |=
(IEEE80211_CHAN_RADAR |
IEEE80211_CHAN_NO_IR);
if (chaninfo & WL_CHAN_PASSIVE)
- channel[index].flags |=
+ channel->flags |=
IEEE80211_CHAN_NO_IR;
}
}
@@ -6341,7 +6366,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
}
#ifdef CONFIG_PM
-static struct wiphy_wowlan_support brcmf_wowlan_support = {
+static const struct wiphy_wowlan_support brcmf_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
.n_patterns = BRCMF_WOWL_MAXPATTERNS,
.pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE,
@@ -6354,19 +6379,29 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
{
#ifdef CONFIG_PM
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct wiphy_wowlan_support *wowl;
+
+ wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support),
+ GFP_KERNEL);
+ if (!wowl) {
+ brcmf_err("only support basic wowlan features\n");
+ wiphy->wowlan = &brcmf_wowlan_support;
+ return;
+ }
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) {
- brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ wowl->flags |= WIPHY_WOWLAN_NET_DETECT;
+ wowl->max_nd_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
init_waitqueue_head(&cfg->wowl.nd_data_wait);
}
}
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) {
- brcmf_wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY;
- brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+ wowl->flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY;
+ wowl->flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE;
}
- wiphy->wowlan = &brcmf_wowlan_support;
+ wiphy->wowlan = wowl;
#endif
}
@@ -6477,8 +6512,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
- err = brcmf_setup_wiphybands(wiphy);
- return err;
+
+ wiphy_read_of_freq_limits(wiphy);
+
+ return 0;
}
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6748,6 +6785,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
kfree(wiphy->bands[NL80211_BAND_5GHZ]);
}
+#if IS_ENABLED(CONFIG_PM)
+ if (wiphy->wowlan != &brcmf_wowlan_support)
+ kfree(wiphy->wowlan);
+#endif
wiphy_free(wiphy);
}
@@ -6843,6 +6884,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto priv_out;
}
+ err = brcmf_setup_wiphybands(wiphy);
+ if (err) {
+ brcmf_err("Setting wiphy bands failed (%d)\n", err);
+ goto wiphy_unreg_out;
+ }
+
/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
* setup 40MHz in 2GHz band and enable OBSS scanning.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0c9a7081fca9..8f19d95d4175 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -396,8 +396,6 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-const struct brcmf_tlv *
-brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 3e15d64c6481..33b133f7e63a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -74,7 +74,7 @@ module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine");
#ifdef DEBUG
-/* always succeed brcmf_bus_start() */
+/* always succeed brcmf_bus_started() */
static int brcmf_ignore_probe_fail;
module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0);
MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
@@ -218,6 +218,22 @@ done:
return err;
}
+#ifndef CONFIG_BRCM_TRACING
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_err("%s: %pV", func, &vaf);
+
+ va_end(args);
+}
+#endif
+
#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
{
@@ -299,11 +315,9 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
}
}
}
- if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) {
- /* No platform data for this device. In case of SDIO try OF
- * (Open Firwmare) Device Tree.
- */
- brcmf_of_probe(dev, &settings->bus.sdio);
+ if (!found) {
+ /* No platform data for this device, try OF (Open Firmware) */
+ brcmf_of_probe(dev, bus_type, settings);
}
return settings;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index bd095abca393..a62f8e70b320 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -65,6 +65,8 @@ struct brcmf_mp_device {
} bus;
};
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
+
struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
enum brcmf_bus_type bus_type,
u32 chip, u32 chiprev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 9e6f60a0ec3e..60da86a8d95b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -249,10 +249,10 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
done:
if (ret) {
- ifp->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
} else {
- ifp->stats.tx_packets++;
- ifp->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
}
/* Return ok: we always eat the packet */
@@ -296,15 +296,15 @@ void brcmf_txflowblock(struct device *dev, bool state)
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
if (skb->pkt_type == PACKET_MULTICAST)
- ifp->stats.multicast++;
+ ifp->ndev->stats.multicast++;
if (!(ifp->ndev->flags & IFF_UP)) {
brcmu_pkt_buf_free_skb(skb);
return;
}
- ifp->stats.rx_bytes += skb->len;
- ifp->stats.rx_packets++;
+ ifp->ndev->stats.rx_bytes += skb->len;
+ ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
if (in_interrupt())
@@ -327,7 +327,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
if (ret || !(*ifp) || !(*ifp)->ndev) {
if (ret != -ENODATA && *ifp)
- (*ifp)->stats.rx_errors++;
+ (*ifp)->ndev->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
return -ENODATA;
}
@@ -388,7 +388,7 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
}
if (!success)
- ifp->stats.tx_errors++;
+ ifp->ndev->stats.tx_errors++;
brcmu_pkt_buf_free_skb(txp);
}
@@ -411,15 +411,6 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
}
}
-static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
-
- brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
-
- return &ifp->stats;
-}
-
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -492,7 +483,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_open = brcmf_netdev_open,
.ndo_stop = brcmf_netdev_stop,
- .ndo_get_stats = brcmf_netdev_get_stats,
.ndo_start_xmit = brcmf_netdev_start_xmit,
.ndo_set_mac_address = brcmf_netdev_set_mac_address,
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
@@ -966,7 +956,7 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
return 0;
}
-int brcmf_bus_start(struct device *dev)
+int brcmf_bus_started(struct device *dev)
{
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -1075,16 +1065,6 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
}
}
-static void brcmf_bus_detach(struct brcmf_pub *drvr)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (drvr) {
- /* Stop the bus module */
- brcmf_bus_stop(drvr->bus_if);
- }
-}
-
void brcmf_dev_reset(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -1131,7 +1111,7 @@ void brcmf_detach(struct device *dev)
brcmf_fws_deinit(drvr);
- brcmf_bus_detach(drvr);
+ brcmf_bus_stop(drvr->bus_if);
brcmf_proto_detach(drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index c94dcab260d0..6aecd8dfd824 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -171,7 +171,6 @@ enum brcmf_netif_stop_reason {
* @drvr: points to device related information.
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
- * @stats: interface specific network statistics.
* @multicast_work: worker object for multicast provisioning.
* @ndoffload_work: worker object for neighbor discovery offload configuration.
* @fws_desc: interface specific firmware-signalling descriptor.
@@ -187,7 +186,6 @@ struct brcmf_if {
struct brcmf_pub *drvr;
struct brcmf_cfg80211_vif *vif;
struct net_device *ndev;
- struct net_device_stats stats;
struct work_struct multicast_work;
struct work_struct ndoffload_work;
struct brcmf_fws_mac_descriptor *fws_desc;
@@ -216,7 +214,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
-void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index e64557c35553..f4644cf371c7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
{
void *dump;
size_t ramsize;
+ int err;
ramsize = brcmf_bus_get_ramsize(bus);
- if (ramsize) {
- dump = vzalloc(len + ramsize);
- if (!dump)
- return -ENOMEM;
- memcpy(dump, data, len);
- brcmf_bus_get_memdump(bus, dump + len, ramsize);
- dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+ if (!ramsize)
+ return -ENOTSUPP;
+
+ dump = vzalloc(len + ramsize);
+ if (!dump)
+ return -ENOMEM;
+
+ memcpy(dump, data, len);
+ err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
+ if (err) {
+ vfree(dump);
+ return err;
}
+
+ dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+
return 0;
}
@@ -49,10 +58,18 @@ static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
const struct brcmf_event_msg *evtmsg,
void *data)
{
+ int err;
+
brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
- return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
- evtmsg->datalen);
+ brcmf_err("PSM's watchdog has fired!\n");
+
+ err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+ evtmsg->datalen);
+ if (err)
+ brcmf_err("Failed to get memory dump, %d\n", err);
+
+ return err;
}
void brcmf_debugfs_init(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 6687812770cc..066126123e96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -45,26 +45,18 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-/* Macro for error messages. net_ratelimit() is used when driver
- * debugging is not selected. When debugging the driver error
- * messages are as important as other tracing or even more so.
+__printf(2, 3)
+void __brcmf_err(const char *func, const char *fmt, ...);
+/* Macro for error messages. When debugging / tracing the driver all error
+ * messages are important to us.
*/
-#ifndef CONFIG_BRCM_TRACING
-#ifdef CONFIG_BRCMDBG
-#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
-#else
#define brcmf_err(fmt, ...) \
do { \
- if (net_ratelimit()) \
- pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ if (IS_ENABLED(CONFIG_BRCMDBG) || \
+ IS_ENABLED(CONFIG_BRCM_TRACING) || \
+ net_ratelimit()) \
+ __brcmf_err(__func__, fmt, ##__VA_ARGS__); \
} while (0)
-#endif
-#else
-__printf(2, 3)
-void __brcmf_err(const char *func, const char *fmt, ...);
-#define brcmf_err(fmt, ...) \
- __brcmf_err(__func__, fmt, ##__VA_ARGS__)
-#endif
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
__printf(3, 4)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 425c41dc0a59..aee6e5937c41 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -23,14 +23,17 @@
#include "common.h"
#include "of.h"
-void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
+void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings)
{
+ struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *np = dev->of_node;
int irq;
u32 irqf;
u32 val;
- if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+ if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
+ !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
return;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
index a9d94c15d0f5..95b7032d54b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
@@ -14,9 +14,11 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef CONFIG_OF
-void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio);
+void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings);
#else
-static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
+static void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings)
{
}
#endif /* CONFIG_OF */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 048027f2085b..6fae4cf3f6ab 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -601,7 +601,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
{
u32 config;
- brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
/* BAR1 window may not be sized properly */
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
@@ -1572,7 +1571,7 @@ static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo)
if (ret) {
brcmf_err("brcmf_attach failed\n");
} else {
- ret = brcmf_bus_start(&devinfo->pdev->dev);
+ ret = brcmf_bus_started(&devinfo->pdev->dev);
if (ret)
brcmf_err("dongle is not responding\n");
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index dfb0658713d9..c5744b45ec8f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1661,7 +1661,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->len, pfirst->next,
pfirst->prev);
skb_unlink(pfirst, &bus->glom);
- if (brcmf_sdio_fromevntchan(pfirst->data))
+ if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
brcmf_rx_event(bus->sdiodev->dev, pfirst);
else
brcmf_rx_frame(bus->sdiodev->dev, pfirst,
@@ -4065,7 +4065,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
sdio_release_host(sdiodev->func[1]);
- err = brcmf_bus_start(dev);
+ err = brcmf_bus_started(dev);
if (err != 0) {
brcmf_err("dongle is not responding\n");
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2f978a39b58a..d93ebbdc7737 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1148,7 +1148,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
- ret = brcmf_bus_start(devinfo->dev);
+ ret = brcmf_bus_started(devinfo->dev);
if (ret)
goto fail;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 466912eb2d87..e8e65115feba 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3469,7 +3469,7 @@ static struct attribute_group il3945_attribute_group = {
.attrs = il3945_sysfs_entries,
};
-static struct ieee80211_ops il3945_mac_ops __read_mostly = {
+static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
.tx = il3945_mac_tx,
.start = il3945_mac_start,
.stop = il3945_mac_stop,
@@ -3627,15 +3627,6 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
il->cmd_queue = IL39_CMD_QUEUE_NUM;
- /*
- * Disabling hardware scan means that mac80211 will perform scans
- * "the hard way", rather than using device's scan.
- */
- if (il3945_mod_params.disable_hw_scan) {
- D_INFO("Disabling hw_scan\n");
- il3945_mac_ops.hw_scan = NULL;
- }
-
D_INFO("*** LOAD DRIVER ***\n");
il->cfg = cfg;
il->ops = &il3945_ops;
@@ -3913,6 +3904,15 @@ il3945_init(void)
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
+ /*
+ * Disabling hardware scan means that mac80211 will perform scans
+ * "the hard way", rather than using device's scan.
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ pr_info("hw_scan is disabled\n");
+ il3945_mac_ops.hw_scan = NULL;
+ }
+
ret = il3945_rate_control_register();
if (ret) {
pr_err("Unable to register rate control algorithm: %d\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index b64db47b31bb..c5f2ddf9b0fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -90,13 +90,16 @@ config IWLWIFI_BCAST_FILTERING
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
- depends on IWLMVM && PM
+ depends on IWLMVM && PM && EXPERT
default false
help
Say Y here to enable runtime power management for PCIe
devices. If enabled, the device will go into low power mode
when idle for a short period of time, allowing for improved
- power saving during runtime.
+ power saving during runtime. Note that this feature requires
+ a tight integration with the platform. It is not recommended
+ to enable this feature without proper validation with the
+ specific target platform.
If unsure, say N.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index affe760c8c22..376c79337a0e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
{
struct iwl_priv *priv = file->private_data;
bool restart_fw = iwlwifi_mod_params.restart_fw;
- int ret;
+ int __maybe_unused ret;
iwlwifi_mod_params.restart_fw = true;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 8c0719468d00..2a04d0cd71ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -163,7 +163,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
REGULATORY_DISABLE_BEACON_HINTS;
#ifdef CONFIG_PM_SLEEP
- if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
priv->trans->ops->d3_suspend &&
priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b95c2d76db33..ff44ebc5829d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -364,7 +364,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
/*
get the traffic load value for tid
*/
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
@@ -372,14 +372,14 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
struct iwl_traffic_load *tl = NULL;
if (tid >= IWL_MAX_TID_COUNT)
- return 0;
+ return;
tl = &(lq_data->load[tid]);
curr_time -= curr_time % TID_ROUND_VALUE;
if (!(tl->queue_count))
- return 0;
+ return;
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
index = time_diff / TID_QUEUE_CELL_SPACING;
@@ -388,8 +388,6 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
/* TID_MAX_TIME_DIFF */
if (index >= TID_QUEUE_MAX_SIZE)
rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
}
static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
@@ -397,7 +395,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
- u32 load;
/*
* Don't create TX aggregation sessions when in high
@@ -410,7 +407,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
return ret;
}
- load = rs_tl_get_load(lq_data, tid);
+ rs_tl_get_load(lq_data, tid);
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
@@ -743,7 +740,10 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (i >= 0)
+ mask = BIT(i);
+
+ for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index c7509c51e9d9..d6013bfe991c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -407,7 +407,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
lockdep_assert_held(&priv->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+ if (!priv->fw->img[IWL_UCODE_INIT].num_sec)
return 0;
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index 0b9f6a7bc834..39335b7b0c16 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -371,4 +371,4 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index d4b73dedf89b..a72e58623d3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 26
-#define IWL3168_UCODE_API_MAX 26
+#define IWL7265D_UCODE_API_MAX 28
+#define IWL3168_UCODE_API_MAX 28
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..b7953bf55f6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 26
-#define IWL8265_UCODE_API_MAX 26
+#define IWL8000_UCODE_API_MAX 28
+#define IWL8265_UCODE_API_MAX 28
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 17
@@ -91,7 +91,7 @@
#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define IWL8000_MODULE_FIRMWARE(api) \
- IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+ IWL8000_FW_PRE __stringify(api) ".ucode"
#define IWL8265_FW_PRE "iwlwifi-8265-"
#define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index ff850410d897..a5f0c0bf85ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 26
+#define IWL9000_UCODE_API_MAX 28
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index ea1618525878..15dd7f6137c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 26
+#define IWL_A000_UCODE_API_MAX 28
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@@ -72,9 +72,13 @@
#define IWL_A000_SMEM_OFFSET 0x400000
#define IWL_A000_SMEM_LEN 0x68000
-#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_MODULE_FIRMWARE(api) \
- IWL_A000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+
+#define IWL_A000_HR_MODULE_FIRMWARE(api) \
+ IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_A000_JF_MODULE_FIRMWARE(api) \
+ IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
@@ -116,11 +120,22 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
- .use_tfh = true
+ .use_tfh = true, \
+ .rf_id = true
+
+const struct iwl_cfg iwla000_2ac_cfg_hr = {
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_HR_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
-const struct iwl_cfg iwla000_2ac_cfg = {
+const struct iwl_cfg iwla000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_FW_PRE,
+ .fw_name_pre = IWL_A000_JF_FW_PRE,
IWL_DEVICE_A000,
.ht_params = &iwl_a000_ht_params,
.nvm_ver = IWL_A000_NVM_VERSION,
@@ -128,4 +143,5 @@ const struct iwl_cfg iwla000_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
-MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 2660cc4b9f8c..94f8a51b633e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -455,7 +455,8 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
-extern const struct iwl_cfg iwla000_2ac_cfg;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index d73e9d436027..4ee3b621ec27 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -349,6 +349,7 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
+#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 45b2f679e4d8..0e0293d42b5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -102,7 +102,6 @@ static struct dentry *iwl_dbgfs_root;
* @op_mode: the running op_mode
* @trans: transport layer
* @dev: for debug prints only
- * @cfg: configuration struct
* @fw_index: firmware revision to try loading
* @firmware_name: composite filename of ucode file to load
* @request_firmware_complete: the firmware has been obtained from user space
@@ -114,7 +113,6 @@ struct iwl_drv {
struct iwl_op_mode *op_mode;
struct iwl_trans *trans;
struct device *dev;
- const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
char firmware_name[64]; /* name of firmware file to load */
@@ -166,8 +164,9 @@ static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
{
int i;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
+ for (i = 0; i < img->num_sec; i++)
iwl_free_fw_desc(drv, &img->sec[i]);
+ kfree(img->sec);
}
static void iwl_dealloc_ucode(struct iwl_drv *drv)
@@ -179,8 +178,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
- kfree(drv->fw.dbg_mem_tlv[i]);
+ kfree(drv->fw.dbg_mem_tlv);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -213,18 +211,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
- const char *name_pre = drv->cfg->fw_name_pre;
+ const char *name_pre = drv->trans->cfg->fw_name_pre;
char tag[8];
if (first) {
- drv->fw_index = drv->cfg->ucode_api_max;
+ drv->fw_index = drv->trans->cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
drv->fw_index--;
sprintf(tag, "%d", drv->fw_index);
}
- if (drv->fw_index < drv->cfg->ucode_api_min) {
+ if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
return -ENOENT;
}
@@ -241,7 +239,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
}
struct fw_img_parsing {
- struct fw_sec sec[IWL_UCODE_SECTION_MAX];
+ struct fw_sec *sec;
int sec_counter;
};
@@ -276,7 +274,8 @@ struct iwl_firmware_pieces {
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+ size_t n_dbg_mem_tlv;
};
/*
@@ -290,11 +289,33 @@ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
return &pieces->img[type].sec[sec];
}
+static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type,
+ int sec)
+{
+ struct fw_img_parsing *img = &pieces->img[type];
+ struct fw_sec *sec_memory;
+ int size = sec + 1;
+ size_t alloc_size = sizeof(*img->sec) * size;
+
+ if (img->sec && img->sec_counter >= size)
+ return;
+
+ sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
+ if (!sec_memory)
+ return;
+
+ img->sec = sec_memory;
+ img->sec_counter = size;
+}
+
static void set_sec_data(struct iwl_firmware_pieces *pieces,
enum iwl_ucode_type type,
int sec,
const void *data)
{
+ alloc_sec_data(pieces, type, sec);
+
pieces->img[type].sec[sec].data = data;
}
@@ -303,6 +324,8 @@ static void set_sec_size(struct iwl_firmware_pieces *pieces,
int sec,
size_t size)
{
+ alloc_sec_data(pieces, type, sec);
+
pieces->img[type].sec[sec].size = size;
}
@@ -318,6 +341,8 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
int sec,
u32 offset)
{
+ alloc_sec_data(pieces, type, sec);
+
pieces->img[type].sec[sec].offset = offset;
}
@@ -383,6 +408,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
struct fw_img_parsing *img;
struct fw_sec *sec;
struct fw_sec_parsing *sec_parse;
+ size_t alloc_size;
if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
return -1;
@@ -390,6 +416,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
sec_parse = (struct fw_sec_parsing *)data;
img = &pieces->img[type];
+
+ alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
+ sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+ img->sec = sec;
+
sec = &img->sec[img->sec_counter];
sec->offset = le32_to_cpu(sec_parse->offset);
@@ -1009,31 +1042,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
(void *)tlv_data;
u32 type;
+ size_t size;
+ struct iwl_fw_dbg_mem_seg_tlv *n;
if (tlv_len != (sizeof(*dbg_mem)))
goto invalid_tlv_len;
type = le32_to_cpu(dbg_mem->data_type);
- drv->fw.dbg_dynamic_mem = true;
- if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
- IWL_ERR(drv,
- "Skip unknown dbg mem segment: %u\n",
- dbg_mem->data_type);
- break;
- }
+ IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+ dbg_mem->data_type);
- if (pieces->dbg_mem_tlv[type]) {
- IWL_ERR(drv,
- "Ignore duplicate mem segment: %u\n",
- dbg_mem->data_type);
+ switch (type & FW_DBG_MEM_TYPE_MASK) {
+ case FW_DBG_MEM_TYPE_REGULAR:
+ case FW_DBG_MEM_TYPE_PRPH:
+ /* we know how to handle these */
break;
+ default:
+ IWL_ERR(drv,
+ "Found debug memory segment with invalid type: 0x%x\n",
+ type);
+ return -EINVAL;
}
- IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
- dbg_mem->data_type);
-
- pieces->dbg_mem_tlv[type] = dbg_mem;
+ size = sizeof(*pieces->dbg_mem_tlv) *
+ (pieces->n_dbg_mem_tlv + 1);
+ n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+ pieces->dbg_mem_tlv = n;
+ pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
+ pieces->n_dbg_mem_tlv++;
break;
}
default:
@@ -1083,12 +1122,18 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
enum iwl_ucode_type type)
{
int i;
- for (i = 0;
- i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
- i++)
- if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
- get_sec(pieces, type, i)))
+ struct fw_desc *sec;
+
+ sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+ drv->fw.img[type].sec = sec;
+ drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
+
+ for (i = 0; i < pieces->img[type].sec_counter; i++)
+ if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
return -ENOMEM;
+
return 0;
}
@@ -1160,7 +1205,7 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+ op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (!op_mode) {
@@ -1200,8 +1245,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
- const unsigned int api_max = drv->cfg->ucode_api_max;
- const unsigned int api_min = drv->cfg->ucode_api_min;
+ const unsigned int api_max = drv->trans->cfg->ucode_api_max;
+ const unsigned int api_min = drv->trans->cfg->ucode_api_min;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
@@ -1263,7 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
- if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
+ if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
+ drv->trans->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@@ -1345,19 +1391,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
- if (pieces->dbg_mem_tlv[i]) {
- drv->fw.dbg_mem_tlv[i] =
- kmemdup(pieces->dbg_mem_tlv[i],
- sizeof(*drv->fw.dbg_mem_tlv[i]),
- GFP_KERNEL);
- if (!drv->fw.dbg_mem_tlv[i])
- goto out_free_fw;
- }
- }
-
/* Now that we can no longer fail, copy information */
+ drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+ pieces->dbg_mem_tlv = NULL;
+ drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+
/*
* The (size - 16) / 12 formula is based on the information recorded
* for each event, which is of mode 1 (including timestamp) for all
@@ -1368,14 +1407,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
- drv->cfg->base_params->max_event_log_size;
+ drv->trans->cfg->base_params->max_event_log_size;
fw->init_errlog_ptr = pieces->init_errlog_ptr;
fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
if (pieces->inst_evtlog_size)
fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
- drv->cfg->base_params->max_event_log_size;
+ drv->trans->cfg->base_params->max_event_log_size;
fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
@@ -1441,29 +1480,30 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
- kfree(pieces);
- return;
+ goto free;
try_again:
/* try next, if any */
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
- kfree(pieces);
- return;
+ goto free;
out_free_fw:
IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
- kfree(pieces);
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ free:
+ for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+ kfree(pieces->img[i].sec);
+ kfree(pieces->dbg_mem_tlv);
+ kfree(pieces);
}
-struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
- const struct iwl_cfg *cfg)
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
{
struct iwl_drv *drv;
int ret;
@@ -1476,7 +1516,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
drv->trans = trans;
drv->dev = trans->dev;
- drv->cfg = cfg;
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
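Aside (not part of the patch): an illustrative sketch, with hypothetical names, of the krealloc() growth pattern the parser now uses for the per-image section array — grow the array to at least idx + 1 entries before slot idx is written, keeping the old contents on success and the old allocation on failure.

#include <linux/slab.h>
#include <linux/types.h>

struct ex_sec { const void *data; size_t size; u32 offset; };

struct ex_img {
	struct ex_sec *sec;
	int sec_counter;		/* entries currently allocated */
};

static int ex_ensure_sec_slot(struct ex_img *img, int idx)
{
	struct ex_sec *grown;
	int want = idx + 1;

	if (img->sec && img->sec_counter >= want)
		return 0;		/* already big enough */

	grown = krealloc(img->sec, sizeof(*grown) * want, GFP_KERNEL);
	if (!grown)
		return -ENOMEM;		/* old array remains valid */

	img->sec = grown;
	img->sec_counter = want;
	return 0;
}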
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index f6eacfdbc265..6c537e04864e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -118,15 +118,13 @@ struct iwl_cfg;
* iwl_drv_start - start the drv
*
* @trans_ops: the ops of the transport
- * @cfg: device specific constants / virtual functions
*
* starts the driver: fetches the firmware. This should be called by bus
* specific system flows implementations. For example, the bus specific probe
* function should do bus related operations only, and then call to this
* function. It returns the driver object or %NULL if an error occurred.
*/
-struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
- const struct iwl_cfg *cfg);
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans);
/**
* iwl_drv_stop - stop the drv
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 84813b550ef1..d01701ee4777 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -379,7 +379,6 @@ enum iwl_ucode_tlv_capa {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 16
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
@@ -489,25 +488,22 @@ enum iwl_fw_dbg_monitor_mode {
};
/**
- * enum iwl_fw_mem_seg_type - data types for dumping on error
- *
- * @FW_DBG_MEM_SMEM: the data type is SMEM
- * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
- * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * enum iwl_fw_mem_seg_type - memory segment type
+ * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
+ * @FW_DBG_MEM_TYPE_REGULAR: regular memory
+ * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
*/
-enum iwl_fw_dbg_mem_seg_type {
- FW_DBG_MEM_DCCM_LMAC = 0,
- FW_DBG_MEM_DCCM_UMAC,
- FW_DBG_MEM_SMEM,
-
- /* Must be last */
- FW_DBG_MEM_MAX,
+enum iwl_fw_mem_seg_type {
+ FW_DBG_MEM_TYPE_MASK = 0xff000000,
+ FW_DBG_MEM_TYPE_REGULAR = 0x00000000,
+ FW_DBG_MEM_TYPE_PRPH = 0x01000000,
};
/**
* struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
*
- * @data_type: enum %iwl_fw_mem_seg_type
+ * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
+ * for what we care about
* @ofs: the memory segment offset
* @len: the memory segment length, in bytes
*
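Aside (not part of the patch): a small sketch of how the new type encoding is meant to be consumed, assuming the enum values above — the mask selects the type bits, and only the two known types are accepted during TLV parsing (compare the switch added in iwl-drv.c earlier in this diff).

#include <linux/types.h>

static bool ex_dbg_mem_type_known(u32 data_type)
{
	switch (data_type & FW_DBG_MEM_TYPE_MASK) {
	case FW_DBG_MEM_TYPE_REGULAR:
	case FW_DBG_MEM_TYPE_PRPH:
		return true;		/* we know how to read these */
	default:
		return false;		/* reject unknown segment types */
	}
}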
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 5f229556339a..d323b70b510a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -132,7 +132,8 @@ struct fw_desc {
};
struct fw_img {
- struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+ struct fw_desc *sec;
+ int num_sec;
bool is_dual_cpus;
u32 paging_mem_size;
};
@@ -295,8 +296,8 @@ struct iwl_fw {
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
- bool dbg_dynamic_mem;
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+ size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
struct iwl_gscan_capabilities gscan_capa;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b88e2048ae0b..c7eb1983c4f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
mvmvif->rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
mutex_unlock(&mvm->mutex);
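Aside (not part of the patch): a stand-alone illustration of the one-character replay_ctr fix above, assuming replay_ctr is a pointer to an 8-byte big-endian counter (as in cfg80211's rekey data). &data->replay_ctr is the address of the pointer variable itself, while data->replay_ctr is the buffer it points to. Hypothetical userspace types for clarity.

#include <stdint.h>
#include <string.h>

struct ex_rekey { const uint8_t *replay_ctr; };

static uint64_t ex_read_replay_ctr(const struct ex_rekey *data)
{
	uint64_t be_ctr;

	/* correct: copy from the counter buffer the pointer refers to */
	memcpy(&be_ctr, data->replay_ctr, sizeof(be_ctr));
	/* the old cast of &data->replay_ctr would instead have read the
	 * bytes of the pointer variable, not the counter itself */
	return be_ctr;
}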
@@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
out:
if (ret < 0) {
- iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
- if (mvm->restart_fw > 0) {
- mvm->restart_fw--;
- ieee80211_restart_hw(mvm->hw);
- }
iwl_mvm_free_nd(mvm);
+
+ if (!unified_image) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ if (mvm->restart_fw > 0) {
+ mvm->restart_fw--;
+ ieee80211_restart_hw(mvm->hw);
+ }
+ }
}
out_noreset:
mutex_unlock(&mvm->mutex);
@@ -1738,7 +1741,7 @@ out:
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- u32 base = mvm->error_event_table;
+ u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 7b7d2a146e30..a260cd503200 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- int ret;
+ int __maybe_unused ret;
mutex_lock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
index 0246506ab595..480a54af4534 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -64,13 +64,14 @@
#define __fw_api_mac_h__
/*
- * The first MAC indices (starting from 0)
- * are available to the driver, AUX follows
+ * The first MAC indices (starting from 0) are available to the driver,
+ * AUX indices follow - 1 for non-CDB, 2 for CDB.
*/
#define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
-#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
+#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
+#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_TDLS_STA_COUNT 4
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index 0c294c9f98e9..c78a0c499459 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -453,6 +453,8 @@ enum scan_config_flags {
SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),
/* Bits 26-31 are for num of channels in channel_array */
#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
@@ -486,6 +488,20 @@ enum iwl_channel_flags {
};
/**
+ * struct iwl_scan_dwell
+ * @active: default dwell time for active scan
+ * @passive: default dwell time for passive scan
+ * @fragmented: default dwell time for fragmented scan
+ * @extended: default dwell time for channels 1, 6 and 11
+ */
+struct iwl_scan_dwell {
+ u8 active;
+ u8 passive;
+ u8 fragmented;
+ u8 extended;
+} __packed;
+
+/**
* struct iwl_scan_config
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
@@ -493,10 +509,7 @@ enum iwl_channel_flags {
* @legacy_rates: default legacy rates - enum scan_config_rates
* @out_of_channel_time: default max out of serving channel time
* @suspend_time: default max suspend time
- * @dwell_active: default dwell time for active scan
- * @dwell_passive: default dwell time for passive scan
- * @dwell_fragmented: default dwell time for fragmented scan
- * @dwell_extended: default dwell time for channels 1, 6 and 11
+ * @dwell: dwells for the scan
* @mac_addr: default mac address to be used in probes
* @bcast_sta_id: the index of the station in the fw
* @channel_flags: default channel flags - enum iwl_channel_flags
@@ -510,16 +523,29 @@ struct iwl_scan_config {
__le32 legacy_rates;
__le32 out_of_channel_time;
__le32 suspend_time;
- u8 dwell_active;
- u8 dwell_passive;
- u8 dwell_fragmented;
- u8 dwell_extended;
+ struct iwl_scan_dwell dwell;
u8 mac_addr[ETH_ALEN];
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
+#define SCAN_TWO_LMACS 2
+
+struct iwl_scan_config_cdb {
+ __le32 flags;
+ __le32 tx_chains;
+ __le32 rx_chains;
+ __le32 legacy_rates;
+ __le32 out_of_channel_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ struct iwl_scan_dwell dwell;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_sta_id;
+ u8 channel_flags;
+ u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
+
/**
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
@@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets {
};
enum iwl_umac_scan_general_flags {
- IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
- IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
- IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
- IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
- IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
- IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
- IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
- IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
- IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
- IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
- IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
};
/**
@@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail {
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
- * @max_out_time: max out of serving channel time
- * @suspend_time: max suspend time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ * are 2 LMACs
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
* @scan_priority: scan internal prioritization &enum iwl_scan_priority
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
@@ -631,15 +659,33 @@ struct iwl_scan_req_umac {
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
- __le32 max_out_time;
- __le32 suspend_time;
- __le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
- u8 channel_flags;
- u8 n_channels;
- __le16 reserved;
- u8 data[];
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ union {
+ struct {
+ __le32 max_out_time;
+ __le32 suspend_time;
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved;
+ u8 data[];
+ } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ struct {
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved;
+ u8 data[];
+ } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+ };
+} __packed;
+
+#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(__le32))
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
index 4e638a44babb..6371c342b96d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
@@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity {
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
-struct mvm_statistics_general_v8 {
+struct mvm_statistics_general_common {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
@@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 {
__le64 on_time_rf;
__le64 on_time_scan;
__le64 tx_time;
+} __packed;
+
+struct mvm_statistics_general_v8 {
+ struct mvm_statistics_general_common common;
__le32 beacon_counter[NUM_MAC_INDEX];
u8 beacon_average_energy[NUM_MAC_INDEX];
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+struct mvm_statistics_general_cdb {
+ struct mvm_statistics_general_common common;
+ __le32 beacon_counter[NUM_MAC_INDEX_CDB];
+ u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
+ u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
+
/**
* struct mvm_statistics_load - RX statistics for multi-queue devices
* @air_time: accumulated air time, per mac
@@ -267,6 +278,13 @@ struct mvm_statistics_load {
u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
+struct mvm_statistics_load_cdb {
+ __le32 air_time[NUM_MAC_INDEX_CDB];
+ __le32 byte_count[NUM_MAC_INDEX_CDB];
+ __le32 pkt_count[NUM_MAC_INDEX_CDB];
+ u8 avg_energy[IWL_MVM_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */
+
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@@ -281,6 +299,7 @@ struct mvm_statistics_rx {
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* STATISTICS_CMD (0x9c), below.
*/
+
struct iwl_notif_statistics_v10 {
__le32 flag;
struct mvm_statistics_rx rx;
@@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 {
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
+struct iwl_notif_statistics_cdb {
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general_cdb general;
+ struct mvm_statistics_load_cdb load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_12 */
+
#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
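Aside (not part of the patch): a worked check of the reserved[] padding in the two "general" statistics structs above. With NUM_MAC_INDEX = 5 and NUM_MAC_INDEX_CDB = 6, the per-MAC tail comes out 32-bit aligned in both layouts. Hypothetical stand-ins, assuming a typical ABI with a 4-byte int.

#define EX_NUM_MAC_INDEX	5
#define EX_NUM_MAC_INDEX_CDB	6

struct ex_tail_v8 {
	unsigned int  beacon_counter[EX_NUM_MAC_INDEX];
	unsigned char beacon_average_energy[EX_NUM_MAC_INDEX];
	unsigned char reserved[4 - (EX_NUM_MAC_INDEX % 4)];
};

struct ex_tail_cdb {
	unsigned int  beacon_counter[EX_NUM_MAC_INDEX_CDB];
	unsigned char beacon_average_energy[EX_NUM_MAC_INDEX_CDB];
	unsigned char reserved[4 - (EX_NUM_MAC_INDEX_CDB % 4)];
};

_Static_assert(sizeof(struct ex_tail_v8) == 28 &&
	       sizeof(struct ex_tail_v8) % 4 == 0, "v8 tail: 20 + 5 + 3");
_Static_assert(sizeof(struct ex_tail_cdb) == 32 &&
	       sizeof(struct ex_tail_cdb) % 4 == 0, "cdb tail: 24 + 6 + 2");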
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 59ca97a11b2b..b38cc073adcc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 {
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
/**
- * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
- * @tx: the tx commands associated with the beacon frame
+ * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
* @template_id: currently equal to the mac context id of the corresponding
* mac.
* @tim_idx: the offset of the tim IE in the beacon
@@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 {
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
*/
-struct iwl_mac_beacon_cmd {
- struct iwl_tx_cmd tx;
+struct iwl_mac_beacon_cmd_data {
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
+};
+
+/**
+ * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
+ * @data: see &iwl_mac_beacon_cmd_data
+ */
+struct iwl_mac_beacon_cmd_v7 {
+ struct iwl_tx_cmd tx;
+ struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
+/**
+ * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
+ * @byte_cnt: byte count of the beacon frame
+ * @flags: for future use
+ * @data: see &iwl_mac_beacon_cmd_data
+ */
+struct iwl_mac_beacon_cmd {
+ __le16 byte_cnt;
+ __le16 flags;
+ __le64 reserved;
+ struct iwl_mac_beacon_cmd_data data;
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */
+
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
__le64 tsf;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index ae12badc0c2a..cf2b836f3888 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids {
STORED_BEACON_NTF = 0xFF,
};
+enum iwl_regulatory_and_nvm_subcmd_ids {
+ NVM_ACCESS_COMPLETE = 0x0,
+};
+
enum iwl_fmac_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
@@ -355,6 +359,7 @@ enum {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
+ REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
};
@@ -593,60 +598,7 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
-struct mvm_alive_resp_ver1 {
- __le16 status;
- __le16 flags;
- u8 ucode_minor;
- u8 ucode_major;
- __le16 id;
- u8 api_minor;
- u8 api_major;
- u8 ver_subtype;
- u8 ver_type;
- u8 mac;
- u8 opt;
- __le16 reserved2;
- __le32 timestamp;
- __le32 error_event_table_ptr; /* SRAM address for error log */
- __le32 log_event_table_ptr; /* SRAM address for event log */
- __le32 cpu_register_ptr;
- __le32 dbgm_config_ptr;
- __le32 alive_counter_ptr;
- __le32 scd_base_ptr; /* SRAM address for SCD */
-} __packed; /* ALIVE_RES_API_S_VER_1 */
-
-struct mvm_alive_resp_ver2 {
- __le16 status;
- __le16 flags;
- u8 ucode_minor;
- u8 ucode_major;
- __le16 id;
- u8 api_minor;
- u8 api_major;
- u8 ver_subtype;
- u8 ver_type;
- u8 mac;
- u8 opt;
- __le16 reserved2;
- __le32 timestamp;
- __le32 error_event_table_ptr; /* SRAM address for error log */
- __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
- __le32 cpu_register_ptr;
- __le32 dbgm_config_ptr;
- __le32 alive_counter_ptr;
- __le32 scd_base_ptr; /* SRAM address for SCD */
- __le32 st_fwrd_addr; /* pointer to Store and forward */
- __le32 st_fwrd_size;
- u8 umac_minor; /* UMAC version: minor */
- u8 umac_major; /* UMAC version: major */
- __le16 umac_id; /* UMAC version: id */
- __le32 error_info_addr; /* SRAM address for UMAC error log */
- __le32 dbg_print_buff_addr;
-} __packed; /* ALIVE_RES_API_S_VER_2 */
-
-struct mvm_alive_resp {
- __le16 status;
- __le16 flags;
+struct iwl_lmac_alive {
__le32 ucode_minor;
__le32 ucode_major;
u8 ver_subtype;
@@ -662,12 +614,29 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_umac_alive {
__le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct mvm_alive_resp_v3 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data;
+ struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_3 */
+struct mvm_alive_resp {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_4 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -708,7 +677,6 @@ struct iwl_error_resp {
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
#define AUX_BINDING_INDEX (3)
-#define MAX_PHYS (4)
/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@@ -1251,13 +1219,16 @@ struct iwl_missed_beacons_notif {
* @external_ver: external image version
* @status: MFUART loading status
* @duration: MFUART loading time
+ * @image_size: MFUART image size in bytes
*/
struct iwl_mfuart_load_notif {
__le32 installed_ver;
__le32 external_ver;
__le32 status;
__le32 duration;
-} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
+ /* image size valid only in v2 of the command */
+ __le32 image_size;
+} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
@@ -2075,7 +2046,7 @@ struct iwl_mu_group_mgmt_notif {
* @system_time: system time on air rise
* @tsf: TSF on air rise
* @beacon_timestamp: beacon on air rise
- * @phy_flags: general phy flags: band, modulation, etc.
+ * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
* @channel: channel this beacon was received on
* @rates: rate in ucode internal format
* @byte_count: frame's byte count
@@ -2084,12 +2055,12 @@ struct iwl_stored_beacon_notif {
__le32 system_time;
__le64 tsf;
__le32 beacon_timestamp;
- __le16 phy_flags;
+ __le16 band;
__le16 channel;
__le32 rates;
__le32 byte_count;
u8 data[MAX_STORED_BEACON_SIZE];
-} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
@@ -2200,4 +2171,11 @@ struct iwl_dbg_mem_access_rsp {
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+/**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ */
+struct iwl_nvm_access_complete_cmd {
+ __le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 2e8e3e8e30a3..a027b11bbdb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -406,46 +406,63 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a02400, .end = 0x00a02758 },
};
-static u32 iwl_dump_prph(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
- const struct iwl_prph_range *iwl_prph_dump_addr,
- u32 range_len)
+static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+ u32 len_bytes, __le32 *data)
+{
+ u32 i;
+
+ for (i = 0; i < len_bytes; i += 4)
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+}
+
+static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+ u32 len_bytes, __le32 *data)
+{
+ unsigned long flags;
+ bool success = false;
+
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
+ success = true;
+ _iwl_read_prph_block(trans, start, len_bytes, data);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
+
+ return success;
+}
+
+static void iwl_dump_prph(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len)
{
struct iwl_fw_error_dump_prph *prph;
unsigned long flags;
- u32 prph_len = 0, i;
+ u32 i;
if (!iwl_trans_grab_nic_access(trans, &flags))
- return 0;
+ return;
for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
- int reg;
- __le32 *val;
-
- prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
(*data)->len = cpu_to_le32(sizeof(*prph) +
num_bytes_in_chunk);
prph = (void *)(*data)->data;
prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
- val = (void *)prph->data;
- for (reg = iwl_prph_dump_addr[i].start;
- reg <= iwl_prph_dump_addr[i].end;
- reg += 4)
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans,
- reg));
+ _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+ /* our range is inclusive, hence + 4 */
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4,
+ (void *)prph->data);
*data = iwl_fw_error_next_data(*data);
}
iwl_trans_release_nic_access(trans, &flags);
-
- return prph_len;
}
/*
@@ -495,11 +512,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_mvm_dump_ptrs *fw_error_dump;
struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
- struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
- mvm->fw->dbg_mem_tlv;
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
- u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
+ u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len;
+ u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
@@ -624,10 +640,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
/* Make room for MEM segments */
- for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
- if (fw_dbg_mem[i])
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i]->len);
+ for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i].len);
}
/* Make room for fw's virtual image pages, if it exists */
@@ -656,7 +671,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
- if (!mvm->fw->dbg_dynamic_mem)
+ if (!mvm->fw->n_dbg_mem_tlv)
file_len += sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
@@ -708,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (monitor_dump_only)
goto dump_trans_data;
- if (!mvm->fw->dbg_dynamic_mem) {
+ if (!mvm->fw->n_dbg_mem_tlv) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -719,22 +734,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data = iwl_fw_error_next_data(dump_data);
}
- for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
- if (fw_dbg_mem[i]) {
- u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
- u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
-
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(len +
- sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = fw_dbg_mem[i]->data_type;
- dump_mem->offset = cpu_to_le32(ofs);
+ for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+ bool success;
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = fw_dbg_mem[i].data_type;
+ dump_mem->offset = cpu_to_le32(ofs);
+
+ switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
+ case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
iwl_trans_read_mem_bytes(mvm->trans, ofs,
dump_mem->data,
len);
- dump_data = iwl_fw_error_next_data(dump_data);
+ success = true;
+ break;
+ case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
+ success = iwl_read_prph_block(mvm->trans, ofs, len,
+ (void *)dump_mem->data);
+ break;
+ default:
+ /*
+ * shouldn't get here, we ignored this kind
+ * of TLV earlier during the TLV parsing?!
+ */
+ WARN_ON(1);
+ success = false;
}
+
+ if (success)
+ dump_data = iwl_fw_error_next_data(dump_data);
}
if (smem_len) {
@@ -779,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
mvm->fw_paging_db[i].fw_paging_block;
+ dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE);
paging = (void *)dump_data->data;
paging->index = cpu_to_le32(i);
+ dma_sync_single_for_cpu(mvm->trans->dev, addr,
+ PAGING_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE);
dump_data = iwl_fw_error_next_data(dump_data);
@@ -816,11 +852,12 @@ dump_trans_data:
sg_nents(sg_dump_data),
fw_error_dump->op_mode_ptr,
fw_error_dump->op_mode_len, 0);
- sg_pcopy_from_buffer(sg_dump_data,
- sg_nents(sg_dump_data),
- fw_error_dump->trans_ptr->data,
- fw_error_dump->trans_ptr->len,
- fw_error_dump->op_mode_len);
+ if (fw_error_dump->trans_ptr)
+ sg_pcopy_from_buffer(sg_dump_data,
+ sg_nents(sg_dump_data),
+ fw_error_dump->trans_ptr->data,
+ fw_error_dump->trans_ptr->len,
+ fw_error_dump->op_mode_len);
dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
GFP_KERNEL);
}
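Aside (not part of the patch): a sketch of the streaming-DMA sync pairing that the dump path above and the paging load path in fw.c below now follow — sync-for-device after the CPU writes a mapped page, sync-for-cpu before the CPU reads it back. Hypothetical helpers; the kernel DMA API is assumed.

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void ex_copy_to_mapped_page(struct device *dev, void *vaddr,
				   dma_addr_t dma, const void *src, size_t len)
{
	memcpy(vaddr, src, len);
	/* make the CPU's writes visible to the device */
	dma_sync_single_for_device(dev, dma, len, DMA_BIDIRECTIONAL);
}

static void ex_copy_from_mapped_page(struct device *dev, const void *vaddr,
				     dma_addr_t dma, void *dst, size_t len)
{
	/* make sure the CPU sees the device's latest data */
	dma_sync_single_for_cpu(dev, dma, len, DMA_BIDIRECTIONAL);
	memcpy(dst, vaddr, len);
}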
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 872066317fa5..45cb4f476e76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* CPU2 paging CSS
* CPU2 paging image (including instruction and data)
*/
- for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+ for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
sec_idx++;
break;
@@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* If paging is enabled there should be at least 2 more sections left
* (one for CSS and one for Paging data)
*/
- if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+ if (sec_idx >= image->num_sec - 1) {
IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
@@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
mvm->fw_paging_db[0].fw_paging_size);
+ dma_sync_single_for_device(mvm->trans->dev,
+ mvm->fw_paging_db[0].fw_paging_phys,
+ mvm->fw_paging_db[0].fw_paging_size,
+ DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d CSS bytes to first block\n",
@@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* loop stop at num_of_paging_blk since that last block is not full.
*/
for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
- memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+ memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
- mvm->fw_paging_db[idx].fw_paging_size);
+ block->fw_paging_size);
+ dma_sync_single_for_device(mvm->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+ DMA_BIDIRECTIONAL);
+
IWL_DEBUG_FW(mvm,
"Paging: copied %d paging bytes to block %d\n",
@@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
/* copy the last paging block */
if (mvm->num_of_pages_in_last_blk > 0) {
- memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+ memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+ dma_sync_single_for_device(mvm->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+ DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d pages in the last block %d\n",
@@ -259,9 +276,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
{
struct page *block;
dma_addr_t phys = 0;
- int blk_idx = 0;
- int order, num_of_pages;
- int dma_enabled;
+ int blk_idx, order, num_of_pages, size, dma_enabled;
if (mvm->fw_paging_db[0].fw_paging_block)
return 0;
@@ -272,9 +287,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
- mvm->num_of_paging_blk = ((num_of_pages - 1) /
- NUM_OF_PAGE_PER_GROUP) + 1;
-
+ mvm->num_of_paging_blk =
+ DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
mvm->num_of_pages_in_last_blk =
num_of_pages -
NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
@@ -284,46 +298,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
mvm->num_of_paging_blk,
mvm->num_of_pages_in_last_blk);
- /* allocate block of 4Kbytes for paging CSS */
- order = get_order(FW_PAGING_SIZE);
- block = alloc_pages(GFP_KERNEL, order);
- if (!block) {
- /* free all the previous pages since we failed */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
-
- mvm->fw_paging_db[blk_idx].fw_paging_block = block;
- mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
-
- if (dma_enabled) {
- phys = dma_map_page(mvm->trans->dev, block, 0,
- PAGE_SIZE << order, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(mvm->trans->dev, phys)) {
- /*
- * free the previous pages and the current one since
- * we failed to map_page.
- */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
- mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
- } else {
- mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
- blk_idx << BLOCK_2_EXP_SIZE;
- }
-
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
- order);
-
/*
- * allocate blocks in dram.
- * since that CSS allocated in fw_paging_db[0] loop start from index 1
+ * Allocate CSS and paging blocks in dram.
*/
- for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- /* allocate block of PAGING_BLOCK_SIZE (32K) */
- order = get_order(PAGING_BLOCK_SIZE);
+ for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+ size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+ order = get_order(size);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
@@ -332,7 +313,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
}
mvm->fw_paging_db[blk_idx].fw_paging_block = block;
- mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = size;
if (dma_enabled) {
phys = dma_map_page(mvm->trans->dev, block, 0,
@@ -353,9 +334,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
blk_idx << BLOCK_2_EXP_SIZE;
}
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
- order);
+ if (!blk_idx)
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+ order);
+ else
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+ order);
}
return 0;
@@ -475,80 +461,60 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
- struct mvm_alive_resp_ver1 *palive1;
- struct mvm_alive_resp_ver2 *palive2;
+ struct mvm_alive_resp_v3 *palive3;
struct mvm_alive_resp *palive;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u16 status;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+ } else {
+ palive3 = (void *)pkt->data;
+ umac = &palive3->umac_data;
+ lmac1 = &palive3->lmac_data;
+ status = le16_to_cpu(palive3->status);
+ }
- if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
- palive1 = (void *)pkt->data;
+ mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+ if (lmac2)
+ mvm->error_event_table[1] =
+ le32_to_cpu(lmac2->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+ mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
+ mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
- mvm->support_umac_log = false;
- mvm->error_event_table =
- le32_to_cpu(palive1->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive1->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
+ mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
- alive_data->valid = le16_to_cpu(palive1->status) ==
- IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive1->status), palive1->ver_type,
- palive1->ver_subtype, palive1->flags);
- } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
- palive2 = (void *)pkt->data;
-
- mvm->error_event_table =
- le32_to_cpu(palive2->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive2->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
- mvm->umac_error_event_table =
- le32_to_cpu(palive2->error_info_addr);
- mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
- mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
-
- alive_data->valid = le16_to_cpu(palive2->status) ==
- IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
+ alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+ if (mvm->umac_error_event_table)
+ mvm->support_umac_log = true;
- IWL_DEBUG_FW(mvm,
- "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive2->status), palive2->ver_type,
- palive2->ver_subtype, palive2->flags);
+ IWL_DEBUG_FW(mvm,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
- IWL_DEBUG_FW(mvm,
- "UMAC version: Major - 0x%x, Minor - 0x%x\n",
- palive2->umac_major, palive2->umac_minor);
- } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
- palive = (void *)pkt->data;
+ if (lmac2)
+ IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
- mvm->error_event_table =
- le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
- mvm->umac_error_event_table =
- le32_to_cpu(palive->error_info_addr);
- mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
- mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
-
- alive_data->valid = le16_to_cpu(palive->status) ==
- IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
- IWL_DEBUG_FW(mvm,
- "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ return true;
+}
- IWL_DEBUG_FW(mvm,
- "UMAC version: Major - 0x%x, Minor - 0x%x\n",
- le32_to_cpu(palive->umac_major),
- le32_to_cpu(palive->umac_minor));
- }
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
return true;
}
@@ -568,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
+static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
+{
+ const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
+ int ret;
+
+ /*
+ * Configure and operate fw paging mechanism.
+ * The driver configures the paging flow only once.
+ * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+ */
+ if (!fw->paging_mem_size)
+ return 0;
+
+ /*
+ * When dma is not enabled, the driver needs to copy / write
+ * the downloaded / uploaded page to / from the smem.
+ * This gets the location of the place were the pages are
+ * stored.
+ */
+ if (!is_device_dma_capable(mvm->trans->dev)) {
+ ret = iwl_trans_get_paging_item(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "failed to get FW paging item\n");
+ return ret;
+ }
+ }
+
+ ret = iwl_save_fw_paging(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(mvm);
+ return ret;
+ }
+
+ return 0;
+}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@@ -639,40 +647,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
- * configure and operate fw paging mechanism.
- * driver configures the paging flow only once, CPU2 paging image
- * included in the IWL_UCODE_INIT image.
- */
- if (fw->paging_mem_size) {
- /*
- * When dma is not enabled, the driver needs to copy / write
- * the downloaded / uploaded page to / from the smem.
- * This gets the location of the place were the pages are
- * stored.
- */
- if (!is_device_dma_capable(mvm->trans->dev)) {
- ret = iwl_trans_get_paging_item(mvm);
- if (ret) {
- IWL_ERR(mvm, "failed to get FW paging item\n");
- return ret;
- }
- }
-
- ret = iwl_save_fw_paging(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to save the FW paging image\n");
- return ret;
- }
-
- ret = iwl_send_paging_cmd(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to send the paging cmd\n");
- iwl_free_fw_paging(mvm);
- return ret;
- }
- }
-
- /*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
* could be stopped, so wake them up. In firmware restart,
@@ -829,6 +803,75 @@ out:
return ret;
}
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* TODO: remove when integrating context info */
+ ret = iwl_mvm_init_paging(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to init paging: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ if (WARN_ON(ret))
+ goto error;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1089,23 +1132,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_up(struct iwl_mvm *mvm)
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
- int ret, i;
- struct ieee80211_channel *chan;
- struct cfg80211_chan_def chandef;
-
- lockdep_assert_held(&mvm->mutex);
+ int ret;
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, false);
- /*
- * If we haven't completed the run of the init ucode during
- * module loading, load init ucode now
- * (for example, if we were in RFKILL)
- */
ret = iwl_run_init_mvm_ucode(mvm, false);
if (iwlmvm_mod_params.init_dbg)
@@ -1116,7 +1149,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
- goto error;
+ return ret;
}
/*
@@ -1127,9 +1160,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
_iwl_trans_stop_device(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
- goto error;
+ return ret;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_init_paging(mvm);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
goto error;
@@ -1156,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Send phy db control command and then phy db calibration*/
- ret = iwl_send_phy_db_data(mvm->phy_db);
- if (ret)
- goto error;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+ }
/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
@@ -1348,4 +1402,9 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->image_size));
}
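Aside (not part of the patch): a quick illustrative check that the DIV_ROUND_UP() form now used for num_of_paging_blk is equivalent to the previous open-coded ((n - 1) / d) + 1 for the positive page counts involved.

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

_Static_assert(EX_DIV_ROUND_UP(1, 8) == ((1 - 1) / 8) + 1, "n < d");
_Static_assert(EX_DIV_ROUND_UP(8, 8) == ((8 - 1) / 8) + 1, "n == d");
_Static_assert(EX_DIV_ROUND_UP(9, 8) == ((9 - 1) / 8) + 1, "n > d");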
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 4a0874e40731..99132ea16ede 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ /*
+ * If DQA is supported - queues were already disabled, since in
+ * DQA-mode the queues are a property of the STA and not of the
+ * vif, and at this point the STA was already deleted
+ */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ return;
+
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- if (!iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MAX_TID_COUNT, 0);
- else
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_P2P_DEVICE_QUEUE,
- vif->hw_queue[0], IWL_MAX_TID_COUNT,
- 0);
+ iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MAX_TID_COUNT, 0);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
-
- if (iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
- vif->hw_queue[0], IWL_MAX_TID_COUNT,
- 0);
/* fall through */
default:
- /*
- * If DQA is supported - queues were already disabled, since in
- * DQA-mode the queues are a property of the STA and not of the
- * vif, and at this point the STA was already deleted
- */
- if (iwl_mvm_is_dqa_supported(mvm))
- break;
-
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -991,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
}
static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
- struct iwl_mac_beacon_cmd_v6 *beacon_cmd,
+ __le32 *tim_index, __le32 *tim_size,
u8 *beacon, u32 frame_size)
{
u32 tim_idx;
@@ -1008,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
/* If TIM field was found, set variables */
if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
- beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
- beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
+ *tim_index = cpu_to_le32(tim_idx);
+ *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
} else {
IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
}
@@ -1043,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
};
union {
struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
- struct iwl_mac_beacon_cmd beacon_cmd;
+ struct iwl_mac_beacon_cmd_v7 beacon_cmd;
} u = {};
+ struct iwl_mac_beacon_cmd beacon_cmd;
struct ieee80211_tx_info *info;
u32 beacon_skb_len;
u32 rate, tx_flags;
@@ -1054,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
beacon_skb_len = beacon->len;
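+ /*
+ * When the FW supports CSA/TBTT offload it needs the offsets of the
+ * (E)CSA elements inside the beacon template, so look them up here.
+ */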
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
+ u32 csa_offset, ecsa_offset;
+
+ csa_offset = iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon_skb_len);
+ ecsa_offset =
+ iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon_skb_len);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ beacon_cmd.data.template_id =
+ cpu_to_le32((u32)mvmvif->id);
+ beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset);
+ beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len);
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm,
+ &beacon_cmd.data.tim_idx,
+ &beacon_cmd.data.tim_size,
+ beacon->data,
+ beacon_skb_len);
+ cmd.len[0] = sizeof(beacon_cmd);
+ cmd.data[0] = &beacon_cmd;
+ goto send;
+
+ } else {
+ u.beacon_cmd.data.ecsa_offset =
+ cpu_to_le32(ecsa_offset);
+ u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
+ cmd.len[0] = sizeof(u.beacon_cmd);
+ cmd.data[0] = &u;
+ }
+ } else {
+ cmd.len[0] = sizeof(u.beacon_cmd_v6);
+ cmd.data[0] = &u;
+ }
+
/* TODO: for now the beacon template id is set to be the mac context id.
* Might be better to handle it as another resource ... */
u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);
@@ -1092,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
/* Set up TX beacon command fields */
if (vif->type == NL80211_IFTYPE_AP)
- iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6,
+ iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx,
+ &u.beacon_cmd_v6.tim_size,
beacon->data,
beacon_skb_len);
+send:
/* Submit command */
-
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
- u.beacon_cmd.csa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_CHANNEL_SWITCH,
- beacon_skb_len));
- u.beacon_cmd.ecsa_offset =
- cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
- WLAN_EID_EXT_CHANSWITCH_ANN,
- beacon_skb_len));
-
- cmd.len[0] = sizeof(u.beacon_cmd);
- } else {
- cmd.len[0] = sizeof(u.beacon_cmd_v6);
- }
-
- cmd.data[0] = &u;
cmd.dataflags[0] = 0;
cmd.len[1] = beacon_skb_len;
cmd.data[1] = beacon->data;
@@ -1565,7 +1578,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
rx_status.device_timestamp = le32_to_cpu(sb->system_time);
rx_status.band =
- (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 45122dafe922..d37b1695c64e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+ /* this value applies to CCK frames; accuracy is better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+
hw->rate_control_algorithm = "iwl-mvm-rs";
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -670,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->wowlan = &mvm->wowlan;
}
- if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
@@ -1203,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
- iwl_free_fw_paging(mvm);
-
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
@@ -2003,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
- } else if (changes & BSS_CHANGED_BEACON_INFO) {
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INFO) {
/*
- * We received a beacon _after_ association so
+ * We received a beacon from the associated AP so
* remove the session protection.
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- }
- if (changes & BSS_CHANGED_BEACON_INFO) {
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
}
@@ -2099,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
- /* enable the multicast queue, now that we have a station for it */
- if (iwl_mvm_is_dqa_supported(mvm)) {
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
- .sta_id = mvmvif->bcast_sta.sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
-
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
- &cfg, wdg_timeout);
- }
-
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@@ -2547,6 +2536,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int ret;
IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
@@ -2575,8 +2565,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST &&
iwl_mvm_is_dqa_supported(mvm)) {
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-
iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
@@ -2587,6 +2575,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
}
mutex_lock(&mvm->mutex);
+ /* track whether or not the station is associated */
+ mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;
+
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
/*
@@ -2636,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
+
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
ret = iwl_mvm_update_sta(mvm, vif, sta);
- if (ret == 0)
- iwl_mvm_rs_rate_init(mvm, sta,
- mvmvif->phy_ctxt->channel->band,
- true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4a9cb76b7611..73a216524af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -739,8 +739,9 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
+ bool hw_registered;
bool calibrating;
- u32 error_event_table;
+ u32 error_event_table[2];
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
@@ -1217,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
return mvm->trans->cfg->use_tfh;
}
+static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO:
+ * How to determine CDB support is still not well defined. It may
+ * apply to all next HW devices, it may be per FW compilation, and it
+ * may also differ between devices. For now, piggyback on the new
+ * TX API check and revisit this once it is well defined.
+ */
+ return iwl_mvm_has_new_tx_api(mvm);
+}
+
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{
#ifdef CONFIG_THERMAL
@@ -1257,6 +1271,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1657,8 +1672,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
*/
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags);
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
@@ -1686,6 +1701,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
+ iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index f14aada390c5..4cd72d4cdc47 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+ HCMD_NAME(NVM_ACCESS_COMPLETE),
+};
+
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+ [REGULATORY_AND_NVM_GROUP] =
+ HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};
/* this forward declaration can avoid to export the function */
@@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
mvm->sf_state = SF_UNINIT;
- mvm->cur_ucode = IWL_UCODE_INIT;
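+ /* devices with the unified image start directly with the regular ucode */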
+ if (iwl_mvm_has_new_tx_api(mvm))
+ mvm->cur_ucode = IWL_UCODE_REGULAR;
+ else
+ mvm->cur_ucode = IWL_UCODE_INIT;
mvm->drop_bcn_ap_mode = true;
mutex_init(&mvm->mutex);
@@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- err = iwl_run_init_mvm_ucode(mvm, true);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ err = iwl_run_unified_mvm_ucode(mvm, true);
+ else
+ err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
err = iwl_mvm_mac_setup_register(mvm);
if (err)
goto out_free;
+ mvm->hw_registered = true;
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_thermal_initialize(mvm, min_backoff);
@@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_unregister:
ieee80211_unregister_hw(mvm->hw);
+ mvm->hw_registered = false;
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
@@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
- } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+ mvm->hw_registered) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index af6d10c23e5a..e684811f8e8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac;
bool tid_found = false;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+ if (mvmvif->dbgfs_pm.use_ps_poll) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ return;
+ }
+#endif
+
for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
if (!mvmvif->queue_params[ac].uapsd)
continue;
@@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
}
}
- if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* set advanced pm flag with no uapsd ACs to enable ps-poll */
- if (mvmvif->dbgfs_pm.use_ps_poll)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-#endif
- return;
- }
-
cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
@@ -601,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *disable_ps = _data;
- if (mvmvif->phy_ctxt)
- if (mvmvif->phy_ctxt->id < MAX_PHYS)
- *disable_ps |= mvmvif->ps_disabled;
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ *disable_ps |= mvmvif->ps_disabled;
}
static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
@@ -611,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_power_vifs *power_iterator = _data;
+ bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
@@ -621,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
/* only a single MAC of the same type */
WARN_ON(power_iterator->ap_vif);
power_iterator->ap_vif = vif;
- if (mvmvif->phy_ctxt)
- if (mvmvif->phy_ctxt->id < MAX_PHYS)
- power_iterator->ap_active = true;
+ if (active)
+ power_iterator->ap_active = true;
break;
case NL80211_IFTYPE_MONITOR:
/* only a single MAC of the same type */
WARN_ON(power_iterator->monitor_vif);
power_iterator->monitor_vif = vif;
- if (mvmvif->phy_ctxt)
- if (mvmvif->phy_ctxt->id < MAX_PHYS)
- power_iterator->monitor_active = true;
+ if (active)
+ power_iterator->monitor_active = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
/* only a single MAC of the same type */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
- if (mvmvif->phy_ctxt)
- if (mvmvif->phy_ctxt->id < MAX_PHYS)
- power_iterator->p2p_active = true;
+ if (active)
+ power_iterator->p2p_active = true;
break;
case NL80211_IFTYPE_STATION:
power_iterator->bss_vif = vif;
- if (mvmvif->phy_ctxt)
- if (mvmvif->phy_ctxt->id < MAX_PHYS)
- power_iterator->bss_active = true;
+ if (active)
+ power_iterator->bss_active = true;
break;
default:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 227c5ed9cbe6..ce907c58ebf6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- struct iwl_mvm_sta *mvmsta;
- struct iwl_mvm_vif *mvmvif;
-
if (!sta->ht_cap.ht_supported)
return false;
@@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
return false;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
if (mvm->nvm_data->sku_cap_mimo_disabled)
return false;
@@ -978,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
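+ /* only compute the mask for a valid index; a negative shift is undefined */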
+ if (i >= 0)
+ mask = BIT(i);
+ for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
@@ -3071,7 +3067,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
{
- u8 nss = 0, mcs = 0;
+ u8 nss = 0;
spin_lock(&mvm->drv_stats_lock);
@@ -3099,11 +3095,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
if (rate & RATE_MCS_HT_MSK) {
mvm->drv_rx_stats.ht_frames++;
- mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
} else if (rate & RATE_MCS_VHT_MSK) {
mvm->drv_rx_stats.vht_frames++;
- mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
} else {
@@ -3624,6 +3618,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
} else if (rate & RATE_MCS_HT_MSK) {
type = "HT";
mcs = rate & RATE_HT_MCS_INDEX_MSK;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK)
+ >> RATE_HT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 0e60e38b2acf..20473df79c94 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -497,8 +497,7 @@ struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
__le32 mac_id;
u8 beacon_filter_average_energy;
- struct mvm_statistics_general_v8 *general;
- struct mvm_statistics_load *load;
+ void *general;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* the notification directly.
*/
if (data->general) {
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
- mvmvif->beacon_stats.avg_signal =
- -data->general->beacon_average_energy[mvmvif->id];
+ u16 vif_id = mvmvif->id;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ struct mvm_statistics_general_cdb *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
+ } else {
+ struct mvm_statistics_general_v8 *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
+ }
+
}
if (mvmvif->id != id)
@@ -571,6 +586,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
ieee80211_cqm_rssi_notify(
vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ sig,
GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
@@ -580,6 +596,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
ieee80211_cqm_rssi_notify(
vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ sig,
GFP_KERNEL);
}
}
@@ -615,48 +632,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
- int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
- sizeof(struct iwl_notif_statistics_v10);
- u32 temperature;
+ int expected_size;
+
+ if (iwl_mvm_is_cdb_supported(mvm))
+ expected_size = sizeof(*stats);
+ else if (iwl_mvm_has_new_rx_api(mvm))
+ expected_size = sizeof(struct iwl_notif_statistics_v11);
+ else
+ expected_size = sizeof(struct iwl_notif_statistics_v10);
if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
- temperature = le32_to_cpu(stats->general.radio_temperature);
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
- stats->general.beacon_filter_average_energy;
+ stats->general.common.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx);
- mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
- mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
mvm->radio_stats.on_time_rf =
- le64_to_cpu(stats->general.on_time_rf);
+ le64_to_cpu(stats->general.common.on_time_rf);
mvm->radio_stats.on_time_scan =
- le64_to_cpu(stats->general.on_time_scan);
+ le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i;
-
- data.load = &stats->load_stats;
+ u8 *energy;
+ __le32 *bytes, *air_time;
+
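+ /*
+ * The load stats layout differs between the v11 and CDB versions of
+ * the notification, so take pointers into the right struct.
+ */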
+ if (!iwl_mvm_is_cdb_supported(mvm)) {
+ struct iwl_notif_statistics_v11 *v11 =
+ (void *)&pkt->data;
+
+ energy = (void *)&v11->load_stats.avg_energy;
+ bytes = (void *)&v11->load_stats.byte_count;
+ air_time = (void *)&v11->load_stats.air_time;
+ } else {
+ energy = (void *)&stats->load_stats.avg_energy;
+ bytes = (void *)&stats->load_stats.byte_count;
+ air_time = (void *)&stats->load_stats.air_time;
+ }
rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
struct iwl_mvm_sta *sta;
- if (!data.load->avg_energy[i])
+ if (!energy[i])
continue;
sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
- sta->avg_energy = data.load->avg_energy[i];
+ sta->avg_energy = energy[i];
}
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 6c802cee900c..d79e9c2a2654 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
unsigned int headlen, fraglen, pad_len = 0;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
pad_len = 2;
+
+ /*
+ * If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, pad_len);
+ }
len -= pad_len;
/* If frame is small enough to fit in skb->head, pull it completely.
@@ -409,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
- return;
+ goto set_timer;
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
@@ -432,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
}
reorder_buf->head_sn = nssn;
+set_timer:
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index fa9743205491..0a64efa844b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
int *global_cnt = data;
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
- mvmvif->phy_ctxt->id < MAX_PHYS)
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX)
*global_cnt += 1;
}
@@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
return cpu_to_le32(rates);
}
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_dwell *dwell,
+ struct iwl_mvm_scan_timing_params *timing)
+{
+ dwell->active = timing->dwell_active;
+ dwell->passive = timing->dwell_passive;
+ dwell->fragmented = timing->dwell_fragmented;
+ dwell->extended = timing->dwell_extended;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
{
- struct iwl_scan_config *scan_config;
struct ieee80211_supported_band *band;
- int num_channels =
- mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
- mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
- int ret, i, j = 0, cmd_size;
+ int i, j = 0;
+
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
+{
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ struct iwl_scan_config *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+ cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
+{
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ struct iwl_scan_config_cdb *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
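+ /* CDB uses two LMACs, so the timing values are set per LMAC */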
+ cfg->out_of_channel_time[0] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->out_of_channel_time[1] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
+ cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ void *cfg;
+ int ret, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ int num_channels =
+ mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+ u32 flags;
+ u8 channel_flags;
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
@@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return 0;
}
- cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
+ if (iwl_mvm_is_cdb_supported(mvm))
+ cmd_size = sizeof(struct iwl_scan_config_cdb);
+ else
+ cmd_size = sizeof(struct iwl_scan_config);
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
- scan_config = kzalloc(cmd_size, GFP_KERNEL);
- if (!scan_config)
+ cfg = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cfg)
return -ENOMEM;
- scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
- SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
- SCAN_CONFIG_FLAG_SET_TX_CHAINS |
- SCAN_CONFIG_FLAG_SET_RX_CHAINS |
- SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
- SCAN_CONFIG_FLAG_SET_ALL_TIMES |
- SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
- SCAN_CONFIG_FLAG_SET_MAC_ADDR |
- SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
- SCAN_CONFIG_N_CHANNELS(num_channels) |
- (type == IWL_SCAN_TYPE_FRAGMENTED ?
- SCAN_CONFIG_FLAG_SET_FRAGMENTED :
- SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED));
- scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
- scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
- scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
- scan_config->out_of_channel_time =
- cpu_to_le32(scan_timing[type].max_out_time);
- scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
- scan_config->dwell_active = scan_timing[type].dwell_active;
- scan_config->dwell_passive = scan_timing[type].dwell_passive;
- scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented;
- scan_config->dwell_extended = scan_timing[type].dwell_extended;
-
- memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
-
- scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
- scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
- IWL_CHANNEL_FLAG_ACCURATE_EBS |
- IWL_CHANNEL_FLAG_EBS_ADD |
- IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
-
- band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
- scan_config->channel_array[j] = band->channels[i].hw_value;
- band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
- scan_config->channel_array[j] = band->channels[i].hw_value;
+ flags = SCAN_CONFIG_FLAG_ACTIVATE |
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+ SCAN_CONFIG_N_CHANNELS(num_channels) |
+ (type == IWL_SCAN_TYPE_FRAGMENTED ?
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+ channel_flags = IWL_CHANNEL_FLAG_EBS |
+ IWL_CHANNEL_FLAG_ACCURATE_EBS |
+ IWL_CHANNEL_FLAG_EBS_ADD |
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+ iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
+ } else {
+ iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ }
- cmd.data[0] = scan_config;
+ cmd.data[0] = cfg;
cmd.len[0] = cmd_size;
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
@@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (!ret)
mvm->scan_type = type;
- kfree(scan_config);
+ kfree(cfg);
return ret;
}
@@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
+ struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+
if (params->measurement_dwell) {
cmd->active_dwell = params->measurement_dwell;
cmd->passive_dwell = params->measurement_dwell;
cmd->extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ cmd->active_dwell = timing->dwell_active;
+ cmd->passive_dwell = timing->dwell_passive;
+ cmd->extended_dwell = timing->dwell_extended;
+ }
+ cmd->fragmented_dwell = timing->dwell_fragmented;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+ cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
+ cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
+ cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ } else {
+ cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
+ cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
+ cmd->no_cdb.scan_priority =
+ cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
- cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
- cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
- cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_is_regular_scan(params))
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -1063,9 +1142,8 @@ static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
int n_channels, u32 ssid_bitmap,
- struct iwl_scan_req_umac *cmd)
+ struct iwl_scan_channel_cfg_umac *channel_cfg)
{
- struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
int i;
for (i = 0; i < n_channels; i++) {
@@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+ if (iwl_mvm_is_cdb_supported(mvm))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+ }
if (iwl_mvm_rrm_scan_needed(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
@@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+ void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
+ (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+ struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
int uid, i;
u32 ssid_bitmap = 0;
+ u8 channel_flags = 0;
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
if (iwl_mvm_scan_use_ebs(mvm, vif))
- cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+ channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- cmd->n_channels = params->n_channels;
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->cdb.channel_flags = channel_flags;
+ cmd->cdb.n_channels = params->n_channels;
+ } else {
+ cmd->no_cdb.channel_flags = channel_flags;
+ cmd->no_cdb.n_channels = params->n_channels;
+ }
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
- params->n_channels, ssid_bitmap, cmd);
+ params->n_channels, ssid_bitmap,
+ cmd_data);
for (i = 0; i < params->n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *scan_plan =
@@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
+ int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+
+ if (iwl_mvm_is_cdb_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- return sizeof(struct iwl_scan_req_umac) +
+ return base_size +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..bd1dcc863d8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
add_sta_cmd.station_flags |=
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
- add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
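+ /* only include the AID once the station is actually associated */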
+ if (mvm_sta->associated)
+ add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
if (sta->wme) {
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
@@ -454,14 +455,53 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
rcu_read_unlock();
+ return disable_agg_tids;
+}
+
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+ bool same_sta)
+{
+ struct iwl_mvm_sta *mvmsta;
+ u8 txq_curr_ac, sta_id, tid;
+ unsigned long disable_agg_tids = 0;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
spin_lock_bh(&mvm->queue_info_lock);
- /* Unmap MAC queues and TIDs from this queue */
- mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
- mvm->queue_info[queue].hw_queue_refcount = 0;
- mvm->queue_info[queue].tid_bitmap = 0;
+ txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
- return disable_agg_tids;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+ /* Disable the queue */
+ if (disable_agg_tids)
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+
+ ret = iwl_mvm_disable_txq(mvm, queue,
+ mvmsta->vif->hw_queue[txq_curr_ac],
+ tid, 0);
+ if (ret) {
+ /* Re-mark the inactive queue as inactive */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm,
+ "Failed to free inactive queue %d (ret=%d)\n",
+ queue, ret);
+
+ return ret;
+ }
+
+ /* If TXQ is allocated to another STA, update removal in FW */
+ if (!same_sta)
+ iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+ return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
@@ -652,7 +692,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
- bool using_inactive_queue = false;
+ bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false;
@@ -709,6 +749,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
using_inactive_queue = true;
+ same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
IWL_DEBUG_TX_QUEUES(mvm,
"Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
queue, mvmsta->sta_id, tid);
@@ -755,44 +796,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* first
*/
if (using_inactive_queue) {
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_DISABLE_QUEUE,
- };
- u8 txq_curr_ac;
-
- disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
-
- spin_lock_bh(&mvm->queue_info_lock);
- txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
- cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
- cmd.tid = mvm->queue_info[queue].txq_tid;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* Disable the queue */
- if (disable_agg_tids)
- iwl_mvm_invalidate_sta_queue(mvm, queue,
- disable_agg_tids, false);
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
- &cmd);
- if (ret) {
- IWL_ERR(mvm,
- "Failed to free inactive queue %d (ret=%d)\n",
- queue, ret);
-
- /* Re-mark the inactive queue as inactive */
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
- spin_unlock_bh(&mvm->queue_info_lock);
-
+ ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+ if (ret)
return ret;
- }
-
- /* If TXQ is allocated to another STA, update removal in FW */
- if (cmd.sta_id != mvmsta->sta_id)
- iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
}
IWL_DEBUG_TX_QUEUES(mvm,
@@ -868,7 +874,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
.scd_queue = queue,
.action = SCD_CFG_UPDATE_QUEUE_TID,
};
- s8 sta_id;
int tid;
unsigned long tid_bitmap;
int ret;
@@ -876,7 +881,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
- sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -1110,6 +1114,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
+ bool using_inactive_queue = false, same_sta = false;
/*
* Check for inactive queues, so we don't reach a situation where we
@@ -1133,6 +1138,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC;
+ } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+ /*
+ * If this queue is already allocated but inactive, we'll need to
+ * free it first before enabling it again. Mark it as reserved to
+ * make sure no new traffic arrives on it in the meantime.
+ */
+ using_inactive_queue = true;
+ same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
@@ -1140,6 +1153,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
mvmsta->reserved_queue = queue;
+ if (using_inactive_queue)
+ iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
queue, mvmsta->sta_id);
@@ -1164,9 +1180,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
.frame_limit = IWL_FRAME_LIMIT,
};
- /* Make sure reserved queue is still marked as such (or allocated) */
- mvm->queue_info[mvm_sta->reserved_queue].status =
- IWL_MVM_QUEUE_RESERVED;
+ /* Make sure reserved queue is still marked as such (if allocated) */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
@@ -1485,6 +1502,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ u8 sta_id = mvm_sta->sta_id;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1493,7 +1511,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
kfree(mvm_sta->dup_data);
if ((vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+ mvmvif->ap_sta_id == sta_id) ||
iwl_mvm_is_dqa_supported(mvm)){
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
@@ -1509,8 +1527,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ /*
+ * If pending_frames is set at this point, it must be a driver
+ * internal logic error, since the queues are empty and were
+ * removed successfully. Warn on it, but set it to 0 anyway to
+ * avoid the station not being removed later in the function.
+ */
+ WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
+ }
/* If there is a TXQ still marked as reserved - free it */
if (iwl_mvm_is_dqa_supported(mvm) &&
@@ -1528,7 +1555,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
- mvm_sta->sta_id, reserved_txq, *status)) {
+ sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL;
}
@@ -1538,7 +1565,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
}
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -1547,7 +1574,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* clear d0i3_ap_sta_id if no longer relevant */
- if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
}
@@ -1556,7 +1583,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* This shouldn't happen - the TDLS channel switch should be canceled
* before the STA is removed.
*/
- if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@@ -1566,21 +1593,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* calls the drain worker.
*/
spin_lock_bh(&mvm_sta->lock);
+
/*
* There are frames pending on the AC queues for this station.
* We need to wait until all the frames are drained...
*/
- if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ if (atomic_read(&mvm->pending_frames[sta_id])) {
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock);
/* disable TDLS sta queues on drain complete */
if (sta->tdls) {
- mvm->tfd_drained[mvm_sta->sta_id] =
- mvm_sta->tfd_queue_msk;
- IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
- mvm_sta->sta_id);
+ mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
+ IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
}
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1764,6 +1790,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
+ int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1779,19 +1806,16 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
- if ((vif->type == NL80211_IFTYPE_AP) &&
- (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+ if (vif->type == NL80211_IFTYPE_AP)
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
- else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
- (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+ else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
- else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+ else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
return -EINVAL;
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
wdg_timeout);
+ bsta->tfd_queue_msk |= BIT(queue);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -1800,8 +1824,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
- return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
- mvmvif->id, mvmvif->color);
+ ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ return ret;
+
+ /*
+ * In AP vif type, we also need to enable the cab_queue. However, we
+ * have to enable it after the ADD_STA command is sent, otherwise the
+ * FW will throw an assert once we send the ADD_STA command (it'll
+ * detect a mismatch in the tfd_queue_msk, as we can't add the
+ * enabled-cab_queue to the mask)
+ */
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ vif->type == NL80211_IFTYPE_AP) {
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
+ 0, &cfg, wdg_timeout);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
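+ /* disable the queues that were enabled for the bcast STA of this vif */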
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+ IWL_MAX_TID_COUNT, 0);
+
+ if (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
+ mvmvif->bcast_sta.tfd_queue_msk &=
+ ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+ }
+
+ if (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
+ mvmvif->bcast_sta.tfd_queue_msk &=
+ ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+ }
}
/* Send the FW a request to remove the station from its internal data
@@ -1813,6 +1896,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -1826,22 +1912,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_is_dqa_supported(mvm))
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
qmask = iwl_mvm_mac_get_queues_mask(vif);
- if (vif->type == NL80211_IFTYPE_AP) {
/*
* The firmware defines the TFD queue mask to only be relevant
* for *unicast* queues, so the multicast (CAB) queue shouldn't
- * be included.
+ * be included. This only happens in NL80211_IFTYPE_AP vif type,
+ * so the next line will only have an effect there.
*/
qmask &= ~BIT(vif->cab_queue);
-
- if (iwl_mvm_is_dqa_supported(mvm))
- qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
- } else if (iwl_mvm_is_dqa_supported(mvm) &&
- vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
@@ -2246,6 +2326,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
+ /*
+ * TXQ shouldn't be in inactive mode for non-DQA, so getting
+ * an inactive queue from iwl_mvm_find_free_queue() is
+ * certainly a bug
+ */
+ WARN_ON(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_INACTIVE);
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
@@ -2961,6 +3048,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
/* Get the station from the mvm local station table */
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (!mvm_sta) {
+ IWL_ERR(mvm, "Failed to find station\n");
+ return -EINVAL;
+ }
+ sta_id = mvm_sta->sta_id;
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
@@ -2988,8 +3080,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
return 0;
}
- sta_id = mvm_sta->sta_id;
-
ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b45c7b9937c8..4be34f902278 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -437,6 +437,7 @@ struct iwl_mvm_sta {
bool disable_tx;
bool tlc_amsdu;
bool sleeping;
+ bool associated;
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
- thermal_zone_device_unregister(mvm->tz_device.tzone);
- mvm->tz_device.tzone = NULL;
+ if (mvm->tz_device.tzone) {
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+ }
}
static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
- thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
- mvm->cooling_dev.cdev = NULL;
+ if (mvm->cooling_dev.cdev) {
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+ }
}
#endif /* CONFIG_THERMAL */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 66957ac12ca4..dd2b4a300819 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
-static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_hdr *hdr,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd)
+static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
{
+ u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
- u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
u8 protocol = 0;
/*
@@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
* compute it
*/
if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
- return;
+ goto out;
/* We do not expect to be requested to csum stuff we do not support */
if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
@@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6)),
"No support for requested checksum\n")) {
skb_checksum_help(skb);
- return;
+ goto out;
}
if (skb->protocol == htons(ETH_P_IP)) {
@@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
protocol != NEXTHDR_HOP &&
protocol != NEXTHDR_DEST) {
skb_checksum_help(skb);
- return;
+ goto out;
}
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
@@ -159,7 +158,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
WARN_ON_ONCE(1);
skb_checksum_help(skb);
- return;
+ goto out;
}
/* enable L4 csum */
@@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
mh_len /= 2;
offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
- tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+out:
#endif
+ return offload_assist;
}
/*
@@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id)
{
- struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
tx_cmd->tx_flags = cpu_to_le32(tx_flags);
- /* Total # bytes to be transmitted */
- tx_cmd->len = cpu_to_le16((u16)skb->len +
- (uintptr_t)skb_info->driver_data[0]);
+ /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
@@ -295,7 +293,52 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
!(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
- iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
+ tx_cmd->offload_assist |=
+ cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
+}
+
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta)
+{
+ int rate_idx;
+ u8 rate_plcp;
+ u32 rate_flags;
+
+ /* HT rate doesn't make sense for a non data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+ "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx);
+
+ rate_idx = info->control.rates[0].idx;
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = rate_lowest_index(
+ &mvm->nvm_data->bands[info->band], sta);
+
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == NL80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+
+ /* For 2.4 GHZ band, check that there is no need to remap */
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+ if (info->band == NL80211_BAND_2GHZ &&
+ !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+ rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+ else
+ rate_flags =
+ BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ return (u32)rate_plcp | rate_flags;
}
/*
@@ -305,10 +348,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
- u32 rate_flags;
- int rate_idx;
- u8 rate_plcp;
-
/* Set retry limit on RTS packets */
tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
@@ -337,46 +376,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
- /* HT rate doesn't make sense for a non data frame */
- WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
- "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
- info->control.rates[0].flags,
- info->control.rates[0].idx,
- le16_to_cpu(fc));
-
- rate_idx = info->control.rates[0].idx;
- /* if the rate isn't a well known legacy rate, take the lowest one */
- if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
- rate_idx = rate_lowest_index(
- &mvm->nvm_data->bands[info->band], sta);
-
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == NL80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
-
- /* For 2.4 GHZ band, check that there is no need to remap */
- BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
-
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
-
mvm->mgmt_last_antenna_idx =
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
- if (info->band == NL80211_BAND_2GHZ &&
- !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
- rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
- else
- rate_flags =
- BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
/* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
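The legacy rate selection now lives in iwl_mvm_get_tx_rate(), which packs the PLCP value, the antenna bitmap and the CCK flag into one 32-bit rate_n_flags word. A rough standalone sketch of that packing, with the bit layout assumed for illustration only:

#include <stdint.h>
#include <stdio.h>

/* assumed layout; the driver takes these from its rate/antenna definitions */
#define RATE_MCS_ANT_POS  14
#define RATE_MCS_CCK_MSK  (1u << 9)

static uint32_t build_rate_n_flags(uint8_t plcp, unsigned int ant_idx, int is_cck)
{
	uint32_t val = plcp;

	val |= (1u << ant_idx) << RATE_MCS_ANT_POS;	/* antenna bitmap */
	if (is_cck)
		val |= RATE_MCS_CCK_MSK;
	return val;
}

int main(void)
{
	/* e.g. a CCK rate on the first antenna */
	printf("rate_n_flags = 0x%08x\n", build_rate_n_flags(0x0a, 0, 1));
	return 0;
}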
@@ -459,7 +464,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
@@ -479,12 +483,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+ return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+ struct iwl_device_cmd *cmd)
+{
+ struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
memset(&skb_info->status, 0, sizeof(skb_info->status));
memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
- skb_info->driver_data[1] = dev_cmd;
-
- return dev_cmd;
+ skb_info->driver_data[1] = cmd;
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
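The new iwl_mvm_skb_prepare_status() helper exists because, in mac80211's tx_info, the status/driver_data area shares storage with info->control, so it must only run once nothing reads control anymore (hence the "From now on, we cannot access info->control" comments below). A toy standalone example of that union overlap:

#include <stdio.h>
#include <string.h>

/* toy stand-in for struct ieee80211_tx_info: control and driver_data share storage */
struct toy_tx_info {
	union {
		struct { void *vif; int hw_queue; } control;
		struct { void *driver_data[2]; } status;
	};
};

int main(void)
{
	struct toy_tx_info info = { .control = { .vif = (void *)0x1234, .hw_queue = 5 } };

	/* reading control is fine up to here ... */
	printf("hw_queue before = %d\n", info.control.hw_queue);

	/* ... but preparing the status area overwrites the same bytes */
	memset(&info.status, 0, sizeof(info.status));
	info.status.driver_data[1] = (void *)0xdead;

	printf("hw_queue after  = %d (clobbered)\n", info.control.hw_queue);
	return 0;
}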
@@ -496,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
/*
- * handle legacy hostapd as well, where station may be added
- * only after assoc.
+ * Handle legacy hostapd as well, where station may be added
+ * only after assoc. Take care of the case where we send a
+ * deauth to a station that we don't have.
*/
- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+ ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
@@ -536,9 +548,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* queue. STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue
*/
- if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
skb_info->control.vif->type == NL80211_IFTYPE_STATION)
- IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+ skb_info->hw_queue = mvm->aux_queue;
memcpy(&info, skb->cb, sizeof(info));
@@ -550,9 +562,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.hw_queue != info.control.vif->cab_queue)))
return -1;
- /* This holds the amsdu headers length */
- skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
queue = info.hw_queue;
/*
@@ -563,9 +572,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 * (this is not possible for unicast packets as TDLS discovery
 * responses are sent without a station entry); otherwise use the
* AUX station.
- * In DQA mode, if vif is of type STATION and frames are not multicast,
- * they should be sent from the BSS queue. For example, TDLS setup
- * frames should be sent on this queue, as they go through the AP.
+ * In DQA mode, if vif is of type STATION and frames are not multicast
+ * or offchannel, they should be sent from the BSS queue.
+ * For example, TDLS setup frames should be sent on this queue,
+ * as they go through the AP.
*/
sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
@@ -587,7 +597,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_STATION_COUNT)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
- info.control.vif->type == NL80211_IFTYPE_STATION) {
+ info.control.vif->type == NL80211_IFTYPE_STATION &&
+ queue != mvm->aux_queue) {
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
}
@@ -598,6 +609,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (!dev_cmd)
return -1;
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */
@@ -634,7 +648,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
bool ipv4 = (skb->protocol == htons(ETH_P_IP));
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
- u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+ u16 snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
@@ -736,21 +750,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
/* This skb fits in one single A-MSDU */
if (num_subframes * mss >= tcp_payload_len) {
- struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
-
- /*
- * Compute the length of all the data added for the A-MSDU.
- * This will be used to compute the length to write in the TX
- * command. We have: SNAP + IP + TCP for n -1 subframes and
- * ETH header for n subframes. Note that the original skb
- * already had one set of SNAP / IP / TCP headers.
- */
- num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
- amsdu_add = num_subframes * sizeof(struct ethhdr) +
- (num_subframes - 1) * (snap_ip_tcp + pad);
- /* This holds the amsdu headers length */
- skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
-
__skb_queue_tail(mpdus_skb, skb);
return 0;
}
@@ -789,14 +788,6 @@ segment:
ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
if (tcp_payload_len > mss) {
- struct ieee80211_tx_info *skb_info =
- IEEE80211_SKB_CB(tmp);
-
- num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
- amsdu_add = num_subframes * sizeof(struct ethhdr) +
- (num_subframes - 1) * (snap_ip_tcp + pad);
- skb_info->driver_data[0] =
- (void *)(uintptr_t)amsdu_add;
skb_shinfo(tmp)->gso_size = mss;
} else {
qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -908,7 +899,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
goto drop;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
- /* From now on, we cannot access info->control */
/*
* we handle that entirely ourselves -- for uAPSD the firmware
@@ -919,6 +909,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_lock(&mvmsta->lock);
+ /* nullfunc frames should go to the MGMT queue regardless of QOS,
+ * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
+ * assignment of MGMT TID
+ */
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
u8 *qc = NULL;
qc = ieee80211_get_qos_ctl(hdr);
@@ -931,27 +925,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
- } else if (iwl_mvm_is_dqa_supported(mvm) &&
- (ieee80211_is_qos_nullfunc(fc) ||
- ieee80211_is_nullfunc(fc))) {
- /*
- * nullfunc frames should go to the MGMT queue regardless of QOS
- */
- tid = IWL_MAX_TID_COUNT;
+ if (WARN_ON_ONCE(is_ampdu &&
+ mvmsta->tid_data[tid].state != IWL_AGG_ON))
+ goto drop_unlock_sta;
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
-
- if (ieee80211_is_mgmt(fc))
- tx_cmd->tid_tspec = IWL_TID_NON_QOS;
- }
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
-
- WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
-
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -959,11 +939,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
- if (is_ampdu) {
- if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
- goto drop_unlock_sta;
- txq_id = mvmsta->tid_data[tid].txq_id;
- }
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
@@ -1015,6 +994,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@@ -1024,7 +1006,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
/* Increase pending frames count if this isn't AMPDU */
- if (!is_ampdu)
+ if ((iwl_mvm_is_dqa_supported(mvm) &&
+ mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
+ mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
+ (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -1040,7 +1025,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
@@ -1054,9 +1038,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
- /* This holds the amsdu headers length */
- skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
if (!skb_is_gso(skb))
return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
@@ -1295,8 +1276,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
memset(&info->status, 0, sizeof(info->status));
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
@@ -1319,10 +1298,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
/* Single frame failure in an AMPDU queue => send BAR */
- if (txq_id >= mvm->first_agg_queue &&
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!(info->flags & IEEE80211_TX_STAT_ACK) &&
!(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
/* W/A FW bug: seq_ctl is wrong when the status isn't success */
if (status != TX_STATUS_SUCCESS) {
@@ -1357,7 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status(mvm->hw, skb);
}
- if (txq_id >= mvm->first_agg_queue) {
+ if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d04babd99b53..dedea96a8e0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
- u32 base;
- base = mvm->error_event_table;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (!base)
base = mvm->fw->init_errlog_ptr;
@@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+
+ if (mvm->error_event_table[1])
+ iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
if (mvm->support_umac_log)
iwl_mvm_dump_umac_error_log(mvm);
@@ -649,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
- cfg->tid);
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, cfg->tid);
return;
}
@@ -693,10 +699,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.tid = cfg->tid,
};
- /* Set sta_id in the command, if it exists */
- if (iwl_mvm_is_dqa_supported(mvm))
- cmd.sta_id = cfg->sta_id;
-
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
@@ -706,8 +708,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
}
}
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags)
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u8 tid, u8 flags)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
@@ -720,7 +722,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
spin_unlock_bh(&mvm->queue_info_lock);
- return;
+ return 0;
}
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
@@ -760,7 +762,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
- return;
+ return 0;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -791,6 +793,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
+
+ return ret;
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2f8134b2a504..ba8a81cb0e2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -533,7 +533,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -673,11 +673,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg = &iwl9000lc_2ac_cfg;
iwl_trans->cfg = cfg;
}
+
+ if (cfg == &iwla000_2ac_cfg_hr &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+ cfg = &iwla000_2ac_cfg_jf;
+ iwl_trans->cfg = cfg;
+ }
}
#endif
pci_set_drvdata(pdev, iwl_trans);
- iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
+ iwl_trans->drv = iwl_drv_start(iwl_trans);
if (IS_ERR(iwl_trans->drv)) {
ret = PTR_ERR(iwl_trans->drv);
@@ -778,13 +784,14 @@ static int iwl_pci_resume(struct device *device)
/*
* Enable rfkill interrupt (in order to keep track of
- * the rfkill status)
+ * the rfkill status). Must be locked to avoid processing
+ * a possible rfkill interrupt between reading the state
+ * and calling iwl_trans_pcie_rf_kill() with it.
*/
+ mutex_lock(&trans_pcie->mutex);
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
-
- mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
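The resume path now reads the rfkill switch and reports it to the op-mode under the same mutex, so an rfkill interrupt cannot be handled between the two steps; the interrupt handlers in rx.c below follow the same ordering. A minimal sketch of that read-then-report-under-lock pattern, with illustrative names only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t trans_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool hw_switch_asserted;		/* stands in for the CSR_GP_CNTRL read */

static bool read_rfkill_state(void)
{
	return hw_switch_asserted;
}

static void report_rfkill(bool state)
{
	printf("op-mode told rfkill=%d\n", state);
}

static void resume_path(void)
{
	bool hw_rfkill;

	pthread_mutex_lock(&trans_mutex);
	hw_rfkill = read_rfkill_state();	/* read ... */
	report_rfkill(hw_rfkill);		/* ... and report, atomically w.r.t. the ISR */
	pthread_mutex_unlock(&trans_mutex);
}

int main(void)
{
	resume_path();
	return 0;
}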
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cac6d99012b3..10937309641a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -279,7 +279,7 @@ struct iwl_txq {
bool frozen;
u8 active;
bool ampdu;
- bool block;
+ int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
@@ -670,6 +670,8 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
+
return !(iwl_read32(trans, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 6fe5546dc773..de94dfdf2ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1607,17 +1607,19 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_RF_KILL) {
bool hw_rfkill;
+ mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
- mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,
@@ -1952,17 +1954,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
bool hw_rfkill;
+ mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
- mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b10e3633df1a..7f05fc56587a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
(*first_ucode_section)++;
}
- for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ for (i = *first_ucode_section; i < image->num_sec; i++) {
last_read_idx = i;
/*
@@ -868,19 +868,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
int cpu,
int *first_ucode_section)
{
- int shift_param;
int i, ret = 0;
u32 last_read_idx = 0;
- if (cpu == 1) {
- shift_param = 0;
+ if (cpu == 1)
*first_ucode_section = 0;
- } else {
- shift_param = 16;
+ else
(*first_ucode_section)++;
- }
- for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ for (i = *first_ucode_section; i < image->num_sec; i++) {
last_read_idx = i;
/*
@@ -1066,6 +1062,137 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
&first_ucode_section);
}
+static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+{
+ bool hw_rfkill = iwl_is_rfkill_set(trans);
+
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ return hw_rfkill;
+}
+
+struct iwl_causes_list {
+ u32 cause_num;
+ u32 mask_reg;
+ u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ int i;
+
+ /*
+ * Access all non RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - fallback queue, which is designated for
+ * management frame, command responses etc, is always mapped to the
+ * first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
+static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+{
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ if (!trans_pcie->msix_enabled) {
+ if (trans->cfg->mq_rx_supported &&
+ test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_write_prph(trans, UREG_CHICK,
+ UREG_CHICK_MSI_ENABLE);
+ return;
+ }
+ /*
+ * The IVAR table needs to be configured again after reset,
+ * but if the device is disabled, we can't write to
+ * prph.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+ /*
+ * Each cause from the causes list above and the RX causes is
+ * represented as a byte in the IVAR table. The first nibble
+ * represents the bound interrupt vector of the cause, the second
+ * represents no auto clear for this cause. This will be set if its
+ * interrupt vector is bound to serve other causes.
+ */
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
+}
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ if (!trans_pcie->msix_enabled)
+ return;
+
+ trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+ trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+ trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
+
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
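The comment above describes each IVAR entry as one byte whose low nibble holds the bound MSI-X vector and whose high bit marks "no auto clear" for causes that share a vector. A small standalone sketch of that encoding, with the exact non-auto-clear bit assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* assumed encoding: low nibble = bound MSI-X vector,
 * bit 7 = the value written as MSIX_NON_AUTO_CLEAR_CAUSE */
#define NON_AUTO_CLEAR  0x80u

static uint8_t ivar_entry(unsigned int vector, int shared)
{
	uint8_t val = vector & 0x0f;

	if (shared)		/* this vector also serves other causes */
		val |= NON_AUTO_CLEAR;
	return val;
}

int main(void)
{
	printf("cause -> vector 0, exclusive: 0x%02x\n", ivar_entry(0, 0));
	printf("cause -> vector 0, shared:    0x%02x\n", ivar_entry(0, 1));
	return 0;
}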
@@ -1119,6 +1246,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
usleep_range(1000, 2000);
/*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ /*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
 * This is a bug in certain versions of the hardware.
* Certain devices also keep sending HW RF kill interrupt all
@@ -1208,12 +1344,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
mutex_lock(&trans_pcie->mutex);
/* If platform's RF_KILL switch is NOT set to KILL */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill) {
ret = -ERFKILL;
goto out;
@@ -1261,13 +1392,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
ret = iwl_pcie_load_given_ucode(trans, fw);
/* re-check RF-Kill state since we may have missed the interrupt */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
-
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
ret = -ERFKILL;
@@ -1347,6 +1472,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
@@ -1359,11 +1485,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
/*
- * Also enables interrupts - none will happen as the device doesn't
- * know we're waking it up, only when the opmode actually tells it
- * after this call.
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
*/
- iwl_pcie_reset_ict(trans);
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1406,109 +1536,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
-struct iwl_causes_list {
- u32 cause_num;
- u32 mask_reg;
- u8 addr;
-};
-
-static struct iwl_causes_list causes_list[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
-static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i;
-
- /*
- * Access all non RX causes and map them to the default irq.
- * In case we are missing at least one interrupt vector,
- * the first interrupt vector will serve non-RX and FBQ causes.
- */
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
- }
-}
-
-static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 offset =
- trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
- u32 val, idx;
-
- /*
- * The first RX queue - fallback queue, which is designated for
- * management frame, command responses etc, is always mapped to the
- * first interrupt vector. The other RX queues are mapped to
- * the other (N - 2) interrupt vectors.
- */
- val = BIT(MSIX_FH_INT_CAUSES_Q(0));
- for (idx = 1; idx < trans->num_rx_queues; idx++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
- MSIX_FH_INT_CAUSES_Q(idx - offset));
- val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
- }
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
-
- val = MSIX_FH_INT_CAUSES_Q(0);
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
- val |= MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
-
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
- iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
-}
-
-static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
-{
- struct iwl_trans *trans = trans_pcie->trans;
-
- if (!trans_pcie->msix_enabled) {
- if (trans->cfg->mq_rx_supported)
- iwl_write_prph(trans, UREG_CHICK,
- UREG_CHICK_MSI_ENABLE);
- return;
- }
-
- iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
-
- /*
- * Each cause from the causes list above and the RX causes is
- * represented as a byte in the IVAR table. The first nibble
- * represents the bound interrupt vector of the cause, the second
- * represents no auto clear for this cause. This will be set if its
- * interrupt vector is bound to serve other causes.
- */
- iwl_pcie_map_rx_causes(trans);
-
- iwl_pcie_map_non_rx_causes(trans);
-
- trans_pcie->fh_init_mask =
- ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
- trans_pcie->fh_mask = trans_pcie->fh_init_mask;
- trans_pcie->hw_init_mask =
- ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
- trans_pcie->hw_mask = trans_pcie->hw_init_mask;
-}
-
static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
@@ -1659,7 +1686,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
int err;
lockdep_assert_held(&trans_pcie->mutex);
@@ -1677,19 +1703,15 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
iwl_pcie_apm_init(trans);
iwl_pcie_init_msix(trans_pcie);
+
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
/* Set is_down to false here so that...*/
trans_pcie->is_down = false;
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
- /* ... rfkill can call stop_device and set it false if needed */
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ /* ...rfkill can call stop_device and set it false if needed */
+ iwl_trans_check_hw_rf_kill(trans);
/* Make sure we sync here, because we'll need full access later */
if (low_power)
@@ -2960,16 +2982,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
- if (cfg->mq_rx_supported)
- addr_size = 64;
- else
- addr_size = 36;
-
if (cfg->use_tfh) {
+ addr_size = 64;
trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
-
} else {
+ addr_size = 36;
trans_pcie->max_tbs = IWL_NUM_OF_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e44e5adc2b95..911cf9868107 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2096,6 +2096,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -2145,6 +2146,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
*/
skb_pull(skb, hdr_len + iv_len);
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
tso_start(skb, &tso);
while (total_len) {
@@ -2155,7 +2163,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
struct tcphdr *tcph;
- u8 *iph;
+ u8 *iph, *subf_hdrs_start = hdr_page->pos;
total_len -= data_left;
@@ -2216,6 +2224,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
hdr_tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
start_hdr = hdr_page->pos;
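The A-MSDU header-length bookkeeping that used to be precomputed in the mvm layer is now done here incrementally: tx_cmd->len starts at the plain skb length, the one SNAP/IP/TCP header that no longer appears on its own is subtracted once, and each generated subframe adds back the header bytes actually written for it. A rough worked example of that accounting, with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 3000, snap_ip_tcp = 8 + 20 + 20;
	unsigned int subframe_hdr = 14 + snap_ip_tcp;	/* eth-style header + duplicated headers */
	unsigned int num_subframes = 2;
	unsigned int len = skb_len;

	len -= snap_ip_tcp;			/* le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen) */
	for (unsigned int i = 0; i < num_subframes; i++)
		len += subframe_hdr;		/* per-subframe le16_add_cpu(&tx_cmd->len, ...) */

	printf("tx_cmd->len ends up at %u bytes\n", len);
	return 0;
}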
@@ -2408,9 +2418,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = len;
}
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ /*
+ * The first TB points to bi-directional DMA data, we'll
+ * memcpy the data into it later.
+ */
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
@@ -2434,6 +2445,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
}
+ /* building the A-MSDU might have changed this data, so memcpy it now */
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
+
tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 9d96b7c928f7..28cf97489001 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -294,14 +294,6 @@ int orinoco_stop(struct net_device *dev)
}
EXPORT_SYMBOL(orinoco_stop);
-struct net_device_stats *orinoco_get_stats(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- return &priv->stats;
-}
-EXPORT_SYMBOL(orinoco_get_stats);
-
void orinoco_set_multicast_list(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -433,7 +425,7 @@ EXPORT_SYMBOL(orinoco_process_xmit_skb);
static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
struct hermes *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
@@ -593,10 +585,7 @@ static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw)
static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
-
- stats->tx_packets++;
+ dev->stats.tx_packets++;
netif_wake_queue(dev);
@@ -605,8 +594,7 @@ static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
u16 fid = hermes_read_regn(hw, TXCOMPLFID);
u16 status;
struct hermes_txexc_data hdr;
@@ -662,7 +650,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
void orinoco_tx_timeout(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
struct hermes *hw = &priv->hw;
printk(KERN_WARNING "%s: Tx timeout! "
@@ -749,7 +737,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
int len;
struct sk_buff *skb;
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
struct hermes *hw = &priv->hw;
len = le16_to_cpu(desc->data_len);
@@ -840,7 +828,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
struct iw_statistics *wstats = &priv->wstats;
struct sk_buff *skb = NULL;
u16 rxfid, status;
@@ -959,7 +947,7 @@ static void orinoco_rx(struct net_device *dev,
struct sk_buff *skb)
{
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
u16 status, fc;
int length;
struct ethhdr *hdr;
@@ -2137,7 +2125,6 @@ static const struct net_device_ops orinoco_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = orinoco_tx_timeout,
- .ndo_get_stats = orinoco_get_stats,
};
/* Allocate private data.
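orinoco now counts into the stats block embedded in struct net_device; since the networking core falls back to dev->stats when no .ndo_get_stats hook is provided, the private copy and orinoco_get_stats() can go away. A toy standalone illustration of the pattern:

#include <stdio.h>

struct toy_stats { unsigned long tx_packets; };
struct toy_net_device { struct toy_stats stats; };

static void on_tx_complete(struct toy_net_device *dev)
{
	dev->stats.tx_packets++;	/* was priv->stats.tx_packets++ */
}

int main(void)
{
	struct toy_net_device dev = { { 0 } };

	on_tx_complete(&dev);
	printf("tx_packets = %lu\n", dev.stats.tx_packets);
	return 0;
}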
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 5fa1c3e3713f..430862a6a24b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -84,7 +84,6 @@ struct orinoco_private {
/* Net device stuff */
struct net_device *ndev;
- struct net_device_stats stats;
struct iw_statistics wstats;
/* Hardware control variables */
@@ -206,7 +205,6 @@ int orinoco_process_xmit_skb(struct sk_buff *skb,
/* Common ndo functions exported for reuse by orinoco_usb */
int orinoco_open(struct net_device *dev);
int orinoco_stop(struct net_device *dev);
-struct net_device_stats *orinoco_get_stats(struct net_device *dev);
void orinoco_set_multicast_list(struct net_device *dev);
int orinoco_change_mtu(struct net_device *dev, int new_mtu);
void orinoco_tx_timeout(struct net_device *dev);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index bca6935a94db..98e1380b9917 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -403,8 +403,7 @@ static void ezusb_ctx_complete(struct request_context *ctx)
if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
struct net_device *dev = upriv->dev;
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
if (ctx->state != EZUSB_CTX_COMPLETE)
stats->tx_errors++;
@@ -1183,7 +1182,7 @@ static int ezusb_program(struct hermes *hw, const char *buf,
static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device_stats *stats = &dev->stats;
struct ezusb_priv *upriv = priv->card;
u8 mic[MICHAEL_MIC_LEN + 1];
int err = 0;
@@ -1556,7 +1555,6 @@ static const struct net_device_ops ezusb_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = orinoco_tx_timeout,
- .ndo_get_stats = orinoco_get_stats,
};
static int ezusb_probe(struct usb_interface *interface,
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 7ff2efadceca..3f97acb57e66 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -2086,7 +2086,7 @@ static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
* Initialization
*/
-static struct cfg80211_ops lbs_cfg80211_ops = {
+static const struct cfg80211_ops lbs_cfg80211_ops = {
.set_monitor_channel = lbs_cfg_set_monitor_channel,
.libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
.scan = lbs_cfg_scan,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 301170cccfff..033ff881c751 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -305,7 +305,7 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
}
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return 0;
+ return ret;
}
static int lbs_wait_for_ds_awake(struct lbs_private *priv)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index c47d6366875d..a75013ac84d7 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
{
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- unsigned int pad;
- int headroom = (priv->adapter->iface_type ==
- MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
- pad = ((void *)skb->data - sizeof(*local_tx_pd) -
- headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
- skb_push(skb, pad);
skb_push(skb, sizeof(*local_tx_pd));
@@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
/* Always zero as the data is followed by struct txpd */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
- pad);
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
- sizeof(*local_tx_pd) -
- pad);
+ sizeof(*local_tx_pd));
if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ra_list_flags);
return -1;
}
- skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ /* skb_aggr->data is already 64-byte aligned; just reserve the bus
+ * interface header and txpd.
+ */
+ skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
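The explicit pad computation could be dropped because the aggregate buffer start is already aligned to MWIFIEX_DMA_ALIGN_SZ, so reserving exactly the interface header plus txpd keeps the descriptor aligned. A small standalone sketch contrasting the old pad arithmetic with the aligned case (sizes assumed):

#include <stdio.h>
#include <stdint.h>

#define DMA_ALIGN 64u	/* assumed value of MWIFIEX_DMA_ALIGN_SZ */

static unsigned int pad_needed(uintptr_t data, size_t txpd, size_t headroom)
{
	/* mirrors the removed computation: misalignment of (data - txpd - headroom) */
	return (unsigned int)((data - txpd - headroom) & (DMA_ALIGN - 1));
}

int main(void)
{
	size_t txpd = 32, headroom = 4;

	/* arbitrary start needs padding; a 64-byte-aligned buffer that only
	 * reserves headroom + txpd needs none */
	printf("pad (arbitrary start) = %u\n", pad_needed(0x1000 + 70, txpd, headroom));
	printf("pad (aligned reserve) = %u\n", pad_needed(0x1000 + headroom + txpd, txpd, headroom));
	return 0;
}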
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 145cc4b5103b..1e3bd435a694 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- chan = __ieee80211_get_channel(priv->wdev.wiphy,
+ chan = ieee80211_get_channel(priv->wdev.wiphy,
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index b9284b533294..ae2b69db5994 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -114,7 +114,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
p += sprintf(p, "multicast_count=\"%d\"\n",
netdev_mc_count(netdev));
- p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+ p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+ info.ssid.ssid);
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index bec300b9c2ea..188e4c370836 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -27,7 +27,7 @@
#include <linux/timer.h>
#include <linux/ieee80211.h>
#include <uapi/linux/if_arp.h>
-#include <net/mac80211.h>
+#include <net/cfg80211.h>
#define MWIFIEX_BSS_COEX_COUNT 2
#define MWIFIEX_MAX_BSS_NUM (3)
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index ea455948a68a..cb6a1a81d44e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -434,14 +434,14 @@ enum mwifiex_channel_flags {
#define HostCmd_ACT_BITWISE_SET 0x0002
#define HostCmd_ACT_BITWISE_CLR 0x0003
#define HostCmd_RESULT_OK 0x0000
-
-#define HostCmd_ACT_MAC_RX_ON 0x0001
-#define HostCmd_ACT_MAC_TX_ON 0x0002
-#define HostCmd_ACT_MAC_WEP_ENABLE 0x0008
-#define HostCmd_ACT_MAC_ETHERNETII_ENABLE 0x0010
-#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
-#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
-#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON 0x2000
+#define HostCmd_ACT_MAC_RX_ON BIT(0)
+#define HostCmd_ACT_MAC_TX_ON BIT(1)
+#define HostCmd_ACT_MAC_WEP_ENABLE BIT(3)
+#define HostCmd_ACT_MAC_ETHERNETII_ENABLE BIT(4)
+#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE BIT(7)
+#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE BIT(8)
+#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON BIT(13)
+#define HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE BIT(16)
#define HostCmd_BSS_MODE_IBSS 0x0002
#define HostCmd_BSS_MODE_ANY 0x0003
@@ -550,6 +550,7 @@ enum mwifiex_channel_flags {
#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_RXBA_SYNC 0x00000059
+#define EVENT_UNKNOWN_DEBUG 0x00000063
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
@@ -1084,8 +1085,7 @@ struct host_cmd_ds_802_11_mac_address {
};
struct host_cmd_ds_mac_control {
- __le16 action;
- __le16 reserved;
+ __le32 action;
};
struct host_cmd_ds_mac_multicast_adr {
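The action field grows from two __le16 words to a single __le32 because the new HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE flag is BIT(16), which no longer fits in 16 bits. A trivial standalone check of that:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	/* BIT(13) still fits the old 16-bit action word, BIT(16) does not */
	uint32_t dyn_bw = BIT(16);

	printf("BIT(13) = 0x%04lx (fits u16)\n", BIT(13));
	printf("BIT(16) = 0x%08x (needs u32)\n", dyn_bw);
	return 0;
}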
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index b36cb3fef358..756948385b60 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -92,7 +92,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++)
memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key));
priv->wep_key_curr_index = 0;
- priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
+ priv->curr_pkt_filter = HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE |
+ HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
HostCmd_ACT_MAC_ETHERNETII_ENABLE;
priv->beacon_period = 100; /* beacon interval */
@@ -408,8 +409,6 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
- int idx;
-
if (!adapter) {
pr_err("%s: adapter is NULL\n", __func__);
return;
@@ -427,23 +426,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
mwifiex_free_cmd_buffer(adapter);
- for (idx = 0; idx < adapter->num_mem_types; idx++) {
- struct memory_type_mapping *entry =
- &adapter->mem_type_mapping_tbl[idx];
-
- if (entry->mem_ptr) {
- vfree(entry->mem_ptr);
- entry->mem_ptr = NULL;
- }
- entry->mem_size = 0;
- }
-
- if (adapter->drv_info_dump) {
- vfree(adapter->drv_info_dump);
- adapter->drv_info_dump = NULL;
- adapter->drv_info_size = 0;
- }
-
if (adapter->sleep_cfm)
dev_kfree_skb_any(adapter->sleep_cfm);
}
@@ -656,10 +638,9 @@ void mwifiex_free_priv(struct mwifiex_private *priv)
* - Free the adapter
* - Notify completion
*/
-int
+void
mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
{
- int ret = -EINPROGRESS;
struct mwifiex_private *priv;
s32 i;
unsigned long flags;
@@ -667,15 +648,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
/* mwifiex already shutdown */
if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
- return 0;
-
- adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
- /* wait for mwifiex_process to complete */
- if (adapter->mwifiex_processing) {
- mwifiex_dbg(adapter, WARN,
- "main process is still running\n");
- return ret;
- }
+ return;
/* cancel current command */
if (adapter->curr_cmd) {
@@ -726,11 +699,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
mwifiex_adapter_cleanup(adapter);
spin_unlock(&adapter->mwifiex_lock);
-
- /* Notify completion */
- ret = mwifiex_shutdown_fw_complete(adapter);
-
- return ret;
+ adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index e5c3a8aa3929..5ebca1d0cfc7 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -248,15 +248,14 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
if (adapter->mwifiex_processing || adapter->main_locked) {
adapter->more_task_flag = true;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
- goto exit_main_proc;
+ return 0;
} else {
adapter->mwifiex_processing = true;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
process_start:
do {
- if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
- (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
break;
/* For non-USB interfaces, If we process interrupts first, it
@@ -464,9 +463,6 @@ process_start:
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
-exit_main_proc:
- if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING)
- mwifiex_shutdown_drv(adapter);
return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_main_process);
@@ -645,16 +641,14 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
- adapter->init_wait_q_woken = false;
-
- if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
+ mwifiex_shutdown_drv(adapter);
}
- adapter->surprise_removed = true;
- mwifiex_terminate_workqueue(adapter);
+
init_failed = true;
done:
if (adapter->cal_data) {
@@ -1032,7 +1026,7 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
}
EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
-void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
{
void *p;
char drv_version[64];
@@ -1042,21 +1036,17 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
int i, idx;
struct netdev_queue *txq;
struct mwifiex_debug_info *debug_info;
-
- if (adapter->drv_info_dump) {
- vfree(adapter->drv_info_dump);
- adapter->drv_info_dump = NULL;
- adapter->drv_info_size = 0;
- }
+ void *drv_info_dump;
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
- adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
+ /* memory allocated here should be freed in mwifiex_upload_device_dump */
+ drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
- if (!adapter->drv_info_dump)
- return;
+ if (!drv_info_dump)
+ return 0;
- p = (char *)(adapter->drv_info_dump);
+ p = (char *)(drv_info_dump);
p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1140,18 +1130,20 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
kfree(debug_info);
}
- adapter->drv_info_size = p - adapter->drv_info_dump;
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
+ *drv_info = drv_info_dump;
+ return p - drv_info_dump;
}
EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
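With the new prototypes, the dump path allocates the driver-info buffer and returns its size, and mwifiex_upload_device_dump() consumes and frees it. A hedged sketch of the call sequence the changed signatures imply (not compilable on its own; it assumes the mwifiex headers, and example_fw_dump_path is a made-up caller):

static void example_fw_dump_path(struct mwifiex_adapter *adapter)
{
	void *drv_info = NULL;
	int drv_info_size;

	drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
	/* ... trigger and collect the firmware memory segments here ... */
	mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
	/* drv_info has been vfree()d by the upload helper; do not reuse it */
}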
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
+ int drv_info_size)
{
u8 idx, *dump_data, *fw_dump_ptr;
u32 dump_len;
dump_len = (strlen("========Start dump driverinfo========\n") +
- adapter->drv_info_size +
+ drv_info_size +
strlen("\n========End dump========\n"));
for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -1181,8 +1173,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
fw_dump_ptr += strlen("========Start dump driverinfo========\n");
- memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
- fw_dump_ptr += adapter->drv_info_size;
+ memcpy(fw_dump_ptr, drv_info, drv_info_size);
+ fw_dump_ptr += drv_info_size;
strcpy(fw_dump_ptr, "\n========End dump========\n");
fw_dump_ptr += strlen("\n========End dump========\n");
@@ -1220,18 +1212,12 @@ done:
struct memory_type_mapping *entry =
&adapter->mem_type_mapping_tbl[idx];
- if (entry->mem_ptr) {
- vfree(entry->mem_ptr);
- entry->mem_ptr = NULL;
- }
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
entry->mem_size = 0;
}
- if (adapter->drv_info_dump) {
- vfree(adapter->drv_info_dump);
- adapter->drv_info_dump = NULL;
- adapter->drv_info_size = 0;
- }
+ vfree(drv_info);
}
EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
@@ -1362,7 +1348,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
* This function gets called during PCIe function level reset. Required
* code is extracted from mwifiex_remove_card()
*/
-static int
+int
mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
@@ -1399,11 +1385,8 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
}
mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
- adapter->init_wait_q_woken = false;
- if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
+ mwifiex_shutdown_drv(adapter);
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
@@ -1434,24 +1417,18 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
exit_return:
return 0;
}
+EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
/* This function gets called during PCIe function level reset. Required
* code is extracted from mwifiex_add_card()
*/
-static int
-mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done,
- struct mwifiex_if_ops *if_ops, u8 iface_type)
+int
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
{
- char fw_name[32];
- struct pcie_service_card *card = adapter->card;
-
mwifiex_init_lock_list(adapter);
if (adapter->if_ops.up_dev)
adapter->if_ops.up_dev(adapter);
- adapter->iface_type = iface_type;
- adapter->fw_done = fw_done;
-
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
init_waitqueue_head(&adapter->init_wait_q);
@@ -1488,18 +1465,12 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done,
* mwifiex_register_dev()
*/
mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
- strcpy(fw_name, adapter->fw_name);
- strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
- adapter->tx_buf_size = card->pcie.tx_buf_size;
- adapter->ext_scan = card->pcie.can_ext_scan;
if (mwifiex_init_hw_fw(adapter, false)) {
- strcpy(adapter->fw_name, fw_name);
mwifiex_dbg(adapter, ERROR,
"%s: firmware init failed\n", __func__);
goto err_init_fw;
}
- strcpy(adapter->fw_name, fw_name);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
complete_all(adapter->fw_done);
@@ -1509,43 +1480,22 @@ err_init_fw:
mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
+
+err_kmalloc:
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
mwifiex_dbg(adapter, ERROR,
"info: %s: shutdown mwifiex\n", __func__);
- adapter->init_wait_q_woken = false;
-
- if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
+ mwifiex_shutdown_drv(adapter);
}
-err_kmalloc:
- mwifiex_terminate_workqueue(adapter);
- adapter->surprise_removed = true;
complete_all(adapter->fw_done);
mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
return -1;
}
-
-/* This function processes pre and post PCIe function level resets.
- * It performs software cleanup without touching PCIe specific code.
- * Also, during initialization PCIe stuff is skipped.
- */
-void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
-{
- struct mwifiex_if_ops if_ops;
-
- if (!prepare) {
- mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops,
- adapter->iface_type);
- } else {
- memcpy(&if_ops, &adapter->if_ops,
- sizeof(struct mwifiex_if_ops));
- mwifiex_shutdown_sw(adapter);
- }
-}
-EXPORT_SYMBOL_GPL(mwifiex_do_flr);
+EXPORT_SYMBOL_GPL(mwifiex_reinit_sw);
static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv)
{
@@ -1569,13 +1519,13 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
struct device *dev = adapter->dev;
if (!dev->of_node)
- return;
+ goto err_exit;
adapter->dt_node = dev->of_node;
adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0);
if (!adapter->irq_wakeup) {
- dev_info(dev, "fail to parse irq_wakeup from device tree\n");
- return;
+ dev_dbg(dev, "fail to parse irq_wakeup from device tree\n");
+ goto err_exit;
}
ret = devm_request_irq(dev, adapter->irq_wakeup,
@@ -1595,7 +1545,7 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
return;
err_exit:
- adapter->irq_wakeup = 0;
+ adapter->irq_wakeup = -1;
}
/*
@@ -1681,17 +1631,13 @@ err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
- pr_debug("info: %s: shutdown mwifiex\n", __func__);
- adapter->init_wait_q_woken = false;
-
- if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
- }
err_registerdev:
adapter->surprise_removed = true;
mwifiex_terminate_workqueue(adapter);
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
+ pr_debug("info: %s: shutdown mwifiex\n", __func__);
+ mwifiex_shutdown_drv(adapter);
+ }
err_kmalloc:
mwifiex_free_adapter(adapter);
@@ -1741,11 +1687,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, CMD,
"cmd: calling mwifiex_shutdown_drv...\n");
- adapter->init_wait_q_woken = false;
- if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
+ mwifiex_shutdown_drv(adapter);
mwifiex_dbg(adapter, CMD,
"cmd: mwifiex_shutdown_drv done\n");
if (atomic_read(&adapter->rx_pending) ||
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 5c9bd944b6ea..5c8297207f33 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -248,7 +248,6 @@ enum MWIFIEX_HARDWARE_STATUS {
MWIFIEX_HW_STATUS_INITIALIZING,
MWIFIEX_HW_STATUS_INIT_DONE,
MWIFIEX_HW_STATUS_RESET,
- MWIFIEX_HW_STATUS_CLOSING,
MWIFIEX_HW_STATUS_NOT_READY
};
@@ -530,7 +529,7 @@ struct mwifiex_private {
u8 tx_timeout_cnt;
struct net_device *netdev;
struct net_device_stats stats;
- u16 curr_pkt_filter;
+ u32 curr_pkt_filter;
u32 bss_mode;
u32 pkt_tx_ctrl;
u16 tx_power_level;
@@ -995,8 +994,6 @@ struct mwifiex_adapter {
u8 key_api_major_ver, key_api_minor_ver;
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
- void *drv_info_dump;
- u32 drv_info_size;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
bool mfg_mode;
@@ -1041,9 +1038,7 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
-int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
-
-int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
+void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
@@ -1644,8 +1639,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
-void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
+int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
+ int drv_info_size);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1670,5 +1666,6 @@ void mwifiex_debugfs_remove(void);
void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
#endif
-void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
+int mwifiex_reinit_sw(struct mwifiex_adapter *adapter);
+int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter);
#endif /* !_MWIFIEX_MAIN_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 4db07da81d8d..a0d918094889 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -31,8 +31,6 @@
#define PCIE_VERSION "1.0"
#define DRV_NAME "Marvell mwifiex PCIe"
-static u8 user_rmmod;
-
static struct mwifiex_if_ops pcie_ops;
static const struct of_device_id mwifiex_pcie_of_match_table[] = {
@@ -51,6 +49,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
return 0;
}
+static void mwifiex_pcie_work(struct work_struct *work);
+
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
size_t size, int flags)
@@ -79,6 +79,42 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
}
/*
+ * This function writes data into PCIE card register.
+ */
+static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ iowrite32(data, card->pci_mmap1 + reg);
+
+ return 0;
+}
+
+/* This function reads data from PCIE card register.
+ */
+static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ *data = ioread32(card->pci_mmap1 + reg);
+ if (*data == 0xffffffff)
+ return 0xffffffff;
+
+ return 0;
+}
+
+/* This function reads u8 data from PCIE card register. */
+static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
+ int reg, u8 *data)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ *data = ioread8(card->pci_mmap1 + reg);
+
+ return 0;
+}
+
+/*
* This function reads sleep cookie and checks if FW is ready
*/
static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
@@ -219,6 +255,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
card->pcie.num_mem_types = data->num_mem_types;
card->pcie.can_ext_scan = data->can_ext_scan;
+ INIT_WORK(&card->work, mwifiex_pcie_work);
}
/* device tree node parsing and platform specific configuration*/
@@ -245,6 +282,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
+ const struct mwifiex_pcie_card_reg *reg;
+ u32 fw_status;
+ int ret;
card = pci_get_drvdata(pdev);
@@ -254,7 +294,15 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- if (user_rmmod && !adapter->mfg_mode) {
+ cancel_work_sync(&card->work);
+
+ reg = card->pcie.reg;
+ if (reg)
+ ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
+ else
+ fw_status = -1;
+
+ if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) {
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -269,7 +317,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
{
- user_rmmod = 1;
mwifiex_pcie_remove(pdev);
return;
@@ -330,7 +377,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
* Cleanup all software without cleaning anything related to
* PCIe and HW.
*/
- mwifiex_do_flr(adapter, prepare);
+ mwifiex_shutdown_sw(adapter);
adapter->surprise_removed = true;
} else {
/* Kernel stores and restores PCIe function context before and
@@ -338,7 +385,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
* and firmware including firmware redownload
*/
adapter->surprise_removed = false;
- mwifiex_do_flr(adapter, prepare);
+ mwifiex_reinit_sw(adapter);
}
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
}
@@ -369,43 +416,6 @@ static struct pci_driver __refdata mwifiex_pcie = {
};
/*
- * This function writes data into PCIE card register.
- */
-static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
-{
- struct pcie_service_card *card = adapter->card;
-
- iowrite32(data, card->pci_mmap1 + reg);
-
- return 0;
-}
-
-/*
- * This function reads data from PCIE card register.
- */
-static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
-{
- struct pcie_service_card *card = adapter->card;
-
- *data = ioread32(card->pci_mmap1 + reg);
- if (*data == 0xffffffff)
- return 0xffffffff;
-
- return 0;
-}
-
-/* This function reads u8 data from PCIE card register. */
-static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
- int reg, u8 *data)
-{
- struct pcie_service_card *card = adapter->card;
-
- *data = ioread8(card->pci_mmap1 + reg);
-
- return 0;
-}
-
-/*
* This function adds delay loop to ensure FW is awake before proceeding.
*/
static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
@@ -429,16 +439,25 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
struct pcie_service_card *card = adapter->card;
u8 *buffer;
u32 sleep_cookie, count;
+ struct sk_buff *cmdrsp = card->cmdrsp_buf;
for (count = 0; count < max_delay_loop_cnt; count++) {
- buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
- sleep_cookie = *(u32 *)buffer;
+ pci_dma_sync_single_for_cpu(card->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie),
+ PCI_DMA_FROMDEVICE);
+ buffer = cmdrsp->data;
+ sleep_cookie = READ_ONCE(*(u32 *)buffer);
if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
mwifiex_dbg(adapter, INFO,
"sleep cookie found at count %d\n", count);
break;
}
+ pci_dma_sync_single_for_device(card->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie),
+ PCI_DMA_FROMDEVICE);
usleep_range(20, 30);
}
@@ -450,7 +469,6 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
/* This function wakes up the card by reading fw_status register. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
- u32 fw_status;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -460,10 +478,10 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
if (reg->sleep_cookie)
mwifiex_pcie_dev_wakeup_delay(adapter);
- /* Reading fw_status register will wakeup device */
- if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
+ /* Accessing fw_status register will wakeup device */
+ if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
mwifiex_dbg(adapter, ERROR,
- "Reading fw_status register failed\n");
+ "Writing fw_status register failed\n");
return -1;
}
@@ -1681,7 +1699,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, CMD,
"info: Rx CMD Response\n");
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+ if (adapter->curr_cmd)
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+ else
+ pci_dma_sync_single_for_cpu(card->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_UPLD_SIZE,
+ PCI_DMA_FROMDEVICE);
/* Unmap the command as a response has been received. */
if (card->cmd_buf) {
@@ -1694,10 +1718,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
rx_len = le16_to_cpu(pkt_len);
skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
skb_trim(skb, rx_len);
- skb_pull(skb, INTF_HEADER_LEN);
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+ pci_dma_sync_single_for_device(card->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_SLEEP_COOKIE_SIZE,
+ PCI_DMA_FROMDEVICE);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1707,6 +1734,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
}
mwifiex_delay_for_sleep_cookie(adapter,
MWIFIEX_MAX_DELAY_COUNT);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
+ skb_pull(skb, INTF_HEADER_LEN);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1724,6 +1754,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ skb_pull(skb, INTF_HEADER_LEN);
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
/* Take the pointer and set it to CMD node and will
@@ -2325,79 +2356,41 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
}
}
}
- while (pcie_ireg & HOST_INTR_MASK) {
- if (pcie_ireg & HOST_INTR_DNLD_DONE) {
- pcie_ireg &= ~HOST_INTR_DNLD_DONE;
- mwifiex_dbg(adapter, INTR,
- "info: TX DNLD Done\n");
- ret = mwifiex_pcie_send_data_complete(adapter);
- if (ret)
- return ret;
- }
- if (pcie_ireg & HOST_INTR_UPLD_RDY) {
- pcie_ireg &= ~HOST_INTR_UPLD_RDY;
- mwifiex_dbg(adapter, INTR,
- "info: Rx DATA\n");
- ret = mwifiex_pcie_process_recv_data(adapter);
- if (ret)
- return ret;
- }
- if (pcie_ireg & HOST_INTR_EVENT_RDY) {
- pcie_ireg &= ~HOST_INTR_EVENT_RDY;
- mwifiex_dbg(adapter, INTR,
- "info: Rx EVENT\n");
- ret = mwifiex_pcie_process_event_ready(adapter);
- if (ret)
- return ret;
- }
-
- if (pcie_ireg & HOST_INTR_CMD_DONE) {
- pcie_ireg &= ~HOST_INTR_CMD_DONE;
- if (adapter->cmd_sent) {
- mwifiex_dbg(adapter, INTR,
- "info: CMD sent Interrupt\n");
- adapter->cmd_sent = false;
- }
- /* Handle command response */
- ret = mwifiex_pcie_process_cmd_complete(adapter);
- if (ret)
- return ret;
- if (adapter->hs_activated)
- return ret;
- }
-
- if (card->msi_enable) {
- spin_lock_irqsave(&adapter->int_lock, flags);
- adapter->int_status = 0;
- spin_unlock_irqrestore(&adapter->int_lock, flags);
- }
-
- if (mwifiex_pcie_ok_to_access_hw(adapter)) {
- if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
- &pcie_ireg)) {
- mwifiex_dbg(adapter, ERROR,
- "Read register failed\n");
- return -1;
- }
-
- if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
- if (mwifiex_write_reg(adapter,
- PCIE_HOST_INT_STATUS,
- ~pcie_ireg)) {
- mwifiex_dbg(adapter, ERROR,
- "Write register failed\n");
- return -1;
- }
- }
+ if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+ pcie_ireg &= ~HOST_INTR_DNLD_DONE;
+ mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n");
+ ret = mwifiex_pcie_send_data_complete(adapter);
+ if (ret)
+ return ret;
+ }
+ if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+ pcie_ireg &= ~HOST_INTR_UPLD_RDY;
+ mwifiex_dbg(adapter, INTR, "info: Rx DATA\n");
+ ret = mwifiex_pcie_process_recv_data(adapter);
+ if (ret)
+ return ret;
+ }
+ if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+ pcie_ireg &= ~HOST_INTR_EVENT_RDY;
+ mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n");
+ ret = mwifiex_pcie_process_event_ready(adapter);
+ if (ret)
+ return ret;
+ }
+ if (pcie_ireg & HOST_INTR_CMD_DONE) {
+ pcie_ireg &= ~HOST_INTR_CMD_DONE;
+ if (adapter->cmd_sent) {
+ mwifiex_dbg(adapter, INTR,
+ "info: CMD sent Interrupt\n");
+ adapter->cmd_sent = false;
}
- if (!card->msi_enable) {
- spin_lock_irqsave(&adapter->int_lock, flags);
- pcie_ireg |= adapter->int_status;
- adapter->int_status = 0;
- spin_unlock_irqrestore(&adapter->int_lock, flags);
- }
+ /* Handle command response */
+ ret = mwifiex_pcie_process_cmd_complete(adapter);
+ if (ret)
+ return ret;
}
+
mwifiex_dbg(adapter, INTR,
"info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
@@ -2715,31 +2708,35 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
{
- mwifiex_drv_info_dump(adapter);
+ int drv_info_size;
+ void *drv_info;
+
+ drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
mwifiex_pcie_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter);
+ mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
}
-static unsigned long iface_work_flags;
-static struct mwifiex_adapter *save_adapter;
static void mwifiex_pcie_work(struct work_struct *work)
{
+ struct pcie_service_card *card =
+ container_of(work, struct pcie_service_card, work);
+
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
- &iface_work_flags))
- mwifiex_pcie_device_dump_work(save_adapter);
+ &card->work_flags))
+ mwifiex_pcie_device_dump_work(card->adapter);
}
-static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
/* This function dumps FW information */
static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
{
- save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
+ struct pcie_service_card *card = adapter->card;
+
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- schedule_work(&pcie_work);
+ schedule_work(&card->work);
}
/*
@@ -2752,7 +2749,7 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
* - Allocate command response ring buffer
* - Allocate sleep cookie buffer
*/
-static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
+static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
int ret;
@@ -2861,13 +2858,16 @@ err_enable_dev:
* - Command response ring buffer
* - Sleep cookie buffer
*/
-static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
struct pci_dev *pdev = card->dev;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int ret;
+ u32 fw_status;
- if (user_rmmod) {
+ ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
+ if (fw_status == FIRMWARE_READY_PCIE) {
mwifiex_dbg(adapter, INFO,
"Clearing driver ready signature\n");
if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
@@ -3058,7 +3058,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
* - Allocate event BD ring buffers
* - Allocate command response ring buffer
* - Allocate sleep cookie buffer
- * Part of mwifiex_pcie_init(), not reset the PCIE registers
+ * Part of mwifiex_init_pcie(), not reset the PCIE registers
*/
static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
{
@@ -3067,6 +3067,17 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
struct pci_dev *pdev = card->dev;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ /* Bluetooth is not on pcie interface. Download Wifi only firmware
+ * during pcie FLR, so that bluetooth part of firmware which is
+ * already running doesn't get affected.
+ */
+ strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
+
+ /* tx_buf_size might be changed to 3584 by firmware during
+ * data transfer, we should reset it to default size.
+ */
+ adapter->tx_buf_size = card->pcie.tx_buf_size;
+
card->cmdrsp_buf = NULL;
ret = mwifiex_pcie_create_txbd_ring(adapter);
if (ret) {
@@ -3128,7 +3139,6 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
adapter->seq_num = 0;
- adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
if (reg->sleep_cookie)
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
@@ -3141,8 +3151,8 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
}
static struct mwifiex_if_ops pcie_ops = {
- .init_if = mwifiex_pcie_init,
- .cleanup_if = mwifiex_pcie_cleanup,
+ .init_if = mwifiex_init_pcie,
+ .cleanup_if = mwifiex_cleanup_pcie,
.check_fw_status = mwifiex_check_fw_status,
.check_winner_status = mwifiex_check_winner_status,
.prog_fw = mwifiex_prog_fw_w_helper,
@@ -3168,49 +3178,7 @@ static struct mwifiex_if_ops pcie_ops = {
.up_dev = mwifiex_pcie_up_dev,
};
-/*
- * This function initializes the PCIE driver module.
- *
- * This registers the device with PCIE bus.
- */
-static int mwifiex_pcie_init_module(void)
-{
- int ret;
-
- pr_debug("Marvell PCIe Driver\n");
-
- /* Clear the flag in case user removes the card. */
- user_rmmod = 0;
-
- ret = pci_register_driver(&mwifiex_pcie);
- if (ret)
- pr_err("Driver register failed!\n");
- else
- pr_debug("info: Driver registered successfully!\n");
-
- return ret;
-}
-
-/*
- * This function cleans up the PCIE driver.
- *
- * The following major steps are followed for cleanup -
- * - Resume the device if its suspended
- * - Disconnect the device if connected
- * - Shutdown the firmware
- * - Unregister the device from PCIE bus.
- */
-static void mwifiex_pcie_cleanup_module(void)
-{
- /* Set the flag as user is removing this module. */
- user_rmmod = 1;
-
- cancel_work_sync(&pcie_work);
- pci_unregister_driver(&mwifiex_pcie);
-}
-
-module_init(mwifiex_pcie_init_module);
-module_exit(mwifiex_pcie_cleanup_module);
+module_pci_driver(mwifiex_pcie);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index ae3365d1c34e..00e8ee5ad4a8 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -116,6 +116,7 @@
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
+#define MWIFIEX_SLEEP_COOKIE_SIZE 4
#define MWIFIEX_MAX_DELAY_COUNT 100
struct mwifiex_pcie_card_reg {
@@ -386,6 +387,8 @@ struct pcie_service_card {
#endif
struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS];
struct mwifiex_msix_context share_irq_ctx;
+ struct work_struct work;
+ unsigned long work_flags;
};
static inline int
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 740d79cd91fa..a4b356d267f9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -31,23 +31,9 @@
#define SDIO_VERSION "1.0"
-/* The mwifiex_sdio_remove() callback function is called when
- * user removes this module from kernel space or ejects
- * the card from the slot. The driver handles these 2 cases
- * differently.
- * If the user is removing the module, the few commands (FUNC_SHUTDOWN,
- * HS_CANCEL etc.) are sent to the firmware.
- * If the card is removed, there is no need to send these command.
- *
- * The variable 'user_rmmod' is used to distinguish these two
- * scenarios. This flag is initialized as FALSE in case the card
- * is removed, and will be set to TRUE for module removal when
- * module_exit function is called.
- */
-static u8 user_rmmod;
+static void mwifiex_sdio_work(struct work_struct *work);
static struct mwifiex_if_ops sdio_ops;
-static unsigned long iface_work_flags;
static struct memory_type_mapping generic_mem_type_map[] = {
{"DUMP", NULL, 0, 0xDD},
@@ -116,7 +102,6 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
init_completion(&card->fw_done);
card->func = func;
- card->device_id = id;
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
@@ -136,6 +121,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->fw_dump_enh = data->fw_dump_enh;
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
+ INIT_WORK(&card->work, mwifiex_sdio_work);
}
sdio_claim_host(func);
@@ -212,6 +198,171 @@ static int mwifiex_sdio_resume(struct device *dev)
return 0;
}
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+ int ret = -1;
+
+ sdio_writeb(func, data, reg, &ret);
+ return ret;
+}
+
+/* This function writes data into SDIO card register.
+ */
+static int
+mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret;
+
+ sdio_claim_host(card->func);
+ ret = mwifiex_write_reg_locked(card->func, reg, data);
+ sdio_release_host(card->func);
+
+ return ret;
+}
+
+/* This function reads data from SDIO card register.
+ */
+static int
+mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret = -1;
+ u8 val;
+
+ sdio_claim_host(card->func);
+ val = sdio_readb(card->func, reg, &ret);
+ sdio_release_host(card->func);
+
+ *data = val;
+
+ return ret;
+}
+
+/* This function writes multiple data into SDIO card memory.
+ *
+ * This does not work in suspended mode.
+ */
+static int
+mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
+ u8 *buffer, u32 pkt_len, u32 port)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret;
+ u8 blk_mode =
+ (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
+ u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
+ u32 blk_cnt =
+ (blk_mode ==
+ BLOCK_MODE) ? (pkt_len /
+ MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
+ u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
+
+ if (adapter->is_suspended) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
+ return -1;
+ }
+
+ sdio_claim_host(card->func);
+
+ ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
+
+ sdio_release_host(card->func);
+
+ return ret;
+}
+
+/* This function reads multiple data from SDIO card memory.
+ */
+static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
+ u32 len, u32 port, u8 claim)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret;
+ u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
+ : BLOCK_MODE;
+ u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
+ u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
+ : len;
+ u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
+
+ if (claim)
+ sdio_claim_host(card->func);
+
+ ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
+
+ if (claim)
+ sdio_release_host(card->func);
+
+ return ret;
+}
+
+/* This function reads the firmware status.
+ */
+static int
+mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ const struct mwifiex_sdio_card_reg *reg = card->reg;
+ u8 fws0, fws1;
+
+ if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
+ return -1;
+
+ if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
+ return -1;
+
+ *dat = (u16)((fws1 << 8) | fws0);
+ return 0;
+}
+
+/* This function checks the firmware status in card.
+ */
+static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+ u32 poll_num)
+{
+ int ret = 0;
+ u16 firmware_stat;
+ u32 tries;
+
+ for (tries = 0; tries < poll_num; tries++) {
+ ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
+ if (ret)
+ continue;
+ if (firmware_stat == FIRMWARE_READY_SDIO) {
+ ret = 0;
+ break;
+ }
+
+ msleep(100);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+ int ret = 0;
+ u8 winner = 0;
+ struct sdio_mmc_card *card = adapter->card;
+
+ if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
+ return -1;
+
+ if (winner)
+ adapter->winner = 0;
+ else
+ adapter->winner = 1;
+
+ return ret;
+}
+
/*
* SDIO remove.
*
@@ -223,6 +374,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
+ int ret = 0;
+ u16 firmware_stat;
card = sdio_get_drvdata(func);
if (!card)
@@ -234,9 +387,12 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
+ cancel_work_sync(&card->work);
+
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
- if (user_rmmod && !adapter->mfg_mode) {
+ ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
+ if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) {
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -364,111 +520,6 @@ static struct sdio_driver mwifiex_sdio = {
}
};
-/* Write data into SDIO card register. Caller claims SDIO device. */
-static int
-mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
-{
- int ret = -1;
- sdio_writeb(func, data, reg, &ret);
- return ret;
-}
-
-/*
- * This function writes data into SDIO card register.
- */
-static int
-mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
-{
- struct sdio_mmc_card *card = adapter->card;
- int ret;
-
- sdio_claim_host(card->func);
- ret = mwifiex_write_reg_locked(card->func, reg, data);
- sdio_release_host(card->func);
-
- return ret;
-}
-
-/*
- * This function reads data from SDIO card register.
- */
-static int
-mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
-{
- struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
- u8 val;
-
- sdio_claim_host(card->func);
- val = sdio_readb(card->func, reg, &ret);
- sdio_release_host(card->func);
-
- *data = val;
-
- return ret;
-}
-
-/*
- * This function writes multiple data into SDIO card memory.
- *
- * This does not work in suspended mode.
- */
-static int
-mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
- u8 *buffer, u32 pkt_len, u32 port)
-{
- struct sdio_mmc_card *card = adapter->card;
- int ret;
- u8 blk_mode =
- (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
- u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
- u32 blk_cnt =
- (blk_mode ==
- BLOCK_MODE) ? (pkt_len /
- MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
- u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
-
- if (adapter->is_suspended) {
- mwifiex_dbg(adapter, ERROR,
- "%s: not allowed while suspended\n", __func__);
- return -1;
- }
-
- sdio_claim_host(card->func);
-
- ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
-
- sdio_release_host(card->func);
-
- return ret;
-}
-
-/*
- * This function reads multiple data from SDIO card memory.
- */
-static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
- u32 len, u32 port, u8 claim)
-{
- struct sdio_mmc_card *card = adapter->card;
- int ret;
- u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
- : BLOCK_MODE;
- u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
- u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
- : len;
- u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
-
- if (claim)
- sdio_claim_host(card->func);
-
- ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
-
- if (claim)
- sdio_release_host(card->func);
-
- return ret;
-}
-
/*
* This function wakes up the card.
*
@@ -755,27 +806,6 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
}
/*
- * This function reads the firmware status.
- */
-static int
-mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
-{
- struct sdio_mmc_card *card = adapter->card;
- const struct mwifiex_sdio_card_reg *reg = card->reg;
- u8 fws0, fws1;
-
- if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
- return -1;
-
- if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
- return -1;
-
- *dat = (u16) ((fws1 << 8) | fws0);
-
- return 0;
-}
-
-/*
* This function disables the host interrupt.
*
* The host interrupt mask is read, the disable bit is reset and
@@ -1080,51 +1110,6 @@ done:
}
/*
- * This function checks the firmware status in card.
- */
-static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
- u32 poll_num)
-{
- int ret = 0;
- u16 firmware_stat;
- u32 tries;
-
- for (tries = 0; tries < poll_num; tries++) {
- ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
- if (ret)
- continue;
- if (firmware_stat == FIRMWARE_READY_SDIO) {
- ret = 0;
- break;
- } else {
- msleep(100);
- ret = -1;
- }
- }
-
- return ret;
-}
-
-/* This function checks if WLAN is the winner.
- */
-static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
-{
- int ret = 0;
- u8 winner = 0;
- struct sdio_mmc_card *card = adapter->card;
-
- if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
- return -1;
-
- if (winner)
- adapter->winner = 0;
- else
- adapter->winner = 1;
-
- return ret;
-}
-
-/*
* This function decode sdio aggreation pkt.
*
* Based on the the data block size and pkt_len,
@@ -2204,54 +2189,25 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
port, card->mp_data_port_mask);
}
-static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
+ struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
- const struct sdio_device_id *device_id = card->device_id;
-
- /* TODO mmc_hw_reset does not require destroying and re-probing the
- * whole adapter. Hence there was no need to for this rube-goldberg
- * design to reload the fw from an external workqueue. If we don't
- * destroy the adapter we could reload the fw from
- * mwifiex_main_work_queue directly.
- * The real difficulty with fw reset is to restore all the user
- * settings applied through ioctl. By destroying and recreating the
- * adapter, we take the easy way out, since we rely on user space to
- * restore them. We assume that user space will treat the new
- * incarnation of the adapter(interfaces) as if they had been just
- * discovered and initializes them from scratch.
- */
- mwifiex_sdio_remove(func);
-
- /*
- * Normally, we would let the driver core take care of releasing these.
- * But we're not letting the driver core handle this one. See above
- * TODO.
- */
- sdio_set_drvdata(func, NULL);
- devm_kfree(&func->dev, card);
+ mwifiex_shutdown_sw(adapter);
/* power cycle the adapter */
sdio_claim_host(func);
mmc_hw_reset(func->card->host);
sdio_release_host(func);
- mwifiex_sdio_probe(func, device_id);
-}
-
-static struct mwifiex_adapter *save_adapter;
-static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
-{
- struct sdio_mmc_card *card = adapter->card;
-
- /* TODO card pointer is unprotected. If the adapter is removed
- * physically, sdio core might trigger mwifiex_sdio_remove, before this
- * workqueue is run, which will destroy the adapter struct. When this
- * workqueue eventually exceutes it will dereference an invalid adapter
- * pointer
+ /* Previous save_adapter won't be valid after this. We will cancel
+ * pending work requests.
*/
- mwifiex_recreate_adapter(card);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+
+ mwifiex_reinit_sw(adapter);
}
/* This function read/write firmware */
@@ -2542,47 +2498,53 @@ done:
static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ int drv_info_size;
+ void *drv_info;
- mwifiex_drv_info_dump(adapter);
+ drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
if (card->fw_dump_enh)
mwifiex_sdio_generic_fw_dump(adapter);
else
mwifiex_sdio_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter);
+ mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
}
static void mwifiex_sdio_work(struct work_struct *work)
{
+ struct sdio_mmc_card *card =
+ container_of(work, struct sdio_mmc_card, work);
+
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
- &iface_work_flags))
- mwifiex_sdio_device_dump_work(save_adapter);
+ &card->work_flags))
+ mwifiex_sdio_device_dump_work(card->adapter);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
- &iface_work_flags))
- mwifiex_sdio_card_reset_work(save_adapter);
+ &card->work_flags))
+ mwifiex_sdio_card_reset_work(card->adapter);
}
-static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
/* This function resets the card */
static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
{
- save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
+ struct sdio_mmc_card *card = adapter->card;
+
+ if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- schedule_work(&sdio_work);
+ schedule_work(&card->work);
}
/* This function dumps FW information */
static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
{
- save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
+ struct sdio_mmc_card *card = adapter->card;
+
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
- schedule_work(&sdio_work);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ schedule_work(&card->work);
}
/* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2678,6 +2640,33 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
return p - drv_buf;
}
+/* sdio device/function initialization, code is extracted
+ * from init_if handler and register_dev handler.
+ */
+static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ u8 sdio_ireg;
+
+ sdio_claim_host(card->func);
+ sdio_enable_func(card->func);
+ sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+ sdio_release_host(card->func);
+
+ /* tx_buf_size might be changed to 3584 by firmware during
+ * data transfer, we will reset to default size.
+ */
+ adapter->tx_buf_size = card->tx_buf_size;
+
+ /* Read the host_int_status_reg for ACK the first interrupt got
+ * from the bootloader. If we don't do this we get a interrupt
+ * as soon as we register the irq.
+ */
+ mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg);
+
+ mwifiex_init_sdio_ioport(adapter);
+}
+
static struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
@@ -2703,43 +2692,10 @@ static struct mwifiex_if_ops sdio_ops = {
.reg_dump = mwifiex_sdio_reg_dump,
.device_dump = mwifiex_sdio_device_dump,
.deaggr_pkt = mwifiex_deaggr_sdio_pkt,
+ .up_dev = mwifiex_sdio_up_dev,
};
-/*
- * This function initializes the SDIO driver.
- *
- * This registers the device with SDIO bus.
- */
-static int
-mwifiex_sdio_init_module(void)
-{
- /* Clear the flag in case user removes the card. */
- user_rmmod = 0;
-
- return sdio_register_driver(&mwifiex_sdio);
-}
-
-/*
- * This function cleans up the SDIO driver.
- *
- * The following major steps are followed for cleanup -
- * - Resume the device if its suspended
- * - Disconnect the device if connected
- * - Shutdown the firmware
- * - Unregister the device from SDIO bus.
- */
-static void
-mwifiex_sdio_cleanup_module(void)
-{
- /* Set the flag as user is removing this module. */
- user_rmmod = 1;
- cancel_work_sync(&sdio_work);
-
- sdio_unregister_driver(&mwifiex_sdio);
-}
-
-module_init(mwifiex_sdio_init_module);
-module_exit(mwifiex_sdio_cleanup_module);
+module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index cdbf3a3ac7f9..dccf7fd1aef3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -268,8 +268,8 @@ struct sdio_mmc_card {
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
- /* needed for card reset */
- const struct sdio_device_id *device_id;
+ struct work_struct work;
+ unsigned long work_flags;
};
struct mwifiex_sdio_device {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 125e448712dd..2f1f4d190b28 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -76,7 +76,7 @@ mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv,
*/
static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, u16 *action)
+ u16 cmd_action, u32 *action)
{
struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
@@ -89,7 +89,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL);
cmd->size =
cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN);
- mac_ctrl->action = cpu_to_le16(*action);
+ mac_ctrl->action = cpu_to_le32(*action);
return 0;
}
@@ -1935,8 +1935,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
mwifiex_dbg(priv->adapter, ERROR,
"0x%x command not supported by firmware\n",
cmd_no);
- return -EOPNOTSUPP;
- }
+ return -EOPNOTSUPP;
+ }
/* Prepare command */
switch (cmd_no) {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 9df0c4dc06ed..d63d163eb1ec 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -824,7 +824,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_RSSI_LOW:
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
@@ -839,7 +839,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_RSSI_HIGH:
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
@@ -1009,6 +1009,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->event_skb->len -
sizeof(eventcause));
break;
+ /* Debugging event; not used, but let's not print an ERROR for it. */
+ case EVENT_UNKNOWN_DEBUG:
+ mwifiex_dbg(adapter, EVENT, "event: debug\n");
+ break;
default:
mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 644f3a248741..1532ac9cee0b 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1159,8 +1159,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
encrypt_key.is_rx_seq_valid = true;
}
} else {
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
- return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c563160b3b6b..9cf3334adf4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -22,7 +22,6 @@
#define USB_VERSION "1.0"
-static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct usb_device_id mwifiex_usb_table[] = {
@@ -618,7 +617,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter || !adapter->priv_num)
return;
- if (user_rmmod && !adapter->mfg_mode) {
+ if (card->udev->state != USB_STATE_NOTATTACHED && !adapter->mfg_mode) {
mwifiex_deauthenticate_all(adapter);
mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
@@ -1201,43 +1200,7 @@ static struct mwifiex_if_ops usb_ops = {
.is_port_ready = mwifiex_usb_is_port_ready,
};
-/* This function initializes the USB driver module.
- *
- * This registers the device with USB bus.
- */
-static int mwifiex_usb_init_module(void)
-{
- int ret;
-
- pr_debug("Marvell USB8797 Driver\n");
-
- ret = usb_register(&mwifiex_usb_driver);
- if (ret)
- pr_err("Driver register failed!\n");
- else
- pr_debug("info: Driver registered successfully!\n");
-
- return ret;
-}
-
-/* This function cleans up the USB driver.
- *
- * The following major steps are followed in .disconnect for cleanup:
- * - Resume the device if its suspended
- * - Disconnect the device if connected
- * - Shutdown the firmware
- * - Unregister the device from USB bus.
- */
-static void mwifiex_usb_cleanup_module(void)
-{
- /* set the flag as user is removing this module */
- user_rmmod = 1;
-
- usb_deregister(&mwifiex_usb_driver);
-}
-
-module_init(mwifiex_usb_init_module);
-module_exit(mwifiex_usb_cleanup_module);
+module_usb_driver(mwifiex_usb_driver);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 18fbb96a46e9..b1ab8da121dd 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -146,21 +146,6 @@ int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
}
/*
- * Firmware shutdown complete callback handler.
- *
- * This function sets the hardware status to not ready and wakes up
- * the function waiting on the init wait queue for the firmware
- * shutdown to complete.
- */
-int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter)
-{
- adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
- adapter->init_wait_q_woken = true;
- wake_up_interruptible(&adapter->init_wait_q);
- return 0;
-}
-
-/*
* This function sends init/shutdown command
* to firmware.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 085c5b423bdf..19874439ac40 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1200,7 +1200,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
out:
/*
* Enable beaconing again.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 9832fd50c793..791434de8052 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1349,7 +1349,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
out:
/*
* Enable beaconing again.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index cd3ab5a9e98d..0d2670a56c4c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
* If the csr_mutex is already held then the _lock variants must
* be used instead.
*/
-static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u16 *value)
{
@@ -66,7 +66,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
*value = le16_to_cpu(reg);
}
-static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u16 *value)
{
@@ -77,16 +77,7 @@ static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
*value = le16_to_cpu(reg);
}
-static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- void *value, const u16 length)
-{
- rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
- USB_VENDOR_REQUEST_IN, offset,
- value, length);
-}
-
-static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u16 value)
{
@@ -96,7 +87,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
&reg, sizeof(reg));
}
-static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u16 value)
{
@@ -106,7 +97,7 @@ static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
&reg, sizeof(reg), REGISTER_TIMEOUT);
}
-static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
void *value, const u16 length)
{
@@ -1170,7 +1161,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* USB devices cannot blindly pass the skb->len as the
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index 95c1d7c0a2f3..256496bfbafb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -72,6 +72,7 @@
#define RF5592 0x000f
#define RF3070 0x3070
#define RF3290 0x3290
+#define RF5350 0x5350
#define RF5360 0x5360
#define RF5362 0x5362
#define RF5370 0x5370
@@ -2286,6 +2287,8 @@ struct mac_iveiv_entry {
#define RFCSR30_RX_H20M FIELD8(0x04)
#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
+#define RF3322_RFCSR30_TX_H20M FIELD8(0x01)
+#define RF3322_RFCSR30_RX_H20M FIELD8(0x02)
/*
* RFCSR 31:
@@ -2301,6 +2304,12 @@ struct mac_iveiv_entry {
#define RFCSR36_RF_BS FIELD8(0x80)
/*
+ * RFCSR 34:
+ */
+#define RFCSR34_TX0_EXT_PA FIELD8(0x04)
+#define RFCSR34_TX1_EXT_PA FIELD8(0x08)
+
+/*
* RFCSR 38:
*/
#define RFCSR38_RX_LO1_EN FIELD8(0x20)
@@ -2312,6 +2321,18 @@ struct mac_iveiv_entry {
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
/*
+ * RFCSR 41:
+ */
+#define RFCSR41_BIT1 FIELD8(0x01)
+#define RFCSR41_BIT4 FIELD8(0x08)
+
+/*
+ * RFCSR 42:
+ */
+#define RFCSR42_BIT1 FIELD8(0x01)
+#define RFCSR42_BIT4 FIELD8(0x08)
+
+/*
* RFCSR 49:
*/
#define RFCSR49_TX FIELD8(0x3f)
@@ -2324,6 +2345,8 @@ struct mac_iveiv_entry {
* RFCSR 50:
*/
#define RFCSR50_TX FIELD8(0x3f)
+#define RFCSR50_TX0_EXT_PA FIELD8(0x02)
+#define RFCSR50_TX1_EXT_PA FIELD8(0x10)
#define RFCSR50_EP FIELD8(0xc0)
/* bits for RT3593 */
#define RFCSR50_TX_LO1_EN FIELD8(0x20)
@@ -2471,6 +2494,8 @@ enum rt2800_eeprom_word {
* INTERNAL_TX_ALC: 0: disable, 1: enable
* BT_COEXIST: 0: disable, 1: enable
* DAC_TEST: 0: disable, 1: enable
+ * EXTERNAL_TX0_PA: 0: disable, 1: enable (only on RT3352)
+ * EXTERNAL_TX1_PA: 0: disable, 1: enable (only on RT3352)
*/
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
@@ -2487,6 +2512,8 @@ enum rt2800_eeprom_word {
#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352 FIELD16(0x4000)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352 FIELD16(0x8000)
/*
* EEPROM frequency
@@ -2979,7 +3006,9 @@ struct rt2800_drv_data {
u8 bbp26;
u8 txmixer_gain_24g;
u8 txmixer_gain_5g;
+ u8 max_psdu;
unsigned int tbtt_tick;
+ unsigned int ampdu_factor_cnt[4];
DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
};
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 4fb79e05078f..8223a1520316 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -373,9 +373,6 @@ static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
int i, count;
rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- if (rt2x00_get_field32(reg, WLAN_EN))
- return 0;
-
rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
@@ -967,8 +964,6 @@ static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev)
bcn_num++;
}
- WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing);
-
rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg);
rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32));
@@ -1019,7 +1014,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with TXWI and padding to register.
@@ -1418,6 +1413,23 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
+static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev)
+{
+ u8 i, max_psdu;
+ u32 reg;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+
+ for (i = 0; i < 3; i++)
+ if (drv_data->ampdu_factor_cnt[i] > 0)
+ break;
+
+ max_psdu = min(drv_data->max_psdu, i);
+
+ rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, max_psdu);
+ rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+}
+
int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
@@ -1426,6 +1438,17 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
/*
+ * Limit global maximum TX AMPDU length to smallest value of all
+ * connected stations. In AP mode this can be suboptimal, but we
+ * do not have a choice if some connected STA is not capable of
+ * receiving the same amount of data as the others.
+ */
+ if (sta->ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++;
+ rt2800_set_max_psdu_len(rt2x00dev);
+ }
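
The counters above index the 802.11n maximum A-MPDU length exponent (0..3) advertised by each HT station, and rt2800_set_max_psdu_len() programs the smallest exponent still in use, capped by the chipset limit kept in max_psdu. A minimal sketch of that selection, using made-up counter contents rather than live driver state:

	/* Hypothetical example: two STAs advertised factor 1, one STA factor 3. */
	unsigned int ampdu_factor_cnt[4] = { 0, 2, 0, 1 };
	u8 chip_limit = 2;	/* e.g. drv_data->max_psdu on a non-USB RT2872E */
	u8 i;

	for (i = 0; i < 3; i++)
		if (ampdu_factor_cnt[i] > 0)
			break;		/* smallest factor in use: i == 1 */

	/* min(2, 1) == 1 is the value written to MAX_LEN_CFG_MAX_PSDU */
	chip_limit = min(chip_limit, i);
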
+
+ /*
* Search for the first free WCID entry and return the corresponding
* index.
*/
@@ -1457,9 +1480,16 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2800_sta_add);
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid)
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+ int wcid = sta_priv->wcid;
+
+ if (sta->ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--;
+ rt2800_set_max_psdu_len(rt2x00dev);
+ }
if (wcid > WCID_END)
return 0;
@@ -1902,9 +1932,14 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
rt2x00dev->lna_gain = lna_gain;
}
+static inline bool rt2800_clk_is_20mhz(struct rt2x00_dev *rt2x00dev)
+{
+ return clk_get_rate(rt2x00dev->clk) == 20000000;
+}
+
#define FREQ_OFFSET_BOUND 0x5f
-static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev)
{
u8 freq_offset, prev_freq_offset;
u8 rfcsr, prev_rfcsr;
@@ -2075,7 +2110,9 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
+
+ usleep_range(1000, 1500);
+
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
@@ -2380,7 +2417,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
}
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
if (conf_is_ht40(conf)) {
txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
@@ -2570,7 +2607,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
if (rf->channel <= 14) {
if (rf->channel == 6)
@@ -2611,7 +2648,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
else
rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2676,7 +2713,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
if (rf->channel <= 14) {
int idx = rf->channel-1;
@@ -2723,6 +2760,13 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 59,
r59_non_bt[idx]);
+ } else if (rt2x00_rt(rt2x00dev, RT5350)) {
+ static const char r59_non_bt[] = {0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
+ 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
+
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_non_bt[idx]);
}
}
}
@@ -2971,7 +3015,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
}
/* TODO proper frequency adjustment */
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
/* TODO merge with others */
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
@@ -3160,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
case RF3070:
+ case RF5350:
case RF5360:
case RF5362:
case RF5370:
@@ -3178,6 +3223,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rf(rt2x00dev, RF3070) ||
rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
+ rt2x00_rf(rt2x00dev, RF5350) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5362) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3185,8 +3231,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_rf(rt2x00dev, RF5390) ||
rt2x00_rf(rt2x00dev, RF5392)) {
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
- rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+ if (rt2x00_rf(rt2x00dev, RF3322)) {
+ rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M,
+ conf_is_ht40(conf));
+ rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_RX_H20M,
+ conf_is_ht40(conf));
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M,
+ conf_is_ht40(conf));
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M,
+ conf_is_ht40(conf));
+ }
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
@@ -3197,11 +3252,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* Change BBP settings
*/
+
if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+
rt2800_bbp_write(rt2x00dev, 27, 0x0);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 27, 0x20);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
} else if (rt2x00_rt(rt2x00dev, RT3593)) {
if (rf->channel > 14) {
/* Disable CCK Packet detection on 5GHz */
@@ -3407,7 +3469,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
}
- msleep(1);
+ usleep_range(1000, 1500);
/*
* Clear channel statistic counters
@@ -3419,7 +3481,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* Clear update flag
*/
- if (rt2x00_rt(rt2x00dev, RT3352)) {
+ if (rt2x00_rt(rt2x00dev, RT3352) ||
+ rt2x00_rt(rt2x00dev, RT5350)) {
rt2800_bbp_read(rt2x00dev, 49, &bbp);
rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
rt2800_bbp_write(rt2x00dev, 49, bbp);
@@ -4300,21 +4363,25 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF5350:
case RF5360:
case RF5362:
case RF5370:
case RF5372:
case RF5390:
case RF5392:
+ case RF5592:
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
break;
default:
+ WARN_ONCE(1, "Unsupported RF chipset %x for VCO recalibration",
+ rt2x00dev->chip.rf);
return;
}
- mdelay(1);
+ usleep_range(1000, 1500);
rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
if (rt2x00dev->rf_channel <= 14) {
@@ -4536,6 +4603,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
*/
static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u32 reg;
u16 eeprom;
unsigned int i;
@@ -4678,6 +4746,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ } else if (rt2x00_rt(rt2x00dev, RT5350)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -4702,14 +4772,18 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
- rt2x00_rt(rt2x00dev, RT2883) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
- rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
- else
- rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
- rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
- rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+ if (rt2x00_is_usb(rt2x00dev)) {
+ drv_data->max_psdu = 3;
+ } else if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
+ rt2x00_rt(rt2x00dev, RT2883) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) {
+ drv_data->max_psdu = 2;
+ } else {
+ drv_data->max_psdu = 1;
+ }
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, drv_data->max_psdu);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
rt2800_register_read(rt2x00dev, LED_CFG, &reg);
@@ -4725,8 +4799,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
- rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
- rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
@@ -4858,10 +4932,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
- rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7);
rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
IEEE80211_MAX_RTS_THRESHOLD);
- rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 1);
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
@@ -5319,9 +5393,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- rt2800_bbp_write(rt2x00dev, 83, 0x6a);
-
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ if (rt2x00_rt(rt2x00dev, RT5350)) {
+ rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+ rt2800_bbp_write(rt2x00dev, 84, 0x9a);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ }
rt2800_bbp_write(rt2x00dev, 86, 0x38);
@@ -5335,9 +5413,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 104, 0x92);
- rt2800_bbp_write(rt2x00dev, 105, 0x34);
-
- rt2800_bbp_write(rt2x00dev, 106, 0x05);
+ if (rt2x00_rt(rt2x00dev, RT5350)) {
+ rt2800_bbp_write(rt2x00dev, 105, 0x3c);
+ rt2800_bbp_write(rt2x00dev, 106, 0x03);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+ rt2800_bbp_write(rt2x00dev, 106, 0x05);
+ }
rt2800_bbp_write(rt2x00dev, 120, 0x50);
@@ -5362,6 +5444,16 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 143, 0xa2);
rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+
+ if (rt2x00_rt(rt2x00dev, RT5350)) {
+ /* Antenna Software OFDM */
+ rt2800_bbp_write(rt2x00dev, 150, 0x40);
+ /* Antenna Software CCK */
+ rt2800_bbp_write(rt2x00dev, 151, 0x30);
+ rt2800_bbp_write(rt2x00dev, 152, 0xa3);
+ /* Clear previously selected antenna */
+ rt2800_bbp_write(rt2x00dev, 154, 0);
+ }
}
static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
@@ -5662,6 +5754,7 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_init_bbp_3290(rt2x00dev);
break;
case RT3352:
+ case RT5350:
rt2800_init_bbp_3352(rt2x00dev);
break;
case RT3390:
@@ -6135,6 +6228,12 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
{
+ int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags);
+ int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1,
+ &rt2x00dev->cap_flags);
+ u8 rfcsr;
+
rt2800_rf_init_calibration(rt2x00dev, 30);
rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
@@ -6170,15 +6269,30 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+ rfcsr = 0x01;
+ if (!tx0_int_pa)
+ rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1);
+ if (!tx1_int_pa)
+ rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1);
+ rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
- rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
- rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+ rfcsr = 0x52;
+ if (tx0_int_pa) {
+ rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 41, rfcsr);
+ rfcsr = 0x52;
+ if (tx1_int_pa) {
+ rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 42, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
@@ -6186,15 +6300,20 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
- rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
- rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
- rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
- rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
- rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+ rfcsr = 0x2d;
+ if (!tx0_int_pa)
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1);
+ if (!tx1_int_pa)
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+ rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52));
+ rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0));
+ rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2));
+ rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0));
+ rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52));
+ rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0));
+ rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49));
+ rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0));
rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
@@ -6415,7 +6534,7 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
@@ -6446,6 +6565,76 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
/* TODO: enable stream mode support */
}
+static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x49);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ if (rt2800_clk_is_20mhz(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x1f);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0xd0);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0c);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa6);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+}
+
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -6641,7 +6830,7 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
msleep(1);
- rt2800_adjust_freq_offset(rt2x00dev);
+ rt2800_freq_cal_mode1(rt2x00dev);
/* Enable DC filter */
if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
@@ -6683,6 +6872,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT3593:
rt2800_init_rfcsr_3593(rt2x00dev);
break;
+ case RT5350:
+ rt2800_init_rfcsr_5350(rt2x00dev);
+ break;
case RT5390:
rt2800_init_rfcsr_5390(rt2x00dev);
break;
@@ -7060,6 +7252,10 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+ else if (rt2x00_rt(rt2x00dev, RT3352))
+ rf = RF3322;
+ else if (rt2x00_rt(rt2x00dev, RT5350))
+ rf = RF5350;
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -7078,6 +7274,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3290:
case RF3320:
case RF3322:
+ case RF5350:
case RF5360:
case RF5362:
case RF5370:
@@ -7149,7 +7346,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Detect if this device has Bluetooth co-existence.
*/
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
+ if (!rt2x00_rt(rt2x00dev, RT3352) &&
+ rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
__set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags);
/*
@@ -7178,6 +7376,22 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
EIRP_MAX_TX_POWER_LIMIT)
__set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags);
+ /*
+ * Detect if device uses internal or external PA
+ */
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+ if (rt2x00_rt(rt2x00dev, RT3352)) {
+ if (!rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
+ __set_bit(CAPABILITY_INTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags);
+ if (!rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352))
+ __set_bit(CAPABILITY_INTERNAL_PA_TX1,
+ &rt2x00dev->cap_flags);
+ }
+
return 0;
}
@@ -7322,6 +7536,27 @@ static const struct rf_channel rf_vals_3x[] = {
{173, 0x61, 0, 9},
};
+/*
+ * RF value list for rt3xxx with Xtal20MHz
+ * Supports: 2.4 GHz (all) (RF3322)
+ */
+static const struct rf_channel rf_vals_3x_xtal20[] = {
+ {1, 0xE2, 2, 0x14},
+ {2, 0xE3, 2, 0x14},
+ {3, 0xE4, 2, 0x14},
+ {4, 0xE5, 2, 0x14},
+ {5, 0xE6, 2, 0x14},
+ {6, 0xE7, 2, 0x14},
+ {7, 0xE8, 2, 0x14},
+ {8, 0xE9, 2, 0x14},
+ {9, 0xEA, 2, 0x14},
+ {10, 0xEB, 2, 0x14},
+ {11, 0xEC, 2, 0x14},
+ {12, 0xED, 2, 0x14},
+ {13, 0xEE, 2, 0x14},
+ {14, 0xF0, 2, 0x18},
+};
+
static const struct rf_channel rf_vals_5592_xtal20[] = {
/* Channel, N, K, mod, R */
{1, 482, 4, 10, 3},
@@ -7470,6 +7705,13 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
+ * Change default retry settings to values corresponding more closely
+ * to the rate[0].count setting of the minstrel rate control algorithm.
+ */
+ rt2x00dev->hw->wiphy->retry_short = 2;
+ rt2x00dev->hw->wiphy->retry_long = 2;
+
+ /*
* Initialize all hw fields.
*/
ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
@@ -7536,6 +7778,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3290:
case RF3320:
case RF3322:
+ case RF5350:
case RF5360:
case RF5362:
case RF5370:
@@ -7543,7 +7786,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF5390:
case RF5392:
spec->num_channels = 14;
- spec->channels = rf_vals_3x;
+ if (rt2800_clk_is_20mhz(rt2x00dev))
+ spec->channels = rf_vals_3x_xtal20;
+ else
+ spec->channels = rf_vals_3x;
break;
case RF3052:
@@ -7593,7 +7839,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT;
- spec->ht.ampdu_factor = 3;
+ spec->ht.ampdu_factor = (rx_chains > 1) ? 3 : 2;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (tx_chains != rx_chains) {
@@ -7669,12 +7915,14 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF5350:
case RF5360:
case RF5362:
case RF5370:
case RF5372:
case RF5390:
case RF5392:
+ case RF5592:
__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
break;
}
@@ -7708,6 +7956,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
case RT3390:
case RT3572:
case RT3593:
+ case RT5350:
case RT5390:
case RT5392:
case RT5592:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 83f1a44fb9b4..0a8b4df665fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -183,7 +183,7 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct ieee80211_key_conf *key);
int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid);
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta);
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index f38c44061b5b..205a7b8ac8a7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -123,7 +123,7 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
return false;
- tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
+ tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500));
if (unlikely(tout))
rt2x00_dbg(entry->queue->rt2x00dev,
"TX status timeout for entry %d in queue %d\n",
@@ -436,47 +436,6 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
}
/*
- * Watchdog handlers
- */
-static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
- if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
- rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n");
-
- rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012);
-
- for (i = 0; i < 10; i++) {
- udelay(10);
- if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
- break;
- }
-
- rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
- }
-
- rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
- if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
- rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n");
-
- rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
-
- for (i = 0; i < 10; i++) {
- udelay(10);
- if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
- break;
- }
-
- rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
- }
-
- rt2x00usb_watchdog(rt2x00dev);
-}
-
-/*
* TX descriptor initialization
*/
static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
@@ -643,10 +602,9 @@ static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+ rt2800usb_entry_txstatus_timeout(entry))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- else if (rt2800usb_entry_txstatus_timeout(entry))
- rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
else
break;
}
@@ -877,7 +835,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
- .watchdog = rt2800usb_watchdog,
.start_queue = rt2800usb_start_queue,
.kick_queue = rt2x00usb_kick_queue,
.stop_queue = rt2800usb_stop_queue,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index aa3d4ceef4ad..26869b3bef45 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <linux/hrtimer.h>
#include <linux/average.h>
#include <linux/usb.h>
+#include <linux/clk.h>
#include <net/mac80211.h>
@@ -169,6 +170,7 @@ struct rt2x00_chip {
#define RT3572 0x3572
#define RT3593 0x3593
#define RT3883 0x3883 /* WSOC */
+#define RT5350 0x5350 /* WSOC 2.4GHz */
#define RT5390 0x5390 /* 2.4GHz */
#define RT5392 0x5392 /* 2.4GHz */
#define RT5592 0x5592
@@ -627,7 +629,7 @@ struct rt2x00lib_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int (*sta_remove) (struct rt2x00_dev *rt2x00dev,
- int wcid);
+ struct ieee80211_sta *sta);
};
/*
@@ -716,6 +718,8 @@ enum rt2x00_capability_flags {
CAPABILITY_DOUBLE_ANTENNA,
CAPABILITY_BT_COEXIST,
CAPABILITY_VCO_RECALIBRATION,
+ CAPABILITY_INTERNAL_PA_TX0,
+ CAPABILITY_INTERNAL_PA_TX1,
};
/*
@@ -834,6 +838,10 @@ struct rt2x00_dev {
struct mutex csr_mutex;
/*
+ * Mutex to synchronize config and link tuner.
+ */
+ struct mutex conf_mutex;
+ /*
* Current packet filter configuration for the device.
* This contains all currently active FIF_* flags sent
* to us by mac80211 during configure_filter().
@@ -1005,6 +1013,9 @@ struct rt2x00_dev {
unsigned int extra_tx_headroom;
struct usb_anchor *anchor;
+
+ /* Clock for System On Chip devices. */
+ struct clk *clk;
};
struct rt2x00_bar_list_entry {
@@ -1389,11 +1400,11 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
*/
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
- enum rt2x00_dump_type type, struct sk_buff *skb);
+ enum rt2x00_dump_type type, struct queue_entry *entry);
#else
static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type,
- struct sk_buff *skb)
+ struct queue_entry *entry)
{
}
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 6a1f508d472f..350507458ddc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -249,6 +249,22 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
*/
rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags);
+ if (conf->flags & IEEE80211_CONF_PS)
+ set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
+ else
+ clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
+
+ if (conf->flags & IEEE80211_CONF_MONITOR)
+ set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+ else
+ clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+
+ rt2x00dev->curr_band = conf->chandef.chan->band;
+ rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
+ rt2x00dev->tx_power = conf->power_level;
+ rt2x00dev->short_retry = conf->short_frame_max_tx_count;
+ rt2x00dev->long_retry = conf->long_frame_max_tx_count;
+
/*
* Some configuration changes affect the link quality
* which means we need to reset the link tuner.
@@ -271,20 +287,4 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
&rt2x00dev->autowakeup_work,
autowake_timeout - 15);
}
-
- if (conf->flags & IEEE80211_CONF_PS)
- set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
- else
- clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
-
- if (conf->flags & IEEE80211_CONF_MONITOR)
- set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
- else
- clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
-
- rt2x00dev->curr_band = conf->chandef.chan->band;
- rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
- rt2x00dev->tx_power = conf->power_level;
- rt2x00dev->short_retry = conf->short_frame_max_tx_count;
- rt2x00dev->long_retry = conf->long_frame_max_tx_count;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 72ae530e4a3b..964aefdc11f0 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -157,9 +157,10 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
}
void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
- enum rt2x00_dump_type type, struct sk_buff *skb)
+ enum rt2x00_dump_type type, struct queue_entry *entry)
{
struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
+ struct sk_buff *skb = entry->skb;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
@@ -196,8 +197,8 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
- dump_hdr->queue_index = skbdesc->entry->queue->qid;
- dump_hdr->entry_index = skbdesc->entry->entry_idx;
+ dump_hdr->queue_index = entry->queue->qid;
+ dump_hdr->entry_index = entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index eb7b71443657..dd6678109b7e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -87,9 +87,6 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
*/
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
- rt2x00link_start_agc(rt2x00dev);
- if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
- rt2x00link_start_vcocal(rt2x00dev);
/*
* Start watchdog monitoring.
@@ -112,9 +109,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Stop all queues
*/
- rt2x00link_stop_agc(rt2x00dev);
- if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
- rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
rt2x00queue_flush_queues(rt2x00dev, true);
@@ -369,7 +363,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* Send frame to debugfs immediately, after this call is completed
* we are going to overwrite the skb->cb array.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
/*
* Determine if the frame has been successfully transmitted and
@@ -778,7 +772,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
*/
rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry);
/*
* Initialize RX status information, and send frame
@@ -1319,6 +1313,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
+ mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
@@ -1441,21 +1436,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
- if (rt2x00_is_usb(rt2x00dev)) {
- usb_kill_anchored_urbs(rt2x00dev->anchor);
- hrtimer_cancel(&rt2x00dev->txstatus_timer);
- cancel_work_sync(&rt2x00dev->rxdone_work);
- cancel_work_sync(&rt2x00dev->txdone_work);
- }
-#endif
- if (rt2x00dev->workqueue)
- destroy_workqueue(rt2x00dev->workqueue);
-
- /*
- * Free the tx status fifo.
- */
- kfifo_free(&rt2x00dev->txstatus_fifo);
/*
* Kill the tx status tasklet.
@@ -1471,6 +1451,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
*/
rt2x00lib_uninitialize(rt2x00dev);
+ if (rt2x00dev->workqueue)
+ destroy_workqueue(rt2x00dev->workqueue);
+
+ /*
+ * Free the tx status fifo.
+ */
+ kfifo_free(&rt2x00dev->txstatus_fifo);
+
/*
* Free extra components
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h
index fb7c349ccc9c..9ddc1681b86a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h
@@ -29,9 +29,10 @@
* Interval defines
*/
#define WATCHDOG_INTERVAL round_jiffies_relative(HZ)
-#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ)
-#define AGC_INTERVAL round_jiffies_relative(4 * HZ)
-#define VCO_INTERVAL round_jiffies_relative(10 * HZ) /* 10 sec */
+#define LINK_TUNE_SECONDS 1
+#define LINK_TUNE_INTERVAL round_jiffies_relative(LINK_TUNE_SECONDS * HZ)
+#define AGC_SECONDS 4
+#define VCO_SECONDS 10
/*
* rt2x00_rate: Per rate device information
@@ -271,30 +272,6 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev);
void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev);
/**
- * rt2x00link_start_agc - Start periodic gain calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_start_vcocal - Start periodic VCO calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_stop_agc - Stop periodic gain calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_stop_vcocal - Stop periodic VCO calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev);
-
-/**
* rt2x00link_register - Initialize link tuning & watchdog functionality
* @rt2x00dev: Pointer to &struct rt2x00_dev.
*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
index 017188e5a736..2010a7715f21 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
@@ -233,15 +233,13 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
struct link *link = &rt2x00dev->link;
/*
- * Link tuning should only be performed when
- * an active sta interface exists. AP interfaces
- * don't need link tuning and monitor mode interfaces
- * should never have to work with link tuners.
+ * Single monitor mode interfaces should never have
+ * to work with link tuners.
*/
- if (!rt2x00dev->intf_sta_count)
+ if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
return;
- /**
+ /*
* While scanning, link tuning is disabled. By default
* the most sensitive settings will be used to make sure
* that all beacons and probe responses will be received
@@ -308,22 +306,11 @@ static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev)
qual->tx_failed = 0;
}
-static void rt2x00link_tuner(struct work_struct *work)
+static void rt2x00link_tuner_sta(struct rt2x00_dev *rt2x00dev, struct link *link)
{
- struct rt2x00_dev *rt2x00dev =
- container_of(work, struct rt2x00_dev, link.work.work);
- struct link *link = &rt2x00dev->link;
struct link_qual *qual = &rt2x00dev->link.qual;
/*
- * When the radio is shutting down we should
- * immediately cease all link tuning.
- */
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
- test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
- return;
-
- /*
* Update statistics.
*/
rt2x00dev->ops->lib->link_stats(rt2x00dev, qual);
@@ -360,6 +347,38 @@ static void rt2x00link_tuner(struct work_struct *work)
*/
if (rt2x00lib_antenna_diversity(rt2x00dev))
rt2x00link_reset_qual(rt2x00dev);
+}
+
+static void rt2x00link_tuner(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, link.work.work);
+ struct link *link = &rt2x00dev->link;
+
+ /*
+ * When the radio is shutting down we should
+ * immediately cease all link tuning.
+ */
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
+ test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
+ return;
+
+ /* Do not race with rt2x00mac_config(). */
+ mutex_lock(&rt2x00dev->conf_mutex);
+
+ if (rt2x00dev->intf_sta_count)
+ rt2x00link_tuner_sta(rt2x00dev, link);
+
+ if (rt2x00dev->ops->lib->gain_calibration &&
+ (link->count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0)
+ rt2x00dev->ops->lib->gain_calibration(rt2x00dev);
+
+ if (rt2x00dev->ops->lib->vco_calibration &&
+ rt2x00_has_cap_vco_recalibration(rt2x00dev) &&
+ (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0)
+ rt2x00dev->ops->lib->vco_calibration(rt2x00dev);
+
+ mutex_unlock(&rt2x00dev->conf_mutex);
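
With the dedicated AGC and VCO work items removed, both calibrations now piggy-back on the link tuner tick. A rough illustration of the resulting cadence, assuming only the LINK_TUNE_SECONDS, AGC_SECONDS and VCO_SECONDS values from the rt2x00lib.h hunk above:

	/* Illustration only: which tuner ticks would trigger each calibration. */
	u32 count, agc_runs = 0, vco_runs = 0;

	for (count = 1; count <= 20; count++) {
		if ((count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0)
			agc_runs++;	/* ticks 4, 8, 12, 16, 20 */
		if ((count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0)
			vco_runs++;	/* ticks 10 and 20 */
	}
	/* agc_runs == 5, vco_runs == 2 over 20 one-second tuner runs */

So with a one-second tuner interval the gain calibration keeps its former 4 s period and the VCO recalibration its former 10 s period.
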
/*
* Increase tuner counter, and reschedule the next link tuner run.
@@ -408,85 +427,8 @@ static void rt2x00link_watchdog(struct work_struct *work)
WATCHDOG_INTERVAL);
}
-void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev)
-{
- struct link *link = &rt2x00dev->link;
-
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- rt2x00dev->ops->lib->gain_calibration)
- ieee80211_queue_delayed_work(rt2x00dev->hw,
- &link->agc_work,
- AGC_INTERVAL);
-}
-
-void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev)
-{
- struct link *link = &rt2x00dev->link;
-
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- rt2x00dev->ops->lib->vco_calibration)
- ieee80211_queue_delayed_work(rt2x00dev->hw,
- &link->vco_work,
- VCO_INTERVAL);
-}
-
-void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev)
-{
- cancel_delayed_work_sync(&rt2x00dev->link.agc_work);
-}
-
-void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev)
-{
- cancel_delayed_work_sync(&rt2x00dev->link.vco_work);
-}
-
-static void rt2x00link_agc(struct work_struct *work)
-{
- struct rt2x00_dev *rt2x00dev =
- container_of(work, struct rt2x00_dev, link.agc_work.work);
- struct link *link = &rt2x00dev->link;
-
- /*
- * When the radio is shutting down we should
- * immediately cease the watchdog monitoring.
- */
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- return;
-
- rt2x00dev->ops->lib->gain_calibration(rt2x00dev);
-
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- ieee80211_queue_delayed_work(rt2x00dev->hw,
- &link->agc_work,
- AGC_INTERVAL);
-}
-
-static void rt2x00link_vcocal(struct work_struct *work)
-{
- struct rt2x00_dev *rt2x00dev =
- container_of(work, struct rt2x00_dev, link.vco_work.work);
- struct link *link = &rt2x00dev->link;
-
- /*
- * When the radio is shutting down we should
- * immediately cease the VCO calibration.
- */
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- return;
-
- rt2x00dev->ops->lib->vco_calibration(rt2x00dev);
-
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- ieee80211_queue_delayed_work(rt2x00dev->hw,
- &link->vco_work,
- VCO_INTERVAL);
-}
-
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
- INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
- if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
- INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 13da95a24cf7..ecc96312a370 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -320,6 +320,9 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
*/
rt2x00queue_stop_queue(rt2x00dev->rx);
+ /* Do not race with the link tuner. */
+ mutex_lock(&rt2x00dev->conf_mutex);
+
/*
* When we've just turned on the radio, we want to reprogram
* everything to ensure a consistent state
@@ -335,6 +338,8 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
*/
rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
+ mutex_unlock(&rt2x00dev->conf_mutex);
+
/* Turn RX back on */
rt2x00queue_start_queue(rt2x00dev->rx);
@@ -539,9 +544,8 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
- return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid);
+ return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
@@ -739,7 +743,8 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
tx_queue_for_each(rt2x00dev, queue)
- rt2x00queue_flush_queue(queue, drop);
+ if (!rt2x00queue_empty(queue))
+ rt2x00queue_flush_queue(queue, drop);
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index f0178fd4fe5f..da38d254c26f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -101,7 +101,7 @@ void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
unsigned int i;
for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
- msleep(10);
+ msleep(50);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 68b620b2462f..e1660b92b20c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -83,7 +83,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
*/
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = entry;
if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
dma_addr_t skb_dma;
@@ -306,13 +305,12 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_sta *sta_priv = NULL;
+ u8 density = 0;
if (sta) {
- txdesc->u.ht.mpdu_density =
- sta->ht_cap.ampdu_density;
-
sta_priv = sta_to_rt2x00_sta(sta);
txdesc->u.ht.wcid = sta_priv->wcid;
+ density = sta->ht_cap.ampdu_density;
}
/*
@@ -345,8 +343,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
return;
}
- txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
-
/*
* Only one STBC stream is supported for now.
*/
@@ -358,8 +354,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* frames that are intended to probe a specific tx rate.
*/
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
- !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+ !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+ txdesc->u.ht.mpdu_density = density;
+ txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
+ }
/*
* Set 40Mhz mode if necessary (for legacy rates this will
@@ -544,7 +543,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
- rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
+ rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
@@ -689,7 +688,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
goto out;
}
- skbdesc->entry = entry;
entry->skb = skb;
/*
@@ -774,7 +772,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
skbdesc = get_skb_frame_desc(intf->beacon->skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = intf->beacon;
/*
* Send beacon to hardware.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index 2233b911a1d7..22d18818e850 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -116,8 +116,6 @@ struct skb_frame_desc {
__le32 iv[2];
dma_addr_t skb_dma;
-
- struct queue_entry *entry;
};
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
index 69a0cdadb07f..29250f79c4a4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
@@ -93,6 +93,10 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
rt2x00dev->irq = platform_get_irq(pdev, 0);
rt2x00dev->name = pdev->dev.driver->name;
+ rt2x00dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rt2x00dev->clk))
+ rt2x00dev->clk = NULL;
+
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
retval = rt2x00soc_alloc_reg(rt2x00dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 6005e14213ca..c696f0ad6a68 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -517,7 +513,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
* Wait for a little while to give the driver
* the opportunity to recover itself.
*/
- msleep(10);
+ msleep(50);
}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
@@ -744,6 +740,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
+ cancel_work_sync(&rt2x00dev->rxdone_work);
+ cancel_work_sync(&rt2x00dev->txdone_work);
+
queue_for_each(rt2x00dev, queue)
rt2x00usb_free_entries(queue);
}
@@ -824,10 +825,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
if (retval)
goto exit_free_device;
- retval = rt2x00lib_probe_dev(rt2x00dev);
- if (retval)
- goto exit_free_reg;
-
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
@@ -835,10 +832,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
retval = -ENOMEM;
goto exit_free_reg;
}
-
init_usb_anchor(rt2x00dev->anchor);
+
+ retval = rt2x00lib_probe_dev(rt2x00dev);
+ if (retval)
+ goto exit_free_anchor;
+
return 0;
+exit_free_anchor:
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+
exit_free_reg:
rt2x00usb_free_reg(rt2x00dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 5306a3b2622d..973d418b8113 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -1903,8 +1903,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
- rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
- skbdesc->entry->entry_idx);
+ rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
@@ -1989,7 +1988,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with descriptor and padding to register.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 1a29c4d205a5..bb8d307a789f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -1557,7 +1557,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with descriptor and padding to register.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index df551b2b56eb..95e3993d8a33 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index f9e2050812ab..a41a29612582 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver
*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index a1178c5d6ad8..80fee699f58a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver - 8192e specific subdriver
*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index aef373028155..174631132b96 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver - 8723a specific subdriver
*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 02b8ddd98a95..c4b86a84a721 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver - 8723b specific subdriver
*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 3a86675020a2..e544dd1d618c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver
*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -48,7 +48,7 @@ static bool rtl8xxxu_dma_aggregation;
static int rtl8xxxu_dma_agg_timeout = -1;
static int rtl8xxxu_dma_agg_pages = -1;
-MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
@@ -6000,6 +6000,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
case 0x8176:
case 0x8178:
case 0x817f:
+ case 0x818b:
untested = 0;
break;
}
@@ -6196,6 +6197,12 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8723au_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192eu_fops},
+/* TP-Link TL-WN822N v4 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+/* D-Link DWA-131 rev E1, tested by David Patiño */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3319, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
/* Tested by Myckel Habets */
{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192eu_fops},
@@ -6347,6 +6354,13 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* found in rtl8192eu vendor driver */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab33, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
#endif
{ }
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 315ccfb2dff5..3d3e2e1ada6f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 4ac928bf1f8e..caea350f05aa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -207,8 +207,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
*highest supported RX rate
*/
if (rtlpriv->dm.supp_phymode_switch) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Support phy mode switch\n");
+ pr_info("Support phy mode switch\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
@@ -389,8 +388,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* <4> set mac->sband to wiphy->sband */
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
- rtlhal->current_bandtype);
+ pr_err("Err BAND %d\n",
+ rtlhal->current_bandtype);
}
}
/* <5> set hw caps */
@@ -476,6 +475,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
(void *)rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
(void *)rtl_fwevt_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
+ (void *)rtl_c2hcmd_wq_callback);
}
@@ -490,6 +491,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ps_work);
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
cancel_delayed_work(&rtlpriv->works.fwevt_wq);
+ cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
}
EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
@@ -544,7 +546,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
* mac80211 hw in _rtl_init_mac80211.
*/
if (rtl_regd_init(hw, rtl_reg_notifier)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n");
+ pr_err("REGD init failed\n");
return 1;
}
@@ -557,6 +559,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.rf_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.entry_list_lock);
+ spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
@@ -564,6 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.iqk_lock);
/* <5> init list */
INIT_LIST_HEAD(&rtlpriv->entry_list);
+ INIT_LIST_HEAD(&rtlpriv->c2hcmd_list);
rtlmac->link_state = MAC80211_NOLINK;
@@ -576,6 +580,7 @@ EXPORT_SYMBOL_GPL(rtl_init_core);
void rtl_deinit_core(struct ieee80211_hw *hw)
{
+ rtl_c2hcmd_launcher(hw, 0);
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@@ -1694,8 +1699,7 @@ void rtl_watchdog_wq_callback(void *data)
* we should reconnect this AP
*/
if (rtlpriv->link_info.roam_times >= 5) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "AP off, try to reconnect now\n");
+ pr_err("AP off, try to reconnect now\n");
rtlpriv->link_info.roam_times = 0;
ieee80211_connection_loss(
rtlpriv->mac80211.vif);
@@ -1731,6 +1735,93 @@ void rtl_fwevt_wq_callback(void *data)
rtlpriv->cfg->ops->c2h_command_handle(hw);
}
+
+void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flags;
+ struct rtl_c2hcmd *c2hcmd;
+
+ c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+
+ if (!c2hcmd)
+ goto label_err;
+
+ c2hcmd->val = kmalloc(len, GFP_KERNEL);
+
+ if (!c2hcmd->val)
+ goto label_err2;
+
+ /* fill data */
+ c2hcmd->tag = tag;
+ c2hcmd->len = len;
+ memcpy(c2hcmd->val, val, len);
+
+ /* enqueue */
+ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
+
+ list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+
+ /* wake up wq */
+ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0);
+
+ return;
+
+label_err2:
+ kfree(c2hcmd);
+
+label_err:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING,
+ "C2H cmd enqueue fail.\n");
+}
+EXPORT_SYMBOL(rtl_c2hcmd_enqueue);
+
+void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flags;
+ struct rtl_c2hcmd *c2hcmd;
+ int i;
+
+ for (i = 0; i < 200; i++) {
+ /* dequeue a task */
+ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
+
+ c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list,
+ struct rtl_c2hcmd, list);
+
+ if (c2hcmd)
+ list_del(&c2hcmd->list);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+
+ /* do it */
+ if (!c2hcmd)
+ break;
+
+ if (rtlpriv->cfg->ops->c2h_content_parsing && exec)
+ rtlpriv->cfg->ops->c2h_content_parsing(hw,
+ c2hcmd->tag, c2hcmd->len, c2hcmd->val);
+
+ /* free */
+ kfree(c2hcmd->val);
+
+ kfree(c2hcmd);
+ }
+}
+
+void rtl_c2hcmd_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+ struct rtl_works,
+ c2hcmd_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_c2hcmd_launcher(hw, 1);
+}
+
void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
{
struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
@@ -1886,8 +1977,7 @@ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -2086,65 +2176,6 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
}
EXPORT_SYMBOL_GPL(rtl_recognize_peer);
-/*********************************************************
- *
- * sysfs functions
- *
- *********************************************************/
-static ssize_t rtl_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct ieee80211_hw *hw = dev_get_drvdata(d);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
-}
-
-static ssize_t rtl_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ieee80211_hw *hw = dev_get_drvdata(d);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "%s is not in hex or decimal form.\n", buf);
- } else {
- rtlpriv->dbg.global_debuglevel = val;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "debuglevel:%x\n",
- rtlpriv->dbg.global_debuglevel);
- }
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- rtl_show_debug_level, rtl_store_debug_level);
-
-static struct attribute *rtl_sysfs_entries[] = {
-
- &dev_attr_debug_level.attr,
-
- NULL
-};
-
-/*
- * "name" is folder name witch will be
- * put in device directory like :
- * sys/devices/pci0000:00/0000:00:1c.4/
- * 0000:06:00.0/rtl_sysfs
- */
-struct attribute_group rtl_attribute_group = {
- .name = "rtlsysfs",
- .attrs = rtl_sysfs_entries,
-};
-EXPORT_SYMBOL_GPL(rtl_attribute_group);
-
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 74233d601a90..02ff0c5624a7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -136,6 +136,9 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
void rtl_watchdog_wq_callback(void *data);
void rtl_fwevt_wq_callback(void *data);
+void rtl_c2hcmd_wq_callback(void *data);
+void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
+void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
@@ -148,7 +151,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-extern struct attribute_group rtl_attribute_group;
void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
index d1454d4f08a5..20582df0465c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
@@ -1,4 +1,8 @@
-btcoexist-objs := halbtc8723b2ant.o \
+btcoexist-objs := halbtc8192e2ant.o \
+ halbtc8723b1ant.o \
+ halbtc8723b2ant.o \
+ halbtc8821a1ant.o \
+ halbtc8821a2ant.o \
halbtcoutsrc.o \
rtl_btc.o
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index a30af6cc21f3..ffa1f438424d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -59,9 +59,11 @@ static u32 glcoex_ver_8192e_2ant = 0x34;
/**************************************************************
* local function start with halbtc8192e2ant_
**************************************************************/
-static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
int btrssi = 0;
u8 btrssi_state = coex_sta->pre_bt_rssi_state;
@@ -70,84 +72,46 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
if (level_num == 2) {
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
- if (btrssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ if (btrssi >=
+ (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
btrssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
- } else {
+ else
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
- }
} else {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
- if (btrssi < rssi_thresh) {
+ if (btrssi < rssi_thresh)
btrssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
- } else {
+ else
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
- }
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
-
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
- if (btrssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ if (btrssi >=
+ (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
- } else {
+ else
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
- }
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi pre state = MEDIUM\n");
if (btrssi >= (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
btrssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
- } else if (btrssi < rssi_thresh) {
+ else if (btrssi < rssi_thresh)
btrssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
- } else {
+ else
btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Medium\n");
- }
} else {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
- if (btrssi < rssi_thresh1) {
+ if (btrssi < rssi_thresh1)
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
- } else {
+ else
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
- }
}
}
@@ -160,6 +124,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
u8 index, u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
int wifirssi = 0;
u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
@@ -171,30 +136,20 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
if (wifirssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
wifirssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
- } else {
+ else
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
- }
} else {
- if (wifirssi < rssi_thresh) {
+ if (wifirssi < rssi_thresh)
wifirssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
- } else {
+ else
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
- }
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -203,43 +158,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
if (wifirssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
- } else {
+ else
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
- }
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
if (wifirssi >= (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
wifirssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
- } else if (wifirssi < rssi_thresh) {
+ else if (wifirssi < rssi_thresh)
wifirssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
- } else {
+ else
wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Medium\n");
- }
} else {
- if (wifirssi < rssi_thresh1) {
+ if (wifirssi < rssi_thresh1)
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
- } else {
+ else
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
- }
}
}
@@ -250,6 +188,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static bool pre_bt_disabled;
static u32 bt_disable_cnt;
bool bt_active = true, bt_disabled = false;
@@ -273,26 +212,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -469,6 +408,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
@@ -488,12 +428,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -501,15 +441,16 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
h2c_parameter[0] |= BIT0; /* trigger */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -572,6 +513,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
bool bt_hson = false;
@@ -581,8 +523,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
if (!bt_link_info->bt_link_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "No BT link exists!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "No BT link exists!!!\n");
return algorithm;
}
@@ -597,27 +539,29 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "HID only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "HID only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "A2DP only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "PAN(HS) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(HS) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "PAN(EDR) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(EDR) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR;
}
@@ -626,21 +570,23 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + A2DP ==> SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
@@ -649,38 +595,44 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
if (stack_info->num_of_hid >= 2) {
- btc_alg_dbg(ALGO_TRACE,
- "HID*2 + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID*2 + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "HID + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
}
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -690,30 +642,34 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + HID + A2DP ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID + A2DP ==> HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "SCO + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -723,13 +679,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "HID + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -741,12 +699,14 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- btc_alg_dbg(ALGO_TRACE,
- "ErrorSCO+HID+A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "SCO+HID+A2DP+PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO+HID+A2DP+PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -760,6 +720,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
u8 dac_swinglvl)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
/* There are several type of dacswing
@@ -767,10 +728,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swinglvl;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -778,13 +739,14 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
u8 dec_btpwr_lvl)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = dec_btpwr_lvl;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_btpwr_lvl, h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ dec_btpwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -792,15 +754,17 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
bool force_exec, u8 dec_btpwr_lvl)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power level = %d\n",
- (force_exec ? "force to" : ""), dec_btpwr_lvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ force_exec ? "force to" : "", dec_btpwr_lvl);
coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
@@ -810,6 +774,7 @@ static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
bool enable_autoreport)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
@@ -817,10 +782,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
if (enable_autoreport)
h2c_parameter[0] |= BIT0;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_autoreport ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -829,17 +794,19 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_autoreport)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_autoreport) ? "Enabled" : "Disabled"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_autoreport) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_autoreport;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -853,16 +820,18 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swinglvl)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swinglvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swinglvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -878,10 +847,12 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
bool rx_rf_shrink_on)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -889,8 +860,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialized, we can use coex_dm->btRf0x1eBackup
*/
if (btcoexist->initilized) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -901,17 +872,19 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -926,10 +899,11 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
u32 level)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -947,22 +921,24 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swingon,
u32 dac_swinglvl)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
coex_dm->cur_dac_swing_on = dac_swingon;
coex_dm->cur_dac_swing_lvl = dac_swinglvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -978,10 +954,12 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
bool agc_table_en)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/* BB AGC Gain Table */
if (agc_table_en) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -989,8 +967,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
} else {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1003,17 +981,19 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- ((agc_table_en) ? "Enable" : "Disable"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ ((agc_table_en) ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en,
- coex_dm->cur_agc_table_en);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -1027,20 +1007,22 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -1049,30 +1031,32 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
- (force_exec ? "force to" : ""), val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW,
- "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c4, val0x6c8, val0x6cc);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+ (force_exec ? "force to" : ""), val0x6c0);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1121,14 +1105,15 @@ static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
bool enable)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1136,18 +1121,20 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d ",
- coex_dm->pre_ignore_wlan_act);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->cur_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1161,6 +1148,8 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
u8 byte2, u8 byte3, u8 byte4, u8 byte5)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
u8 h2c_parameter[5] = {0};
h2c_parameter[0] = byte1;
@@ -1175,11 +1164,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1203,20 +1192,22 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1340,11 +1331,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
u8 sstype)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 mimops = BTC_MIMO_PS_DYNAMIC;
u32 disra_mask = 0x0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], REAL set SS Type = %d\n", sstype);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], REAL set SS Type = %d\n", sstype);
disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
coex_dm->curra_masktype);
@@ -1376,9 +1368,11 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
bool force_exec, u8 new_sstype)
{
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_sstype);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec ? "force to" : ""), new_sstype);
coex_dm->cur_sstype = new_sstype;
if (!force_exec) {
@@ -1440,6 +1434,7 @@ static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool common = false, wifi_connected = false, wifi_busy = false;
bool bt_hson = false, low_pwr_disable = false;
@@ -1459,8 +1454,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ||
@@ -1496,8 +1491,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "Wifi connected + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT non connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1524,8 +1519,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hson)
return false;
- btc_alg_dbg(ALGO_TRACE,
- "Wifi connected + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1550,12 +1545,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "Wifi Connected-Busy + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "Wifi Connected-Idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Idle + BT Busy!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 1);
@@ -1581,9 +1576,11 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1678,8 +1675,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1782,9 +1779,11 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1873,8 +1872,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1968,9 +1967,11 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -2059,8 +2060,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2155,6 +2156,7 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
bool sco_hid, bool tx_pause,
u8 max_interval)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static int up, dn, m, n, wait_cnt;
/* 0: no change, +1: increase WiFi duration,
* -1: decrease WiFi duration
@@ -2162,13 +2164,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
int result;
u8 retry_cnt = 0;
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2181,11 +2183,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 14);
coex_dm->tdma_adj_type = 14;
- } else if (max_interval == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
} else {
halbtc8192e2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2203,11 +2200,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 10);
coex_dm->tdma_adj_type = 10;
- } else if (max_interval == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
} else {
halbtc8192e2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2227,11 +2219,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 6);
coex_dm->tdma_adj_type = 6;
- } else if (max_interval == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
} else {
halbtc8192e2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2249,11 +2236,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 2);
coex_dm->tdma_adj_type = 2;
- } else if (max_interval == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
} else {
halbtc8192e2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2272,11 +2254,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* accquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_cnt = %d\n", retry_cnt);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
- up, dn, m, n, wait_cnt);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
@@ -2293,8 +2275,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex]Increase wifi duration!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]Increase wifi duration!!\n");
}
} else if (retry_cnt <= 3) {
up--;
@@ -2317,8 +2299,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "Reduce wifi duration for retry<3\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Reduce wifi duration for retry<3\n");
}
} else {
if (wait_cnt == 1)
@@ -2334,12 +2316,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "Decrease wifi duration for retryCounter>3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Decrease wifi duration for retryCounter>3!!\n");
}
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8192e_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2355,11 +2337,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, ");
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type dismatch!!!, ");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2370,8 +2352,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
true,
coex_dm->tdma_adj_type);
else
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2390,7 +2372,7 @@ static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
(btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
@@ -2452,7 +2434,7 @@ static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
(btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
@@ -2506,7 +2488,7 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2564,19 +2546,20 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
bool long_dist = false;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
if ((btrssi_state == BTC_RSSI_STATE_LOW ||
btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
(wifirssi_state == BTC_RSSI_STATE_LOW ||
wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
@@ -2656,7 +2639,7 @@ static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2717,7 +2700,7 @@ static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2778,7 +2761,7 @@ static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2836,7 +2819,7 @@ static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2899,7 +2882,7 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
@@ -2963,7 +2946,7 @@ static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -3024,7 +3007,7 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
u32 wifi_bw;
wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+ btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -3079,107 +3062,108 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], return for Manual CTRL <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8192e2ant_is_common_action(btcoexist)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant common\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8192E_2ANT_COEX_ALGO_SCO:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO\n");
halbtc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
halbtc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID\n");
halbtc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP\n");
halbtc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = HS mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HS mode\n");
halbtc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
- btc_alg_dbg(ALGO_TRACE,
- "Action 2-Ant, algorithm = unknown!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = unknown!!\n");
/* halbtc8192e2ant_coex_alloff(btcoexist); */
break;
}
@@ -3190,11 +3174,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
bool backup)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u16 u16tmp = 0;
u8 u8tmp = 0;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
if (backup) {
/* backup rf 0x1e value */
@@ -3277,8 +3262,10 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8192e2ant_init_coex_dm(btcoexist);
}
@@ -3298,13 +3285,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ "\r\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ===========[Under Manual Control]===========");
+ "\r\n ===========[Under Manual Control]===========");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (!board_info->bt_exist) {
@@ -3313,43 +3300,43 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
board_info->pg_ant_num, board_info->btdm_ant_num);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
+ "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsMode(HsChnl)",
- wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+ "Dot11 channel / HsMode(HsChnl)",
+ wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+ "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
@@ -3357,7 +3344,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
((!wifi_busy) ? "idle" :
@@ -3365,7 +3352,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
"uplink" : "downlink")));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
+ "BT [status/ rssi/ retryCnt]",
((btcoexist->bt_info.bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ?
("inquiry/page scan") :
@@ -3376,127 +3363,127 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+ "SCO/HID/PAN/A2DP", stack_info->sco_exist,
stack_info->hid_exist, stack_info->pan_exist,
stack_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
bt_info_ext = coex_sta->bt_info_ext;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- GLBtInfoSrc8192e2Ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ "\r\n %-35s = %7ph(%d)",
+ GLBtInfoSrc8192e2Ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
- coex_dm->cur_sstype);
+ coex_dm->cur_sstype);
/* Sw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
+ "============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ "\r\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
/* Hw setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ "============[Hw setting]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
- coex_dm->backup_ampdu_maxtime);
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+ coex_dm->backup_ampdu_maxtime);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+ "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
- u8tmp[0]);
+ u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+ "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+ "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
- u32tmp[0]);
+ u32tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(hp rx[31:16]/tx[15:0])",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ "0x770(hp rx[31:16]/tx[15:0])",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(lp rx[31:16]/tx[15:0])",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ "0x774(lp rx[31:16]/tx[15:0])",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
halbtc8192e2ant_monitor_bt_ctr(btcoexist);
#endif
@@ -3505,54 +3492,63 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_IPS_ENTER == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8192e2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
}
}
void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_LPS_ENABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_SCAN_START == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_ASSOCIATE_START == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_center_chnl;
@@ -3563,11 +3559,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3587,10 +3583,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3598,14 +3594,17 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (type == BTC_PACKET_DHCP)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmp_buf, u8 length)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
u8 i, rsp_source = 0;
bool bt_busy = false, limited_dig = false;
@@ -3618,19 +3617,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1)
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3647,8 +3646,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- btc_alg_dbg(ALGO_TRACE,
- "bit1, send wifi BW&Chnl to BT!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit1, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3664,8 +3663,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3)) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- btc_alg_dbg(ALGO_TRACE,
- "bit3, BT NOT ignore Wlan active!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit3, BT NOT ignore Wlan active!\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist,
FORCE_EXEC,
false);
@@ -3723,25 +3722,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Non-Connected idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3769,7 +3768,9 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3777,34 +3778,35 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- btc_alg_dbg(ALGO_TRACE,
- "=======================Periodical=======================\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "=======================Periodical=======================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- btc_iface_dbg(INTF_INIT,
- "************************************************\n");
- btc_iface_dbg(INTF_INIT,
- "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- btc_iface_dbg(INTF_INIT,
- "BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- btc_iface_dbg(INTF_INIT,
- "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- btc_iface_dbg(INTF_INIT,
- "************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 16add42a62af..d67bbfb6ad8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -60,9 +60,11 @@ static u32 glcoex_ver_8723b_1ant = 0x47;
/***************************************************************
* local function start with halbtc8723b1ant_
***************************************************************/
-static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
s32 bt_rssi = 0;
u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
@@ -74,28 +76,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -104,12 +106,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +120,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -151,6 +153,7 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
u8 index, u8 level_num,
u8 rssi_thresh, u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
s32 wifi_rssi = 0;
u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
@@ -165,28 +168,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -197,12 +200,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -211,26 +214,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -418,15 +421,16 @@ static void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
h2c_parameter[0] |= BIT0; /* trigger*/
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -513,6 +517,7 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool bt_hs_on = false;
u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
@@ -521,8 +526,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -537,27 +542,29 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR;
}
@@ -566,21 +573,23 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -588,32 +597,36 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -623,31 +636,35 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -657,13 +674,15 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -675,11 +694,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -693,6 +714,7 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
@@ -706,9 +728,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
}
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -732,20 +754,22 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -754,10 +778,12 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6cc);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
@@ -823,14 +849,15 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
bool enable)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -838,16 +865,18 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -862,6 +891,7 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
u8 byte1, u8 byte2, u8 byte3,
u8 byte4, u8 byte5)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5] = {0};
u8 real_byte1 = byte1, real_byte5 = byte5;
bool ap_enable = false;
@@ -871,8 +901,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
if (ap_enable) {
if ((byte1 & BIT4) && !(byte1 & BIT5)) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], FW for 1Ant AP mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -893,8 +923,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
h2c_parameter[0],
h2c_parameter[1] << 24 |
h2c_parameter[2] << 16 |
@@ -918,22 +948,24 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
bool force_exec,
u8 lps_val, u8 rpwm_val)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -947,8 +979,10 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1153,6 +1187,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_busy = false;
u8 rssi_adjust_val = 0;
@@ -1163,13 +1198,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on)
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(on, %d) *********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ******** TDMA(on, %d) *********\n",
+ coex_dm->cur_ps_tdma);
else
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(off, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ******** TDMA(off, %d) ********\n",
+ coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,6 +1409,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool commom = false, wifi_connected = false;
bool wifi_busy = false;
@@ -1383,45 +1419,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else {
if (wifi_busy)
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
else
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
commom = false;
}
@@ -1432,6 +1468,7 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
u8 wifi_status)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static s32 up, dn, m, n, wait_count;
/* 0: no change, +1: increase WiFi duration,
* -1: decrease WiFi duration
@@ -1440,8 +1477,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
u8 retry_count = 0, bt_info_ext;
bool wifi_busy = false;
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
wifi_busy = true;
@@ -1470,8 +1507,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1502,8 +1539,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
up--;
@@ -1526,8 +1563,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -1543,8 +1580,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1589,9 +1626,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
}
} else { /*no change */
/*if busy / idle change */
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex],********* TDMA(on, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex],********* TDMA(on, %d) ********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -1807,7 +1844,7 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0);
if (bt_link_info->hid_only) { /*HID */
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status);
@@ -1835,16 +1872,8 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
}
} else if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) { /*HID+A2DP */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- } else { /*for low BT RSSI*/
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- }
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ coex_dm->auto_tdma_adjust = false;
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
/*PAN(OPP,FTP), HID+PAN(OPP,FTP) */
@@ -1993,19 +2022,20 @@ static void halbtc8723b1ant_action_wifi_connected_special_packet(
static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_busy = false;
bool scan = false, link = false, roam = false;
bool under_4way = false, ap_enable = false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -2019,8 +2049,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
halbtc8723b1ant_action_wifi_connected_special_packet(
btcoexist);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -2081,6 +2111,7 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
@@ -2089,58 +2120,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8723b1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8723B_1ANT_COEX_ALGO_SCO:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8723b1ant_action_sco(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8723b1ant_action_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8723b1ant_action_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8723b1ant_action_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8723b1ant_action_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8723b1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8723b1ant_action_hid_a2dp(btcoexist);
break;
default:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2149,6 +2180,7 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool wifi_connected = false, bt_hs_on = false;
bool increase_scan_dev_num = false;
@@ -2158,24 +2190,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
@@ -2210,16 +2242,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
wifi_rssi_state =
halbtc8723b1ant_wifi_rssi_state(btcoexist,
1, 2, 30, 0);
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8723b1ant_limited_tx(btcoexist,
- NORMAL_EXEC,
- 1, 1, 1, 1);
- } else {
- halbtc8723b1ant_limited_tx(btcoexist,
- NORMAL_EXEC,
- 1, 1, 1, 1);
- }
+ halbtc8723b1ant_limited_tx(btcoexist,
+ NORMAL_EXEC, 1, 1, 1, 1);
} else {
halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
0, 0, 0, 0);
@@ -2254,8 +2278,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2288,12 +2312,13 @@ static void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
bool backup)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 u32tmp = 0;
u8 u8tmp = 0;
u32 cnt_bt_cal_chk = 0;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (backup) {/* backup rf 0x1e value */
coex_dm->backup_arfr_cnt1 =
@@ -2320,13 +2345,13 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
cnt_bt_cal_chk++;
if (u32tmp & BIT0) {
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
cnt_bt_cal_chk);
mdelay(50);
} else {
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
cnt_bt_cal_chk);
break;
}
@@ -2370,8 +2395,10 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2398,19 +2425,19 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ "\r\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]==========");
+ "\r\n ============[Under Manual Control]==========");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
+ "\r\n ============[Coex is STOPPED]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (!board_info->bt_exist) {
@@ -2419,45 +2446,45 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2467,106 +2494,106 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
&wifi_traffic_dir);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
- ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
- (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
- ((!wifi_busy) ? "idle" :
- ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
- "uplink" : "downlink")));
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
- "sta/vwifi/hs/p2pGo/p2pGc",
- ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+ "sta/vwifi/hs/p2pGo/p2pGc",
+ ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
- ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
- ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "non-connected idle" :
- ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "connected-idle" : "busy")))),
+ "BT [status/ rssi/ retryCnt]",
+ ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+ ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+ ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "non-connected idle" :
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "connected-idle" : "busy")))),
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
- bt_link_info->hid_exist, bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
+ "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+ bt_link_info->hid_exist, bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
bt_info_ext = coex_sta->bt_info_ext;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- GLBtInfoSrc8723b1Ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ "\r\n %-35s = %7ph(%d)",
+ GLBtInfoSrc8723b1Ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
if (!btcoexist->manual_control) {
/* Sw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
+ "============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
- "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+ "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
+ "DelBA/ BtCtrlAgg/ AggSize",
(btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
(btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
btcoexist->bt_info.agg_buf_size);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Rate Mask", btcoexist->bt_info.ra_mask);
+ "Rate Mask", btcoexist->bt_info.ra_mask);
/* Fw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ "============[Fw mechanism]============");
pstdmacase = coex_dm->cur_ps_tdma;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
+ "\r\n %-35s = %5ph case-%d (auto:%d)",
"PS TDMA", coex_dm->ps_tdma_para,
pstdmacase, coex_dm->auto_tdma_adjust);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
- "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+ "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
}
/* Hw setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ "============[Hw setting]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
coex_dm->backup_ampdu_max_time);
@@ -2575,49 +2602,49 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
- (u32tmp[1] & 0x3e000000) >> 25);
+ "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+ (u32tmp[1] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8)>>3), u8tmp[1],
- ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2636,22 +2663,22 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
- u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2]);
+ "0x6c0/0x6c4/0x6c8(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
- coex_sta->high_priority_tx);
+ "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+ coex_sta->high_priority_tx);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
halbtc8723b1ant_monitor_bt_ctr(btcoexist);
#endif
@@ -2660,12 +2687,14 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm)
return;
if (BTC_IPS_ENTER == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2676,8 +2705,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
NORMAL_EXEC, 0);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2688,22 +2717,25 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm)
return;
if (BTC_LPS_ENABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
@@ -2740,15 +2772,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
else /* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
else
@@ -2758,6 +2790,7 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
@@ -2789,12 +2822,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2808,6 +2841,7 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifiCentralChnl;
@@ -2817,11 +2851,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2842,10 +2876,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2853,6 +2887,7 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
@@ -2887,8 +2922,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
}
}
@@ -2896,6 +2931,7 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmp_buf, u8 length)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
u8 i, rsp_source = 0;
bool wifi_connected = false;
@@ -2908,19 +2944,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1)
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2937,8 +2973,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and loss of the info.
*/
if (coex_sta->bt_info_ext & BIT1) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -2952,8 +2988,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (coex_sta->bt_info_ext & BIT3) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
halbtc8723b1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3008,30 +3044,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status =
BT_8723B_1ANT_BT_STATUS_MAX;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3047,7 +3083,9 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -3065,11 +3103,13 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
true);
@@ -3079,8 +3119,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3090,8 +3130,10 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], *****************Coex DM Reset****************\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], *****************Coex DM Reset****************\n");
halbtc8723b1ant_init_hw_config(btcoexist, false);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3101,36 +3143,37 @@ void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_1ant,
- glcoex_ver_8723b_1ant, fw_ver,
- bt_patch_ver, bt_patch_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_1ant,
+ glcoex_ver_8723b_1ant, fw_ver,
+ bt_patch_ver, bt_patch_ver);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 5f488ecaef70..12125966a911 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -58,9 +58,11 @@ static u32 glcoex_ver_8723b_2ant = 0x3f;
/**************************************************************
* local function start with btc8723b2ant_
**************************************************************/
-static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
s32 bt_rssi = 0;
u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
@@ -72,28 +74,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -102,12 +104,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -116,26 +118,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -149,6 +151,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
u8 index, u8 level_num,
u8 rssi_thresh, u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
s32 wifi_rssi = 0;
u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
@@ -162,28 +165,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -194,12 +197,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -208,26 +211,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -239,6 +242,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
u32 reg_hp_tx = 0, reg_hp_rx = 0;
u32 reg_lp_tx = 0, reg_lp_rx = 0;
@@ -259,12 +263,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -272,15 +276,16 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
h2c_parameter[0] |= BIT0; /* trigger */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -386,6 +391,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool bt_hs_on = false;
u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
@@ -394,8 +400,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -410,27 +416,29 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
@@ -439,21 +447,23 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -461,31 +471,35 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex],A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex],A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -495,32 +509,36 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -530,13 +548,15 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -548,11 +568,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -564,6 +586,7 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool ret = false;
bool bt_hs_on = false, wifi_connected = false;
s32 bt_hs_rssi = 0;
@@ -577,20 +600,20 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
return false;
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -602,6 +625,7 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
u8 dac_swing_lvl)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
/* There are several type of dacswing
@@ -609,10 +633,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -620,6 +644,7 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
bool dec_bt_pwr)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
@@ -627,8 +652,8 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
(dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
@@ -637,14 +662,16 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power = %s\n",
force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
@@ -658,14 +685,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swing_lvl)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
(force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
coex_dm->pre_fw_dac_swing_lvl,
coex_dm->cur_fw_dac_swing_lvl);
@@ -682,18 +711,20 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
bool rx_rf_shrink_on)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
/* Resume RF Rx LPF corner */
/* After initialized, we can use coex_dm->btRf0x1eBackup */
if (btcoexist->initilized) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -704,15 +735,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
(force_exec ? "force to" : ""), (rx_rf_shrink_on ?
"ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
coex_dm->pre_rf_rx_lpf_shrink,
coex_dm->cur_rf_rx_lpf_shrink);
@@ -729,6 +762,7 @@ static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
@@ -742,9 +776,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -752,18 +786,20 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/*return; */
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""), (low_penalty_ra ?
- "ON" : "OFF"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -776,9 +812,11 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
u32 level)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8) level;
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -796,20 +834,22 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
- (force_exec ? "force to" : ""),
- (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -826,12 +866,13 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
bool agc_table_en)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 rssi_adjust_val = 0;
/* BB AGC Gain Table */
if (agc_table_en) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -840,8 +881,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
} else {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -854,15 +895,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
/* RF Gain */
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
if (agc_table_en) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38ffe);
} else {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -873,15 +914,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
if (agc_table_en) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38ffe);
} else {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -899,17 +940,19 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- (agc_table_en ? "Enable" : "Disable"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (agc_table_en ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en,
- coex_dm->cur_agc_table_en);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -923,20 +966,22 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -945,24 +990,26 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
- force_exec ? "force to" : "",
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ force_exec ? "force to" : "",
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1043,14 +1090,15 @@ static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
bool enable)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
h2c_parameter[0] |= BIT0;/* function enable*/
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1058,16 +1106,18 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1081,6 +1131,7 @@ static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
u8 byte2, u8 byte3, u8 byte4, u8 byte5)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5];
h2c_parameter[0] = byte1;
@@ -1095,11 +1146,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1208,20 +1259,22 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
bool turn_on, u8 type)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1405,6 +1458,7 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool common = false, wifi_connected = false;
bool wifi_busy = false;
bool bt_hs_on = false, low_pwr_disable = false;
@@ -1419,8 +1473,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1443,8 +1497,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1470,8 +1524,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1495,15 +1549,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
if (bt_hs_on)
return false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1, 0xfffff, 0x0);
@@ -1539,10 +1593,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
s32 result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/* Set PS TDMA for max interval == 1 */
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1638,8 +1694,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
coex_dm->tdma_adj_type = 71;
@@ -1735,10 +1791,12 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
s32 result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/* Set PS TDMA for max interval == 2 */
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
coex_dm->tdma_adj_type = 6;
@@ -1819,8 +1877,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1906,10 +1964,12 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
s32 result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/* Set PS TDMA for max interval == 3 */
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
coex_dm->tdma_adj_type = 7;
@@ -1990,8 +2050,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
coex_dm->tdma_adj_type = 3;
@@ -2078,18 +2138,19 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
bool sco_hid, bool tx_pause,
u8 max_interval)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static s32 up, dn, m, n, wait_count;
/*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
s32 result;
u8 retry_count = 0;
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2102,11 +2163,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 14);
coex_dm->tdma_adj_type = 14;
- } else if (max_interval == 3) {
- btc8723b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2124,11 +2180,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 10);
coex_dm->tdma_adj_type = 10;
- } else if (max_interval == 3) {
- btc8723b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2148,11 +2199,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 6);
coex_dm->tdma_adj_type = 6;
- } else if (max_interval == 3) {
- btc8723b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2170,11 +2216,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 2);
coex_dm->tdma_adj_type = 2;
- } else if (max_interval == 3) {
- btc8723b2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2193,11 +2234,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
- up, dn, m, n, wait_count);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
result = 0;
wait_count++;
/* no retry in the last 2-second duration*/
@@ -2214,8 +2255,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
} /* <=3 retry in the last 2-second duration*/
} else if (retry_count <= 3) {
up--;
@@ -2238,8 +2279,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -2255,12 +2296,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
}
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
set_tdma_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2274,9 +2315,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
*/
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2286,8 +2327,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
else
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2357,7 +2398,7 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -2422,7 +2463,7 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist,
1, 2, 40, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
@@ -2561,7 +2602,7 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -2672,7 +2713,7 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -2736,7 +2777,7 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (btc8723b_need_dec_pwr(btcoexist))
@@ -2806,7 +2847,7 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -2870,7 +2911,7 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -2923,28 +2964,29 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8723b2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
} else {
@@ -2956,75 +2998,76 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
}
coex_dm->cur_algorithm = algorithm;
- btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
- coex_dm->cur_algorithm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
if (btc8723b2ant_is_common_action(btcoexist)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant common\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8723b2ant_action_sco(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8723b2ant_action_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8723b2ant_action_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8723b2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8723b2ant_action_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8723b2ant_action_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8723b2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8723b2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8723b2ant_action_hid_a2dp(btcoexist);
break;
default:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8723b2ant_coex_alloff(btcoexist);
break;
}
@@ -3050,10 +3093,11 @@ static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
*********************************************************************/
void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp = 0;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
coex_dm->bt_rf0x1e_backup =
btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -3078,8 +3122,10 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8723b2ant_init_coex_dm(btcoexist);
}
@@ -3101,13 +3147,13 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
u8 ap_num = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ "\r\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========[Under Manual Control]============");
+ "\r\n ==========[Under Manual Control]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (!board_info->bt_exist) {
@@ -3116,21 +3162,21 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
@@ -3138,23 +3184,23 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+ "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
@@ -3162,112 +3208,112 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
- ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
- (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
- ((!wifi_busy) ? "idle" :
- ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
- "uplink" : "downlink")));
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist, bt_link_info->hid_exist,
- bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
bt_info_ext = coex_sta->bt_info_ext;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8723b_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ "\r\n %-35s = %7ph(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
/* Sw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Sw mechanism]============");
+ "\r\n %-35s", "============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ "\r\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
- coex_dm->cur_ignore_wlan_act);
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
/* Hw setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ "============[Hw setting]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x778/0x880[29:25]", u8tmp[0],
- (u32tmp[0]&0x3e000000) >> 25);
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8)>>3), u8tmp[1],
- ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3286,24 +3332,24 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
- u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
btc8723b2ant_monitor_bt_ctr(btcoexist);
#endif
@@ -3313,16 +3359,18 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_IPS_ENTER == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8723b2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
@@ -3332,50 +3380,57 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_LPS_ENABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_SCAN_START == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_ASSOCIATE_START == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist,
@@ -3396,10 +3451,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66=0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3407,14 +3462,17 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (type == BTC_PACKET_DHCP)
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmpbuf, u8 length)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
u8 i, rsp_source = 0;
bool bt_busy = false, limited_dig = false;
@@ -3427,24 +3485,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data=[",
- rsp_source, length);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
if (i == length-1)
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x]\n", tmpbuf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmpbuf[i]);
else
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x, ", tmpbuf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmpbuf[i]);
}
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -3462,8 +3520,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
because bt is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3477,8 +3535,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if ((coex_sta->bt_info_ext & BIT3)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
false);
} else {
@@ -3531,26 +3589,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3573,7 +3631,9 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3582,36 +3642,37 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
board_info->pg_ant_num,
board_info->btdm_ant_num,
board_info->btdm_ant_pos);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
stack_info->profile_notified ? "Yes" : "No",
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
fw_ver, bt_patch_ver, bt_patch_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
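The same mechanical pattern repeats across both files in this diff: each converted function gains a local struct rtl_priv *rtlpriv = btcoexist->adapter; and every btc_alg_dbg()/btc_iface_dbg() call is rewritten as RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, ...). The standalone sketch below only illustrates that shape so it can be compiled in isolation; the stub structs and the printf-backed RT_TRACE macro are placeholders for illustration, not the driver's real rtlwifi definitions.

/*
 * Minimal sketch of the logging conversion applied by this patch.
 * Stub types and a stub RT_TRACE stand in for the rtlwifi headers.
 */
#include <stdio.h>

struct rtl_priv { int unused; };            /* placeholder, not the real struct */
struct btc_coexist { void *adapter; };      /* only the field this sketch needs */

#define COMP_BT_COEXIST 0                   /* placeholder component id */
#define DBG_LOUD        0                   /* placeholder debug level */

/* Same call shape as the driver macro; here it just prints the message. */
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
	do { (void)(rtlpriv); (void)(comp); (void)(level); \
	     printf(fmt, ##__VA_ARGS__); } while (0)

static void example_notify(struct btc_coexist *btcoexist, int type)
{
	/* Each converted function fetches rtlpriv from the adapter first. */
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	if (type == 1)
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[BTCoex], DHCP Packet notify\n");
}

int main(void)
{
	struct rtl_priv priv = { 0 };
	struct btc_coexist coex = { .adapter = &priv };

	example_notify(&coex, 1);
	return 0;
}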
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 3ce47c70bfa4..8b689ed9a629 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -62,9 +62,11 @@ static u32 glcoex_ver_8821a_1ant = 0x41;
* local function start with halbtc8821a1ant_
*============================================================
*/
-static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
long bt_rssi = 0;
u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
@@ -76,28 +78,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -106,12 +108,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +122,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -152,6 +154,7 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
u8 index, u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
long wifi_rssi = 0;
u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
@@ -165,28 +168,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -197,12 +200,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -212,26 +215,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -414,15 +417,16 @@ static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
h2c_parameter[0] |= BIT0; /* trigger*/
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -485,6 +489,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool bt_hs_on = false;
u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
@@ -493,8 +498,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -509,26 +514,28 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
}
}
@@ -536,50 +543,56 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -588,29 +601,33 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -619,12 +636,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -635,12 +654,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -652,6 +673,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
bool enable_auto_report)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
@@ -659,10 +681,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -671,14 +693,17 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""), ((enable_auto_report) ?
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""), ((enable_auto_report) ?
"Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
coex_dm->pre_bt_auto_report,
coex_dm->cur_bt_auto_report);
@@ -693,6 +718,7 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
@@ -706,9 +732,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -731,20 +757,22 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -752,8 +780,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
(force_exec ? "force to" : ""), val0x6c0, val0x6c4,
val0x6c8, val0x6cc);
coex_dm->cur_val_0x6c0 = val0x6c0;
@@ -822,14 +852,15 @@ static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
bool enable)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
h2c_parameter[0] |= BIT0; /* function enable*/
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -837,16 +868,18 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -861,6 +894,7 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
u8 byte1, u8 byte2, u8 byte3,
u8 byte4, u8 byte5)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5] = {0};
h2c_parameter[0] = byte1;
@@ -875,13 +909,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -898,22 +932,24 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
bool force_exec, u8 lps_val, u8 rpwm_val)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -927,8 +963,10 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1017,6 +1055,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 rssi_adjust_val = 0;
coex_dm->cur_ps_tdma_on = turn_on;
@@ -1024,13 +1063,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1232,6 +1271,7 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool common = false, wifi_connected = false, wifi_busy = false;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
@@ -1241,50 +1281,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
}
common = false;
@@ -1296,13 +1336,14 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
u8 wifi_status)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static long up, dn, m, n, wait_count;
/*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
long result;
u8 retry_count = 0, bt_info_ext;
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
wifi_status) ||
@@ -1330,8 +1371,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1366,8 +1407,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration*/
@@ -1397,8 +1438,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -1419,8 +1460,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1465,9 +1506,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
}
} else {
/*no change*/
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 &&
@@ -1566,6 +1607,7 @@ static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist)
static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static bool pre_bt_disabled;
static u32 bt_disable_cnt;
bool bt_active = true, bt_disabled = false;
@@ -1589,25 +1631,25 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is disabled !!\n");
halbtc8821a1ant_action_wifi_only(btcoexist);
}
}
if (pre_bt_disabled != bt_disabled) {
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
(pre_bt_disabled ? "disabled" : "enabled"),
(bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
@@ -1726,11 +1768,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
/* tdma and coex table*/
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
- wifi_status)
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- else
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
@@ -1740,7 +1778,7 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bt_rssi_state = halbtc8821a1ant_bt_rssi_state(2, 28, 0);
+ bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
if (bt_link_info->hid_only) {
/*HID*/
@@ -1879,19 +1917,20 @@ static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist)
static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_busy = false;
bool scan = false, link = false, roam = false;
bool under_4way = false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -1900,8 +1939,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -1954,6 +1993,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
algorithm = halbtc8821a1ant_action_algorithm(btcoexist);
@@ -1962,58 +2002,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
/*halbtc8821a1ant_coex_all_off(btcoexist);*/
break;
}
@@ -2023,6 +2063,7 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool wifi_connected = false, bt_hs_on = false;
bool increase_scan_dev_num = false;
@@ -2031,31 +2072,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
bool wifi_under_5g = false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
halbtc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2078,16 +2119,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
wifi_rssi_state =
halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2,
30, 0);
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1,
- 1, 1);
- } else {
- halbtc8821a1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1,
- 1, 1);
- }
+ halbtc8821a1ant_limited_tx(btcoexist,
+ NORMAL_EXEC, 1, 1, 1, 1);
} else {
halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
0, 0, 0, 0);
@@ -2121,8 +2154,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2151,11 +2184,12 @@ static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
bool back_up)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1_tmp = 0;
bool wifi_under_5g = false;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (back_up) {
coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2206,8 +2240,10 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2233,19 +2269,19 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ "\r\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]============");
+ "\r\n ============[Under Manual Control]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
+ "\r\n ============[Coex is STOPPED]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ "\r\n ==========================================");
}
if (!board_info->bt_exist) {
@@ -2254,27 +2290,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ "\r\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
&bt_hs_on);
@@ -2283,27 +2319,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
&wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ "\r\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
- (int)wifi_rssi, (int)bt_hs_rssi);
+ "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+ (int)wifi_rssi, (int)bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
- link, roam, scan);
+ "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2314,13 +2350,13 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
- (wifi_under_5g ? "5G" : "2.4G"),
- ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
- (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
- ((!wifi_busy) ? "idle" :
- ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
- "uplink" : "downlink")));
+ "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+ (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
((btcoexist->bt_info.bt_disabled) ? ("disabled") :
@@ -2334,161 +2370,162 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist,
- bt_link_info->hid_exist,
- bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
+ "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist,
+ bt_link_info->hid_exist,
+ bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
bt_info_ext = coex_sta->bt_info_ext;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext&BIT0) ?
- "Basic rate" : "EDR rate");
+ "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ?
+ "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_1ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ "\r\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_1ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
if (!btcoexist->manual_control) {
/* Sw mechanism*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Sw mechanism]============");
+ "\r\n %-35s",
+ "============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d", "SM[LowPenaltyRA]",
- coex_dm->cur_low_penalty_ra);
+ "\r\n %-35s = %d", "SM[LowPenaltyRA]",
+ coex_dm->cur_low_penalty_ra);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
- (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
- (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
- btcoexist->bt_info.agg_buf_size);
+ "\r\n %-35s = %s/ %s/ %d ",
+ "DelBA/ BtCtrlAgg/ AggSize",
+ (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+ (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+ btcoexist->bt_info.agg_buf_size);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ "\r\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA",
- coex_dm->ps_tdma_para,
- ps_tdma_case,
- coex_dm->auto_tdma_adjust);
+ "\r\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA",
+ coex_dm->ps_tdma_para,
+ ps_tdma_case,
+ coex_dm->auto_tdma_adjust);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ "\r\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d ", "IgnWlanAct",
- coex_dm->cur_ignore_wlan_act);
+ "\r\n %-35s = %d ", "IgnWlanAct",
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ "\r\n %-35s", "============[Hw setting]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime",
- coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2,
- coex_dm->backup_retry_limit,
- coex_dm->backup_ampdu_max_time);
+ "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime",
+ coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2,
+ coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_max_time);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+ "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
- u1_tmp[0], (u4_tmp[0]&0x3e000000) >> 25);
+ "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+ u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0x8db[6:5]",
- ((u1_tmp[0]&0x60)>>5));
+ "\r\n %-35s = 0x%x", "0x8db[6:5]",
+ ((u1_tmp[0] & 0x60) >> 5));
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
- (u4_tmp[0] & 0x30000000)>>28,
- u4_tmp[0] & 0xff,
- u1_tmp[0] & 0x3);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+ (u4_tmp[0] & 0x30000000) >> 28,
+ u4_tmp[0] & 0xff,
+ u1_tmp[0] & 0x3);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/0x4c[24:23]/0x64[0]",
- u1_tmp[0], ((u4_tmp[0]&0x01800000)>>23), u1_tmp[1]&0x1);
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x40/0x4c[24:23]/0x64[0]",
+ u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
- u4_tmp[0], u1_tmp[0]);
+ "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+ u4_tmp[0], u1_tmp[0]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0xc50(dig)",
- u4_tmp[0]&0xff);
+ "\r\n %-35s = 0x%x", "0xc50(dig)",
+ u4_tmp[0] & 0xff);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
- u4_tmp[0], (u1_tmp[0]<<8) + u1_tmp[1]);
+ "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+ u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1)
halbtc8821a1ant_monitor_bt_ctr(btcoexist);
#endif
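
(Aside, not part of the patch: the register dump above decodes sub-fields such as 0xc58[29:25] and 0x8db[6:5] by masking and shifting. A minimal standalone sketch of that decode, with a hypothetical extract_field() helper that does not exist in the driver:)

/* Illustrative sketch only; extract_field() is a hypothetical helper and
 * not part of rtlwifi.  It mirrors the mask-and-shift decode used above,
 * e.g. (u4_tmp[0] & 0x3e000000) >> 25 for field 0xc58[29:25]. */
#include <stdint.h>
#include <stdio.h>

static uint32_t extract_field(uint32_t reg, unsigned int msb, unsigned int lsb)
{
	uint32_t mask = ((msb - lsb) == 31) ? 0xffffffffu :
			(((1u << (msb - lsb + 1)) - 1u) << lsb);

	return (reg & mask) >> lsb;
}

int main(void)
{
	uint32_t reg_c58 = 0x3a001234;	/* example value, not a real dump */

	/* same as (reg_c58 & 0x3e000000) >> 25 in the dump above */
	printf("0xc58[29:25] = 0x%x\n", extract_field(reg_c58, 29, 25));
	return 0;
}
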
@@ -2497,12 +2534,14 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm)
return;
if (BTC_IPS_ENTER == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
@@ -2511,8 +2550,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2521,22 +2560,25 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm)
return;
if (BTC_LPS_ENABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_Lps = true;
} else if (BTC_LPS_DISABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_Lps = false;
}
}
void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
if (btcoexist->manual_control ||
@@ -2560,8 +2602,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
/* non-connected scan*/
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2570,8 +2612,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
/* non-connected scan*/
halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2583,6 +2625,7 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
if (btcoexist->manual_control ||
@@ -2600,12 +2643,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2621,6 +2664,7 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_central_chnl;
@@ -2631,11 +2675,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask*/
@@ -2658,11 +2702,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2670,6 +2714,7 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool bt_hs_on = false;
if (btcoexist->manual_control ||
@@ -2690,8 +2735,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
}
}
@@ -2699,6 +2744,7 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmp_buf, u8 length)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
u8 i, rsp_source = 0;
bool wifi_connected = false;
@@ -2715,19 +2761,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -2744,8 +2790,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* Here we need to resend some wifi info to BT*/
/* because bt is reset and loss of the info.*/
if (coex_sta->bt_info_ext & BIT1) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2761,8 +2807,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
halbtc8821a1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -2770,8 +2816,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
if (!(coex_sta->bt_info_ext & BIT4)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
halbtc8821a1ant_bt_auto_report(btcoexist,
FORCE_EXEC, true);
}
@@ -2816,28 +2862,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
/* connection exists but no busy*/
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2854,8 +2900,10 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -2873,20 +2921,22 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Pnp notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8821a1ant_init_hw_config(btcoexist, false);
halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2894,41 +2944,41 @@ void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
}
}
-void
-ex_halbtc8821a1ant_periodical(
- struct btc_coexist *btcoexist) {
+void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
board_info->pg_ant_num,
board_info->btdm_ant_num,
board_info->btdm_ant_pos);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
stack_info->profile_notified ? "Yes" : "No",
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
glcoex_ver_date_8821a_1ant,
glcoex_ver_8821a_1ant,
fw_ver, bt_patch_ver,
bt_patch_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 81f843bba771..1717e9ce96ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -65,9 +65,11 @@ static u32 glcoex_ver_8821a_2ant = 0x5050;
* local function start with halbtc8821a2ant_
*============================================================
*/
-static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
long bt_rssi = 0;
u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
@@ -80,28 +82,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
if (bt_rssi >= tmp) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -110,12 +112,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +127,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
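
(Aside, not part of the patch: the level_num == 2 branch above is a simple hysteresis; once the state is LOW it only switches to HIGH after the RSSI clears rssi_thresh plus the tolerance constant, and once HIGH it only drops back when the RSSI falls below rssi_thresh itself. A reduced sketch of that idea with made-up names:)

/* Hysteresis sketch with invented names; mirrors the level_num == 2 case
 * above, where tol plays the role of BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT. */
enum rssi_state { RSSI_LOW, RSSI_HIGH };

static enum rssi_state rssi_update(enum rssi_state prev, long rssi,
				   long thresh, long tol)
{
	if (prev == RSSI_LOW)
		return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;

	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}
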
@@ -158,6 +160,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
u8 index, u8 level_num,
u8 rssi_thresh, u8 rssi_thresh1)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
long wifi_rssi = 0;
u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
@@ -171,28 +174,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -203,12 +206,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -217,26 +220,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -247,6 +250,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static bool pre_bt_disabled;
static u32 bt_disable_cnt;
bool bt_active = true, bt_disabled = false;
@@ -268,32 +272,33 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 reg_hp_txrx, reg_lp_txrx, u4tmp;
u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
@@ -313,12 +318,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- btc_alg_dbg(ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -326,21 +331,23 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
{
- u8 h2c_parameter[1] = {0};
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
h2c_parameter[0] |= BIT0; /* trigger */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
bool bt_hs_on = false;
u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED;
@@ -357,8 +364,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
stack_info->bt_link_exist = coex_sta->bt_link_exist;
if (!coex_sta->bt_link_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], No profile exists!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No profile exists!!!\n");
return algorithm;
}
@@ -373,26 +380,28 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (coex_sta->sco_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
if (coex_sta->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else if (coex_sta->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -400,50 +409,56 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -452,29 +467,33 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -483,12 +502,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -499,12 +520,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -515,6 +538,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool ret = false;
bool bt_hs_on = false, wifi_connected = false;
long bt_hs_rssi = 0;
@@ -528,20 +552,20 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
return false;
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -552,17 +576,18 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
u8 dac_swing_lvl)
{
- u8 h2c_parameter[1] = {0};
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 h2c_parameter[1] = {0};
/* There are several type of dacswing
* 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
*/
h2c_parameter[0] = dac_swing_lvl;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -570,16 +595,17 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
bool dec_bt_pwr)
{
- u8 h2c_parameter[1] = {0};
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -587,15 +613,17 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power = %s\n",
(force_exec ? "force to" : ""),
((dec_bt_pwr) ? "ON" : "OFF"));
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
@@ -609,6 +637,7 @@ static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
bool bt_lna_cons_on)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[2] = {0};
h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */
@@ -616,10 +645,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
if (bt_lna_cons_on)
h2c_parameter[1] |= BIT0;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
- bt_lna_cons_on ? "ON!!" : "OFF!!",
- h2c_parameter[0] << 8 | h2c_parameter[1]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+ bt_lna_cons_on ? "ON!!" : "OFF!!",
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -627,15 +656,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
bool force_exec, bool bt_lna_cons_on)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s BT Constrain = %s\n",
- (force_exec ? "force" : ""),
- ((bt_lna_cons_on) ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Constrain = %s\n",
+ (force_exec ? "force" : ""),
+ ((bt_lna_cons_on) ? "ON" : "OFF"));
coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
coex_dm->pre_bt_lna_constrain,
coex_dm->cur_bt_lna_constrain);
@@ -652,16 +683,17 @@ static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
u8 bt_psd_mode)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[2] = {0};
h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */
h2c_parameter[1] = bt_psd_mode;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
- h2c_parameter[1],
- h2c_parameter[0] << 8 | h2c_parameter[1]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+ h2c_parameter[1],
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -669,15 +701,17 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
bool force_exec, u8 bt_psd_mode)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s BT PSD mode = 0x%x\n",
- (force_exec ? "force" : ""), bt_psd_mode);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT PSD mode = 0x%x\n",
+ (force_exec ? "force" : ""), bt_psd_mode);
coex_dm->cur_bt_psd_mode = bt_psd_mode;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
- coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+ coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
return;
@@ -691,6 +725,7 @@ static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
bool enable_auto_report)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
@@ -698,10 +733,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -710,15 +745,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_auto_report) ? "Enabled" : "Disabled"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_auto_report) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
coex_dm->pre_bt_auto_report,
coex_dm->cur_bt_auto_report);
@@ -735,16 +772,18 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec,
u8 fw_dac_swing_lvl)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -760,10 +799,12 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
bool rx_rf_shrink_on)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -771,8 +812,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialized, we can use coex_dm->bt_rf0x1e_backup
*/
if (btcoexist->initilized) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1e, 0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -783,17 +824,19 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
(force_exec ? "force to" : ""),
((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -808,6 +851,7 @@ static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
h2c_parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
@@ -824,9 +868,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9;
}
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -834,17 +878,19 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
/*return;*/
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""),
- ((low_penalty_ra) ? "ON" : "OFF"));
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""),
+ ((low_penalty_ra) ? "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
- coex_dm->pre_low_penalty_ra,
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+ coex_dm->pre_low_penalty_ra,
coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
@@ -859,10 +905,11 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
u32 level)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
}
@@ -880,21 +927,23 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"),
- dac_swing_lvl);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"),
+ dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl ==
@@ -912,13 +961,15 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
bool adc_back_off)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (adc_back_off) {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level On!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB BackOff Level On!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
} else {
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level Off!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB BackOff Level Off!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
}
}
@@ -926,17 +977,19 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
bool force_exec, bool adc_back_off)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s turn AdcBackOff = %s\n",
- (force_exec ? "force to" : ""),
- ((adc_back_off) ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec ? "force to" : ""),
+ ((adc_back_off) ? "ON" : "OFF"));
coex_dm->cur_adc_back_off = adc_back_off;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
- coex_dm->pre_adc_back_off,
- coex_dm->cur_adc_back_off);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+ coex_dm->pre_adc_back_off,
+ coex_dm->cur_adc_back_off);
if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
return;
@@ -950,20 +1003,22 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- btc_alg_dbg(ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -971,28 +1026,30 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- btc_alg_dbg(ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c0,
- coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8,
- coex_dm->pre_val0x6cc);
- btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
- "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c0,
- coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8,
- coex_dm->cur_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c0,
+ coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8,
+ coex_dm->pre_val0x6cc);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c0,
+ coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8,
+ coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1012,14 +1069,15 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
bool enable)
{
+ struct rtl_priv *rtlpriv = btcoex->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
h2c_parameter[0] |= BIT0;/* function enable */
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
@@ -1027,16 +1085,18 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1051,6 +1111,7 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
u8 byte1, u8 byte2, u8 byte3,
u8 byte4, u8 byte5)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5];
h2c_parameter[0] = byte1;
@@ -1065,13 +1126,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
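
(Aside, not part of the patch: the trace above prints the five PS-TDMA bytes as one hex word, byte 1 on its own followed by bytes 2-5 packed most-significant first, which is what the "0x%x%08x" format produces. A tiny standalone sketch of that formatting with example values:)

/* Formatting sketch only; the byte values are examples, not a real
 * PS-TDMA parameter set. */
#include <stdio.h>

int main(void)
{
	unsigned char h2c[5] = { 0x51, 0x1a, 0x1a, 0x01, 0x10 };
	unsigned int packed = (unsigned int)h2c[1] << 24 |
			      (unsigned int)h2c[2] << 16 |
			      (unsigned int)h2c[3] << 8 |
			      (unsigned int)h2c[4];

	/* matches the "FW write 0x60(5bytes) = 0x%x%08x" trace above */
	printf("FW write 0x60(5bytes) = 0x%x%08x\n", h2c[0], packed);
	return 0;
}
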
@@ -1165,20 +1226,22 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type = %d\n",
- (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
- type);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+ type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1348,6 +1411,7 @@ static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist)
static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool common = false, wifi_connected = false, wifi_busy = false;
bool low_pwr_disable = false;
@@ -1364,8 +1428,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT IPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi IPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1382,13 +1446,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT IPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Busy + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT IPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi LPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1406,8 +1470,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT LPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi IPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1423,13 +1487,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT LPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Busy + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT LPS!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi LPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1448,8 +1512,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist,
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi IPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1468,12 +1532,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Busy + BT Busy!!\n");
common = false;
} else {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT Busy!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi LPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 21);
@@ -1494,9 +1558,11 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1591,8 +1657,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1695,9 +1761,11 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1786,8 +1854,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1881,9 +1949,11 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (tx_pause) {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -1972,8 +2042,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2068,6 +2138,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
bool sco_hid, bool tx_pause,
u8 max_interval)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static long up, dn, m, n, wait_count;
/* 0: no change, +1: increase WiFi duration,
 * -1: decrease WiFi duration
 */
@@ -2075,13 +2146,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
int result;
u8 retry_count = 0;
- btc_alg_dbg(ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (coex_dm->reset_tdma_adjust) {
coex_dm->reset_tdma_adjust = false;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2094,11 +2165,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 14);
coex_dm->tdma_adj_type = 14;
- } else if (max_interval == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
} else {
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2116,11 +2182,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 10);
coex_dm->tdma_adj_type = 10;
- } else if (max_interval == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
} else {
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2140,11 +2201,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 6);
coex_dm->tdma_adj_type = 6;
- } else if (max_interval == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
} else {
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2162,11 +2218,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
NORMAL_EXEC,
true, 2);
coex_dm->tdma_adj_type = 2;
- } else if (max_interval == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
} else {
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
@@ -2185,10 +2236,10 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
(int)up, (int)dn, (int)m, (int)n, (int)wait_count);
result = 0;
wait_count++;
@@ -2210,8 +2261,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -2240,8 +2291,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -2262,12 +2313,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8821a2_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2283,8 +2334,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
@@ -2295,8 +2346,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
} else {
- btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2311,7 +2362,7 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
@@ -2337,14 +2388,8 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
* halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
*/
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0); /*for voice quality*/
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0); /*for voice quality*/
- }
+ halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0); /*for voice quality*/
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2395,7 +2440,7 @@ static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2475,7 +2520,7 @@ static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
/* fw dac swing is called in btc8821a2ant_tdma_dur_adj()
* halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2543,7 +2588,7 @@ static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
/*fw dac swing is called in btc8821a2ant_tdma_dur_adj()
*halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2612,7 +2657,7 @@ static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2692,7 +2737,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2734,14 +2779,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
NORMAL_EXEC, false);
}
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- }
+ halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2768,7 +2806,7 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2779,40 +2817,18 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
- /* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
- } else {
- /* for HID quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
- }
+ halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5afa5afa, 0xffff, 0x3);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
/* fw mechanism */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- false, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- false, 3);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- true, 3);
- }
- }
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8821a2ant_tdma_dur_adj(btcoexist, false,
+ false, 3);
+ else
+ btc8821a2ant_tdma_dur_adj(btcoexist, false,
+ true, 3);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2826,31 +2842,14 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
false, false);
btc8821a2ant_sw_mech2(btcoexist, false, false,
false, 0x18);
- };
+ }
} else {
/* fw mechanism */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- false, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- false, 3);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- true, 3);
- }
- }
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3);
+ else
+ btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2875,7 +2874,7 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2886,15 +2885,8 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
- /* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5f5a5f, 0xffff, 0x3);
- } else {
- /* for HID quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5f5a5f, 0xffff, 0x3);
- }
+ halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
@@ -2958,7 +2950,7 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2969,40 +2961,12 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
- /* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
- } else {
- /* for HID quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
- }
+ halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
/* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
- }
- }
+ btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3066,7 +3030,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+ bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
@@ -3075,40 +3039,12 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
- /* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5f5b5f5b, 0xffffff, 0x3);
- } else {
- /*for HID quality & wifi performance balance at 11n mode*/
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5f5b5f5b, 0xffffff, 0x3);
- }
+ halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5f5b5f5b, 0xffffff, 0x3);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
/* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- }
- }
+ btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3125,29 +3061,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
}
} else {
/* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
-
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /*a2dp basic rate*/
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- } else {
- /*a2dp edr rate*/
- btc8821a2ant_tdma_dur_adj(btcoexist,
- true, true, 2);
- }
- }
+ btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3167,12 +3081,13 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_under_5g = false;
u8 algorithm = 0;
if (btcoexist->manual_control) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Manual control!!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Manual control!!!\n");
return;
}
@@ -3180,8 +3095,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
halbtc8821a2ant_coex_under_5g(btcoexist);
return;
}
@@ -3189,82 +3104,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8821a2ant_bt_inquiry_page(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8821a2ant_is_common_action(btcoexist)) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant common\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->reset_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
coex_dm->pre_algorithm,
coex_dm->cur_algorithm);
coex_dm->reset_tdma_adjust = true;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
halbtc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
halbtc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
halbtc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
halbtc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
halbtc8821a2ant_coex_all_off(btcoexist);
break;
}
@@ -3281,10 +3196,11 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
*/
void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp = 0;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
/* backup rf 0x1e value */
coex_dm->bt_rf0x1e_backup =
@@ -3312,13 +3228,12 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
-void
-ex_halbtc8821a2ant_init_coex_dm(
- struct btc_coexist *btcoexist
- )
+void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8821a2ant_init_coex_dm(btcoexist);
}
@@ -3341,7 +3256,7 @@ ex_halbtc8821a2ant_display_coex_info(
u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ "\r\n ============[BT Coexist info]============");
if (!board_info->bt_exist) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
@@ -3349,23 +3264,23 @@ ex_halbtc8821a2ant_display_coex_info(
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "[Action Manual control]!!");
+ "\r\n %-35s", "[Action Manual control]!!");
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
"CoexVer/ FwVer/ PatchVer",
glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3377,26 +3292,26 @@ ex_halbtc8821a2ant_display_coex_info(
btcoexist->btc_get(btcoexist,
BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
+ "\r\n %-35s = %d / %d(%d)",
"Dot11 channel / HsMode(HsChnl)",
wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
+ "\r\n %-35s = %3ph ",
"H2C Wifi inform bt chnl Info",
coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+ "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
link, roam, scan);
btcoexist->btc_get(btcoexist,
@@ -3408,7 +3323,7 @@ ex_halbtc8821a2ant_display_coex_info(
btcoexist->btc_get(btcoexist,
BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+ "\r\n %-35s = %s / %s/ %s ", "Wifi status",
(wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3417,7 +3332,7 @@ ex_halbtc8821a2ant_display_coex_info(
"uplink" : "downlink")));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+ "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
((BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)
? "idle" : ((BT_8821A_2ANT_BT_STATUS_CON_IDLE ==
@@ -3426,7 +3341,7 @@ ex_halbtc8821a2ant_display_coex_info(
if (stack_info->profile_notified) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
stack_info->sco_exist, stack_info->hid_exist,
stack_info->pan_exist, stack_info->a2dp_exist);
@@ -3436,117 +3351,117 @@ ex_halbtc8821a2ant_display_coex_info(
bt_info_ext = coex_sta->bt_info_ext;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ "\r\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
/* Sw mechanism*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
+ "============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig/ btLna]",
- coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra,
- coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain);
+ "\r\n %-35s = %d/ %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig/ btLna]",
+ coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra,
+ coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ "============[Fw mechanism]============");
if (!btcoexist->manual_control) {
ps_tdma_case = coex_dm->cur_ps_tdma;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d",
- "PS TDMA",
- coex_dm->ps_tdma_para, ps_tdma_case);
+ "\r\n %-35s = %5ph case-%d",
+ "PS TDMA",
+ coex_dm->ps_tdma_para, ps_tdma_case);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr,
- coex_dm->cur_ignore_wlan_act);
+ "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting*/
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ "\r\n %-35s", "============[Hw setting]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
- coex_dm->bt_rf0x1e_backup);
+ "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+ coex_dm->bt_rf0x1e_backup);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
- "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
- u1tmp[0], u1tmp[1]);
+ "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+ u1tmp[0], u1tmp[1]);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x8db(ADC)/0xc5b[29:25](DAC)",
- ((u1tmp[0]&0x60)>>5), ((u1tmp[1]&0x3e)>>1));
+ "0x8db(ADC)/0xc5b[29:25](DAC)",
+ ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
- u4tmp[0]&0xff, ((u4tmp[0]&0x30000000)>>28));
+ "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+ u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/ 0x4c[24:23]/ 0x974",
- u1tmp[0], ((u4tmp[0]&0x01800000)>>23), u4tmp[1]);
+ "0x40/ 0x4c[24:23]/ 0x974",
+ u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522",
- u4tmp[0], u1tmp[0]);
+ "0x550(bcn ctrl)/0x522",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(DIG)/0xa0a(CCK-TH)",
- u4tmp[0], u1tmp[0]);
+ "0xc50(DIG)/0xa0a(CCK-TH)",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "OFDM-FA/ CCK-FA",
- u4tmp[0], (u1tmp[0]<<8) + u1tmp[1]);
+ "OFDM-FA/ CCK-FA",
+ u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8",
- u4tmp[0], u4tmp[1], u4tmp[2]);
+ "0x6c0/0x6c4/0x6c8",
+ u4tmp[0], u4tmp[1], u4tmp[2]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770 (hi-pri Rx/Tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ "0x770 (hi-pri Rx/Tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
"0x774(low-pri Rx/Tx)",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
@@ -3554,22 +3469,24 @@ ex_halbtc8821a2ant_display_coex_info(
/* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "0x41b (mgntQ hang chk == 0xf)",
- u1tmp[0]);
+ "0x41b (mgntQ hang chk == 0xf)",
+ u1tmp[0]);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_IPS_ENTER == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
/*halbtc8821a2ant_init_coex_dm(btcoexist);*/
}
@@ -3577,52 +3494,59 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_LPS_ENABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_SCAN_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
} else if (BTC_SCAN_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
}
void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (BTC_ASSOCIATE_START == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
} else if (BTC_ASSOCIATE_FINISH == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
}
void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 type)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask*/
@@ -3643,26 +3567,29 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- btc_alg_dbg(ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type) {
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
if (type == BTC_PACKET_DHCP) {
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
}
void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmp_buf, u8 length)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
u8 i, rsp_source = 0;
static u32 set_bt_lna_cnt, set_bt_psd_mode;
@@ -3676,19 +3603,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- btc_iface_dbg(INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -3814,8 +3741,10 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
- btc_iface_dbg(INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3823,36 +3752,37 @@ void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- btc_alg_dbg(ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- btc_iface_dbg(INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
halbtc8821a2ant_query_bt_info(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 91cc1397b150..150aeb8e79d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -141,11 +141,40 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
- btc_alg_dbg(ALGO_TRACE,
- "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "static halbtc_get_wifi_central_chnl:%d\n", chnl);
return chnl;
}
+u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.single_ant_path;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+ u8 num;
+
+ if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+ num = 2;
+ else
+ num = 1;
+
+ return num;
+}
+
+u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ return rtlhal->package_type;
+}
+
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
@@ -335,6 +364,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
case BTC_GET_U4_BT_PATCH_VER:
*u32_tmp = halbtc_get_bt_patch_version(btcoexist);
break;
+ case BTC_GET_U4_VENDOR:
+ *u32_tmp = BTC_VENDOR_OTHER;
+ break;
case BTC_GET_U1_WIFI_DOT11_CHNL:
*u8_tmp = rtlphy->current_channel;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 3d308ebbe048..601bbe1d22b3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -116,18 +116,6 @@ extern u32 btc_dbg_type[];
#define WIFI_P2P_GO_CONNECTED BIT3
#define WIFI_P2P_GC_CONNECTED BIT4
-#define btc_alg_dbg(dbgflag, fmt, ...) \
-do { \
- if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
-} while (0)
-#define btc_iface_dbg(dbgflag, fmt, ...) \
-do { \
- if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
-} while (0)
-
-
#define BTC_RSSI_HIGH(_rssi_) \
((_rssi_ == BTC_RSSI_STATE_HIGH || \
_rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
@@ -228,6 +216,7 @@ enum btc_get_type {
BTC_GET_U4_WIFI_FW_VER,
BTC_GET_U4_WIFI_LINK_STATUS,
BTC_GET_U4_BT_PATCH_VER,
+ BTC_GET_U4_VENDOR,
/* type u1Byte */
BTC_GET_U1_WIFI_DOT11_CHNL,
@@ -245,6 +234,12 @@ enum btc_get_type {
BTC_GET_MAX
};
+enum btc_vendor {
+ BTC_VENDOR_LENOVO,
+ BTC_VENDOR_ASUS,
+ BTC_VENDOR_OTHER
+};
+
enum btc_set_type {
/* type bool */
BTC_SET_BL_BT_DISABLE,
@@ -263,6 +258,7 @@ enum btc_set_type {
/* type trigger some action */
BTC_SET_ACT_GET_BT_RSSI,
BTC_SET_ACT_AGGREGATE_CTRL,
+ BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
/********* for 1Ant **********/
/* type bool */
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index d3fd9211b3a4..46e0fa6be273 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -178,17 +178,6 @@ struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
}
EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
-{
- u8 num;
-
- if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
- num = 2;
- else
- num = 1;
-
- return num;
-}
enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
{
@@ -209,11 +198,6 @@ u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
return rtlpriv->btcoexist.btc_info.btcoexist;
}
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
-{
- return rtlpriv->btcoexist.btc_info.bt_type;
-}
-
MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index ccd5a0f91e3b..fff5117e1c4e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -46,9 +46,12 @@ void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv);
+
enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 8fe8b4cfae6c..f7a7dcbf945e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -45,12 +45,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
u32 target_command;
u32 target_content = 0;
- u8 entry_i;
+ int entry_i;
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
key_cont_128, 16);
- for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ /* 0-1 config + mac, 2-5 fill 128key, 6-7 are reserved */
+ for (entry_i = CAM_CONTENT_COUNT - 1; entry_i >= 0; entry_i--) {
target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
target_command = target_command | BIT(31) | BIT(16);
@@ -102,7 +103,6 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
target_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- udelay(100);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"WRITE A4: %x\n", target_content);
@@ -285,8 +285,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
u8 i, *addr;
if (NULL == sta_addr) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- "sta_addr is NULL.\n");
+ pr_err("sta_addr is NULL.\n");
return TOTAL_CAM_ENTRY;
}
/* Does STA already exist? */
@@ -298,9 +297,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Get a free CAM entry. */
for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
if ((bitmap & BIT(0)) == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- "-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
- rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
+ pr_err("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+ rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
sta_addr, ETH_ALEN);
@@ -319,14 +317,12 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
u8 i, *addr;
if (NULL == sta_addr) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- "sta_addr is NULL.\n");
+ pr_err("sta_addr is NULL.\n");
return;
}
if (is_zero_ether_addr(sta_addr)) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- "sta_addr is %pM\n", sta_addr);
+ pr_err("sta_addr is %pM\n", sta_addr);
return;
}
/* Does STA already exist? */
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index ded1493fee9c..a4f8e326a2bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -117,8 +117,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is too big!\n");
+ pr_err("Firmware is too big!\n");
release_firmware(firmware);
return;
}
@@ -234,6 +233,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
int err = 0;
+ u8 retry_limit = 0x30;
if (mac->vif) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -272,6 +272,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
+ retry_limit = 0x07;
break;
case NL80211_IFTYPE_P2P_GO:
mac->p2p = P2P_ROLE_GO;
@@ -288,6 +289,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->basic_rates = 0xff0;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
+
+ retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
@@ -301,10 +304,12 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->basic_rates = 0xff0;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
+
+ retry_limit = 0x07;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "operation mode %d is not support!\n", vif->type);
+ pr_err("operation mode %d is not supported!\n",
+ vif->type);
err = -EOPNOTSUPP;
goto out;
}
@@ -322,6 +327,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ mac->retry_long = retry_limit;
+ mac->retry_short = retry_limit;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ (u8 *)(&retry_limit));
out:
mutex_unlock(&rtlpriv->locks.conf_mutex);
return err;
@@ -646,10 +655,15 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
hw->conf.long_frame_max_tx_count);
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ /* at bring-up, everything changes (changed == ~0), which indicates
+ * the first open, so use our default value instead of that of wiphy.
+ */
+ if (changed != ~0) {
+ mac->retry_long = hw->conf.long_frame_max_tx_count;
+ mac->retry_short = hw->conf.long_frame_max_tx_count;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
(u8 *)(&hw->conf.long_frame_max_tx_count));
+ }
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
@@ -764,9 +778,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
default:
mac->bw_40 = false;
mac->bw_80 = false;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- channel_type);
+ pr_err("switch case %#x not processed\n",
+ channel_type);
break;
}
}
@@ -1399,8 +1412,7 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
"IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
return rtl_rx_agg_stop(hw, sta, tid);
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "IEEE80211_AMPDU_ERR!!!!:\n");
+ pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
return -EOPNOTSUPP;
}
return 0;
@@ -1532,12 +1544,11 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = AESCMAC_ENCRYPTION;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW don't support CMAC encrypiton, use software CMAC encrypiton\n");
+ "HW don't support CMAC encryption, use software CMAC encryption\n");
err = -EOPNOTSUPP;
goto out_unlock;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "alg_err:%x!!!!:\n", key->cipher);
+ pr_err("alg_err:%x!!!!:\n", key->cipher);
goto out_unlock;
}
if (key_type == WEP40_ENCRYPTION ||
@@ -1613,8 +1624,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"set pairwise key\n");
if (!sta) {
- RT_ASSERT(false,
- "pairwise key without mac_addr\n");
+ WARN_ONCE(true,
+ "rtlwifi: pairwise key without mac_addr\n");
err = -EOPNOTSUPP;
goto out_unlock;
@@ -1662,8 +1673,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "cmd_err:%x!!!!:\n", cmd);
+ pr_err("cmd_err:%x!!!!:\n", cmd);
}
out_unlock:
mutex_unlock(&rtlpriv->locks.conf_mutex);
@@ -1804,8 +1814,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
return true;
default:
- RT_ASSERT(false,
- "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+ WARN_ONCE(true,
+ "rtlwifi: rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
break;
}
}
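
The core.c hunks above give each interface a default retry limit (0x30 initially, tightened to 0x07 for several interface types) and make rtl_op_config() skip the wiphy-supplied value on the first configuration pass, where mac80211 reports changed == ~0. A minimal sketch of that gating, as plain userspace C with an illustrative stand-in for the IEEE80211_CONF_CHANGE_RETRY_LIMITS bit (not the real mac80211 flag):

/*
 * Minimal userspace sketch (not kernel code) of the retry-limit gating:
 * on the very first rtl_op_config() call mac80211 passes changed == ~0,
 * and the driver keeps the default programmed in rtl_op_add_interface()
 * instead of the wiphy value.
 */
#include <stdio.h>
#include <stdint.h>

#define CHANGE_RETRY_LIMITS	(1u << 5)	/* stand-in flag bit */

static uint8_t retry_long = 0x30;		/* default from add_interface */

static void config(uint32_t changed, uint8_t wiphy_retry)
{
	if (changed & CHANGE_RETRY_LIMITS) {
		/* First open (changed == ~0): keep the driver default. */
		if (changed != ~0u)
			retry_long = wiphy_retry;
	}
	printf("changed=%#x -> retry_long=%#x\n",
	       (unsigned int)changed, (unsigned int)retry_long);
}

int main(void)
{
	config(~0u, 4);				/* first open: 0x30 is kept */
	config(CHANGE_RETRY_LIMITS, 4);		/* later change: wiphy value applied */
	return 0;
}
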
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 33905bbacad2..7ecac6116d5d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -26,35 +26,32 @@
#include <linux/moduleparam.h>
-void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+#ifdef CONFIG_RTLWIFI_DEBUG
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+ const char *fmt, ...)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 i;
+ if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
+ (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ struct va_format vaf;
+ va_list args;
- rtlpriv->dbg.global_debugcomponents =
- COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND |
- COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC |
- COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC |
- COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS |
- COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD |
- COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN |
- COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 |
- COMP_REGD | COMP_CHAN | COMP_BT_COEXIST;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
- for (i = 0; i < DBGP_TYPE_MAX; i++)
- rtlpriv->dbg.dbgp_type[i] = 0;
+ pr_info(":<%lx> %pV", in_interrupt(), &vaf);
- /*Init Debug flag enable condition */
+ va_end(args);
+ }
}
-EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
+EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
-#ifdef CONFIG_RTLWIFI_DEBUG
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
- const char *modname, const char *fmt, ...)
+void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
+ const char *fmt, ...)
{
- if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) &&
- (level <= rtlpriv->dbg.global_debuglevel))) {
+ if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
+ (level <= rtlpriv->cfg->mod_params->debug_level))) {
struct va_format vaf;
va_list args;
@@ -63,13 +60,25 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV",
- modname, __builtin_return_address(0),
- in_interrupt(), in_atomic(),
- &vaf);
+ pr_info("%pV", &vaf);
va_end(args);
}
}
-EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
+EXPORT_SYMBOL_GPL(_rtl_dbg_print);
+
+void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
+ const char *titlestring,
+ const void *hexdata, int hexdatalen)
+{
+ if (unlikely(((comp) & rtlpriv->cfg->mod_params->debug_mask) &&
+ ((level) <= rtlpriv->cfg->mod_params->debug_level))) {
+ pr_info("In process \"%s\" (pid %i): %s\n",
+ current->comm, current->pid, titlestring);
+ print_hex_dump_bytes("", DUMP_PREFIX_NONE,
+ hexdata, hexdatalen);
+ }
+}
+EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
+
#endif
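
With rtl_dbgp_flag_init() removed, filtering moves entirely into the exported helpers: a message is printed only when its component bit is enabled in the debug_mask module parameter and its level is at or below debug_level. A self-contained userspace model of that gate (placeholder COMP_* values and levels, not the driver's definitions):

/*
 * Userspace model (not the driver code) of the new filter in
 * _rtl_dbg_trace()/_rtl_dbg_print(): emit a message only when its component
 * bit is set in debug_mask and its level does not exceed debug_level.
 */
#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>

static uint64_t debug_mask = 0x3;	/* two component bits enabled */
static int debug_level = 2;

static void dbg_print(uint64_t comp, int level, const char *fmt, ...)
{
	va_list args;

	if (!((comp & debug_mask) && level <= debug_level))
		return;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	dbg_print(0x1, 1, "printed: component enabled, level 1 <= 2\n");
	dbg_print(0x4, 1, "suppressed: component bit not in debug_mask\n");
	dbg_print(0x1, 5, "suppressed: level 5 > debug_level\n");
	return 0;
}
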
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 6156a79328c1..bf5339f1c1bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -36,7 +36,7 @@
*unexpected HW behavior, HW BUG
*and so on.
*/
-#define DBG_EMERG 0
+/*#define DBG_EMERG 0 */
/*
*Abnormal, rare, or unexpeted cases.
@@ -166,55 +166,36 @@ enum dbgp_flag_e {
#ifdef CONFIG_RTLWIFI_DEBUG
-#define RT_ASSERT(_exp, fmt, ...) \
-do { \
- if (!(_exp)) { \
- printk(KERN_DEBUG KBUILD_MODNAME ":%s(): " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
-} while (0)
-
-
struct rtl_priv;
-__printf(5, 6)
+__printf(4, 5)
void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
- const char *modname, const char *fmt, ...);
+ const char *fmt, ...);
+
+__printf(4, 5)
+void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
+ const char *fmt, ...);
+
+void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
+ const char *titlestring,
+ const void *hexdata, int hexdatalen);
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
_rtl_dbg_trace(rtlpriv, comp, level, \
- KBUILD_MODNAME, fmt, ##__VA_ARGS__)
+ fmt, ##__VA_ARGS__)
#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
-do { \
- if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
- printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
- } \
-} while (0)
+ _rtl_dbg_print(rtlpriv, dbgtype, dbgflag, fmt, ##__VA_ARGS__)
#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
_hexdatalen) \
-do { \
- if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \
- (_level <= rtlpriv->dbg.global_debuglevel))) { \
- printk(KERN_DEBUG "%s: In process \"%s\" (pid %i): %s\n", \
- KBUILD_MODNAME, current->comm, current->pid, \
- _titlestring); \
- print_hex_dump_bytes("", DUMP_PREFIX_NONE, \
- _hexdata, _hexdatalen); \
- } \
-} while (0)
+ _rtl_dbg_print_data(rtlpriv, _comp, _level, \
+ _titlestring, _hexdata, _hexdatalen)
#else
struct rtl_priv;
-__printf(2, 3)
-static inline void RT_ASSERT(int exp, const char *fmt, ...)
-{
-}
-
__printf(4, 5)
static inline void RT_TRACE(struct rtl_priv *rtlpriv,
int comp, int level,
@@ -237,6 +218,4 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
}
#endif
-
-void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
#endif
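
The debug.h diff above leaves RT_TRACE/RTPRINT/RT_PRINT_DATA as thin wrappers around the exported functions when CONFIG_RTLWIFI_DEBUG is set, and as empty inline stubs otherwise, so call sites keep type-checking either way. A small standalone illustration of that pattern (names and the DEBUG switch are placeholders, not the driver's):

/*
 * Userspace model of the debug/no-debug split: with debug enabled the macro
 * forwards to a real function; without it the call resolves to an empty
 * inline stub and emits no output.
 */
#include <stdio.h>
#include <stdarg.h>

#define DEBUG 1

#ifdef DEBUG
static void dbg_trace(int comp, int level, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
#define TRACE(comp, level, fmt, ...) \
	dbg_trace(comp, level, fmt, ##__VA_ARGS__)
#else
static inline void TRACE(int comp, int level, const char *fmt, ...)
{
}
#endif

int main(void)
{
	TRACE(0x1, 2, "hello %s\n", "world");
	return 0;
}
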
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 7becfef6cd5c..ef9acd466cca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -31,6 +31,9 @@ static const u8 MAX_PGPKT_SIZE = 9;
static const u8 PGPKT_DATA_SIZE = 8;
static const int EFUSE_MAX_SIZE = 512;
+#define START_ADDRESS 0x1000
+#define REG_MCUFWDL 0x0080
+
static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
{0, 0, 0, 2},
{0, 1, 0, 2},
@@ -70,8 +73,6 @@ static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
u8 *targetdata);
static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u16 efuse_addr, u8 word_en, u8 *data);
-static void efuse_power_switch(struct ieee80211_hw *hw, u8 write,
- u8 pwrstate);
static u16 efuse_get_current_size(struct ieee80211_hw *hw);
static u8 efuse_calculate_word_cnts(u8 word_en);
@@ -1121,7 +1122,7 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
return badworden;
}
-static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
+void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1207,6 +1208,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
}
}
}
+EXPORT_SYMBOL(efuse_power_switch);
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
@@ -1259,8 +1261,7 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
break;
case EEPROM_93C46:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL8XXX did not boot from eeprom, check it !!\n");
+ pr_err("RTL8XXX did not boot from eeprom, check it !!\n");
return 1;
default:
@@ -1321,3 +1322,45 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
return 0;
}
EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *pu4byteptr = (u8 *)buffer;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+}
+EXPORT_SYMBOL_GPL(rtl_fw_block_write);
+
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+ u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8)(page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ rtl_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl_fw_page_write);
+
+void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8)(fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+
+ *pfwlen = fwlen;
+}
+EXPORT_SYMBOL_GPL(rtl_fill_dummy);
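
The three helpers added to efuse.c replace per-chip copies of the firmware download plumbing: rtl_fill_dummy() zero-pads the image to a four-byte boundary, and rtl_fw_page_write()/rtl_fw_block_write() push it to the chip page by page. A minimal userspace replay of the padding arithmetic (it mirrors, rather than calls, the exported helper):

/*
 * Standalone check of the rule in rtl_fill_dummy(): the firmware image is
 * zero-padded so its length becomes a multiple of four before the
 * page/block writes.
 */
#include <stdio.h>
#include <stdint.h>

static void fill_dummy(uint8_t *buf, uint32_t *len)
{
	uint32_t fwlen = *len;
	uint8_t remain = (uint8_t)(fwlen % 4);

	remain = (remain == 0) ? 0 : (4 - remain);
	while (remain > 0) {
		buf[fwlen++] = 0;
		remain--;
	}
	*len = fwlen;
}

int main(void)
{
	uint8_t fw[16] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
	uint32_t len = 5;

	fill_dummy(fw, &len);
	printf("padded length = %u\n", (unsigned int)len);	/* prints 8 */
	return 0;
}
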
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 51aa1210def5..952fdc288f0e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -109,7 +109,12 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
+void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+ u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 8bfe020edd3a..2e6b888bd417 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -174,9 +174,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- rtlpci->const_support_pciaspm);
+ pr_err("switch case %#x not processed\n",
+ rtlpci->const_support_pciaspm);
break;
}
@@ -1214,6 +1213,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
mac->current_ampdu_density = 7;
mac->current_ampdu_factor = 3;
+ /*Retry Limit*/
+ mac->retry_short = 7;
+ mac->retry_long = 7;
+
/*QOS*/
rtlpci->acm_method = EACMWAY2_SW;
@@ -1247,9 +1250,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
&buffer_desc_dma);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Cannot allocate TX ring (prio = %d)\n",
- prio);
+ pr_err("Cannot allocate TX ring (prio = %d)\n",
+ prio);
return -ENOMEM;
}
@@ -1266,8 +1268,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
sizeof(*desc) * entries, &desc_dma);
if (!desc || (unsigned long)desc & 0xFF) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Cannot allocate TX ring (prio = %d)\n", prio);
+ pr_err("Cannot allocate TX ring (prio = %d)\n", prio);
return -ENOMEM;
}
@@ -1314,8 +1315,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
&rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Cannot allocate RX ring\n");
+ pr_err("Cannot allocate RX ring\n");
return -ENOMEM;
}
@@ -1338,8 +1338,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
&rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Cannot allocate RX ring\n");
+ pr_err("Cannot allocate RX ring\n");
return -ENOMEM;
}
@@ -1799,15 +1798,13 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
_rtl_pci_init_struct(hw, pdev);
err = _rtl_pci_init_trx_ring(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "tx ring initialization failed\n");
+ pr_err("tx ring initialization failed\n");
return err;
}
@@ -1820,6 +1817,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
int err;
@@ -1837,6 +1835,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
"Failed to config hardware!\n");
return err;
}
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ &rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
@@ -2174,15 +2174,15 @@ int rtl_pci_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
+ WARN_ONCE(true, "%s : Cannot enable new PCI device\n",
pci_name(pdev));
return err;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- RT_ASSERT(false,
- "Unable to obtain 32bit DMA for consistent allocations\n");
+ WARN_ONCE(true,
+ "rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
err = -ENOMEM;
goto fail1;
}
@@ -2193,7 +2193,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
sizeof(struct rtl_priv), &rtl_ops);
if (!hw) {
- RT_ASSERT(false,
+ WARN_ONCE(true,
"%s : ieee80211 alloc failed\n", pci_name(pdev));
err = -ENOMEM;
goto fail1;
@@ -2219,20 +2219,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->intf_ops = &rtl_pci_ops;
rtlpriv->glb_var = &rtl_global_var;
- /*
- *init dbgp flags before all
- *other functions, because we will
- *use it in other funtions like
- *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
- *you can not use these macro
- *before this
- */
- rtl_dbgp_flag_init(hw);
-
/* MEM map */
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
- RT_ASSERT(false, "Can't obtain PCI resources\n");
+ WARN_ONCE(true, "rtlwifi: Can't obtain PCI resources\n");
goto fail1;
}
@@ -2245,7 +2235,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
(unsigned long)pci_iomap(pdev,
rtlpriv->cfg->bar_id, pmem_len);
if (rtlpriv->io.pci_mem_start == 0) {
- RT_ASSERT(false, "Can't map PCI mem\n");
+ WARN_ONCE(true, "rtlwifi: Can't map PCI mem\n");
err = -ENOMEM;
goto fail2;
}
@@ -2275,7 +2265,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+ pr_err("Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
@@ -2287,34 +2277,25 @@ int rtl_pci_probe(struct pci_dev *pdev,
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't allocate sw for mac80211\n");
+ pr_err("Can't allocate sw for mac80211\n");
goto fail3;
}
/* Init PCI sw */
err = rtl_pci_init(hw, pdev);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
+ pr_err("Failed to init PCI\n");
goto fail3;
}
err = ieee80211_register_hw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't register mac80211 hw.\n");
+ pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->mac80211.mac80211_registered = 1;
- err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "failed to create sysfs device attributes\n");
- goto fail3;
- }
-
/*init rfkill */
rtl_init_rfkill(hw); /* Init PCI sw */
@@ -2364,8 +2345,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
wait_for_completion(&rtlpriv->firmware_loading_complete);
clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
- sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
-
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 578b1d900bfb..d9039ea10ba4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -271,10 +271,10 @@ struct mp_adapter {
};
struct rtl_pci_priv {
+ struct bt_coexist_info bt_coexist;
+ struct rtl_led_ctl ledctl;
struct rtl_pci dev;
struct mp_adapter ndis_adapter;
- struct rtl_led_ctl ledctl;
- struct bt_coexist_info bt_coexist;
};
#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
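
pci.h now places bt_coexist and ledctl at the front of rtl_pci_priv. A plausible reading, stated here as an assumption rather than a fact taken from the diff, is that keeping these members at the start gives every bus-specific private area the same leading layout, the common-prefix idiom sketched below with made-up struct names:

/*
 * Illustration only: two bus-specific private areas that deliberately start
 * with the same member, so generic code can reach it without knowing the bus.
 */
#include <stdio.h>

struct led_ctl {
	int opendrain;
};

struct pci_priv {
	struct led_ctl ledctl;
	int pci_only_stuff;
};

struct usb_priv {
	struct led_ctl ledctl;
	int usb_only_stuff;
};

/* Valid C: a pointer to a struct also points at its first member. */
static struct led_ctl *get_ledctl(void *bus_priv)
{
	return (struct led_ctl *)bus_priv;
}

int main(void)
{
	struct pci_priv p = { .ledctl = { .opendrain = 1 } };
	struct usb_priv u = { .ledctl = { .opendrain = 0 } };

	printf("pci opendrain=%d usb opendrain=%d\n",
	       get_ledctl(&p)->opendrain, get_ledctl(&u)->opendrain);
	return 0;
}
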
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index d0ffc4d508cf..0d152877d969 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -34,6 +34,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
/*<1> reset trx ring */
if (rtlhal->interface == INTF_PCI)
@@ -46,6 +47,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
return false;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ &rtlmac->retry_long);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
@@ -150,8 +153,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", state_toset);
+ pr_err("switch case %#x not processed\n", state_toset);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index ce8621a0f7aa..951d257cd4c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -267,8 +267,7 @@ static void *rtl_rate_alloc_sta(void *ppriv,
rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
if (!rate_priv) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unable to allocate private rc structure\n");
+ pr_err("Unable to allocate private rc structure\n");
return NULL;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 6ee6bf8e7eaf..558c31bf5c80 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -440,7 +440,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n");
+ "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 5360d5332359..21ed9ad3be7a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -27,6 +27,7 @@
#include "../pci.h"
#include "../base.h"
#include "../core.h"
+#include "../efuse.h"
#include "reg.h"
#include "def.h"
#include "fw.h"
@@ -53,63 +54,6 @@ static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
-static void _rtl88e_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *)buffer;
- u32 *pu4BytePtr = (u32 *)buffer;
- u32 i, offset, blockcount, remainsize;
-
- blockcount = size / blocksize;
- remainsize = size % blocksize;
-
- for (i = 0; i < blockcount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainsize) {
- offset = blockcount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainsize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferptr + i));
- }
- }
-}
-
-static void _rtl88e_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl88e_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
-
- *pfwlen = fwlen;
-}
-
static void _rtl88e_write_fw(struct ieee80211_hw *hw,
enum version_8188e version, u8 *buffer, u32 size)
{
@@ -120,27 +64,24 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
- _rtl88e_fill_dummy(bufferptr, &size);
+ rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8192C_PAGE_SIZE;
remainsize = size % FW_8192C_PAGE_SIZE;
- if (pagenums > 8) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater then 8\n");
- }
+ if (pagenums > 8)
+ pr_err("Page numbers should not greater then 8\n");
for (page = 0; page < pagenums; page++) {
offset = page * FW_8192C_PAGE_SIZE;
- _rtl88e_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192C_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
}
if (remainsize) {
offset = pagenums * FW_8192C_PAGE_SIZE;
page = pagenums;
- _rtl88e_fw_page_write(hw, page, (bufferptr + offset),
- remainsize);
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
}
}
@@ -157,15 +98,10 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n",
+ value32);
goto exit;
}
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
@@ -176,20 +112,15 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw)
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x.\n",
- value32);
- err = 0;
- goto exit;
- }
+ if (value32 & WINTINI_RDY)
+ return 0;
udelay(FW_8192C_POLLING_DELAY);
} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
+ pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
+ value32);
exit:
return err;
@@ -234,13 +165,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
_rtl88e_enable_fw_download(hw, false);
err = _rtl88e_fw_free_to_go(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Firmware is ready to run!\n");
- }
+ if (err)
+ pr_err("Firmware is not ready to run!\n");
return 0;
}
@@ -309,8 +235,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
while (!write_sucess) {
wait_writeh2c_limit--;
if (wait_writeh2c_limit == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Write H2C fail because no trigger for FW INT!\n");
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
break;
}
@@ -434,8 +359,8 @@ void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8188ee: error H2C cmd because of Fw download fail!!!\n");
return;
}
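
_rtl88e_fw_free_to_go() above keeps two bounded polls of REG_MCUFWDL: one for the checksum-report bit and, after setting MCUFWDL_RDY, one for WINTINI_RDY that now simply returns 0 on success. A userspace sketch of that second loop, with a faked register read and placeholder bit position and retry budget:

/*
 * Poll-until-ready sketch (not kernel code): read a status word until a
 * ready bit appears or the retry budget runs out.
 */
#include <stdio.h>
#include <stdint.h>

#define WINTINI_RDY		(1u << 6)	/* placeholder bit */
#define POLLING_TIMEOUT_COUNT	8

static int fake_reads;

static uint32_t read_mcufwdl(void)
{
	/* Pretend the firmware flips the ready bit on the third read. */
	return (++fake_reads >= 3) ? WINTINI_RDY : 0;
}

int main(void)
{
	int counter = 0;

	do {
		if (read_mcufwdl() & WINTINI_RDY) {
			printf("FW ready after %d polls\n", counter + 1);
			return 0;
		}
	} while (counter++ < POLLING_TIMEOUT_COUNT);

	printf("Polling FW ready fail\n");
	return 1;
}
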
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 37d6efc3d240..0ba26d27d11c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -358,8 +358,7 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -572,9 +571,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- e_aci);
+ pr_err("switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -737,8 +735,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
2, array);
break; }
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -759,9 +756,8 @@ static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -821,19 +817,18 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpriv->rtlhal.up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
- rtl88ee_sw_led_on(hw, pLed0);
+ rtl88ee_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
- rtl88ee_sw_led_on(hw, pLed0);
+ rtl88ee_sw_led_on(hw, pled0);
else
- rtl88ee_sw_led_off(hw, pLed0);
+ rtl88ee_sw_led_off(hw, pled0);
}
static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
@@ -1096,7 +1091,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
rtstatus = _rtl88ee_init_mac(hw);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_info("Init MAC failed\n");
err = 1;
goto exit;
}
@@ -1252,8 +1247,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not support!\n", type);
+ pr_err("Network type %d not support!\n", type);
return 1;
break;
}
@@ -1352,7 +1346,7 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8188ee: invalid aci: %d !\n", aci);
break;
}
}
@@ -1936,14 +1930,13 @@ exit:
static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -1987,7 +1980,7 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl88ee_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
_rtl88ee_hal_customized_behavior(hw);
}
@@ -2354,8 +2347,8 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", enc_algo);
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2373,9 +2366,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id =
rtl_cam_get_free_entry(hw, p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index 6ea7fd7bb527..df3e214460db 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -67,7 +67,6 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -79,7 +78,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
case LED_PIN_LED0:
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain) {
+ if (rtlpriv->ledctl.led_opendrain) {
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(3) | BIT(5) | BIT(6)));
ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -104,24 +103,26 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl88ee_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
- _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl88ee_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl88ee_sw_led_on(hw, pLed0);
+ rtl88ee_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl88ee_sw_led_off(hw, pLed0);
+ rtl88ee_sw_led_off(hw, pled0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index fffaa92eda81..14a256062614 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -176,7 +176,7 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
offset &= 0xff;
newoffset = offset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ pr_err("return all one\n");
return 0xFFFFFFFF;
}
tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -220,7 +220,7 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ pr_err("stop\n");
return;
}
offset &= 0xff;
@@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -383,13 +383,13 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power =
@@ -1095,8 +1095,7 @@ void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -1137,8 +1136,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -1162,8 +1161,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1231,8 +1230,8 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if (rtlphy->set_bwmode_inprogress)
return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14),
+ "rtl8188ee: WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
@@ -1280,8 +1279,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8188ee: illegal channel for Zebra: %d\n", channel);
_rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -1303,8 +1302,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd = &postcommoncmd[*step];
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Invalid 'stage' = %d, Check it!\n", *stage);
+ pr_err("Invalid 'stage' = %d, Check it!\n",
+ *stage);
return true;
}
@@ -1367,7 +1366,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "rtl8188ee: cmdtable cannot be NULL.\n");
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index 26ac4c2903c7..30798b12a363 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -51,8 +51,7 @@ void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index f361808def47..7661cfa53032 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -131,8 +131,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0);
rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -165,8 +163,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw.\n");
+ pr_info("Can't alloc buffer for fw.\n");
return 1;
}
@@ -177,8 +174,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_info("Failed to request firmware!\n");
return 1;
}
@@ -278,7 +274,8 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = false,
.msi_support = true,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
@@ -394,7 +391,8 @@ MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin");
module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl88ee_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl88ee_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
@@ -406,7 +404,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 3e3b88664883..09c908d4cf91 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -760,7 +760,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -779,7 +779,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
@@ -799,7 +799,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -815,7 +815,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(pdesc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index bdc132bef822..0b5a06ffa482 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -638,7 +638,6 @@ EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
@@ -651,20 +650,20 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
u32 edca_be_dl = 0x5ea42b;
bool bt_change_edca = false;
- if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
- (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
+ if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) ||
+ (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) {
rtlpriv->dm.current_turbo_edca = false;
- last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
- last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
+ last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul;
+ last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl;
}
- if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
- edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
+ if (rtlpriv->btcoexist.bt_edca_ul != 0) {
+ edca_be_ul = rtlpriv->btcoexist.bt_edca_ul;
bt_change_edca = true;
}
- if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
- edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+ edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
bt_change_edca = true;
}
@@ -673,7 +672,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
return;
}
- if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
+ if ((!mac->ht_enable) && (!rtlpriv->btcoexist.bt_coexistence)) {
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
@@ -1471,7 +1470,6 @@ EXPORT_SYMBOL(rtl92c_dm_watchdog);
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
long undec_sm_pwdb;
u8 curr_bt_rssi_state = 0x00;
@@ -1510,8 +1508,8 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
- if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
- rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
+ if (curr_bt_rssi_state != rtlpriv->btcoexist.bt_rssi_state) {
+ rtlpriv->btcoexist.bt_rssi_state = curr_bt_rssi_state;
return true;
} else {
return false;
@@ -1522,7 +1520,6 @@ EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u32 polling, ratio_tx, ratio_pri;
u32 bt_tx, bt_pri;
@@ -1542,14 +1539,14 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
return false;
bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
- if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
- rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
+ if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
+ rtlpriv->btcoexist.bt_cur_state = bt_state;
- if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
- rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
+ if (rtlpriv->btcoexist.reg_bt_sco == 3) {
+ rtlpriv->btcoexist.bt_service = BT_IDLE;
bt_state = bt_state |
- ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+ ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
BIT_OFFSET_LEN_MASK_32(2, 1);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
@@ -1559,10 +1556,10 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
ratio_tx = bt_tx * 1000 / polling;
ratio_pri = bt_pri * 1000 / polling;
- rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
- rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
+ rtlpriv->btcoexist.ratio_tx = ratio_tx;
+ rtlpriv->btcoexist.ratio_pri = ratio_pri;
- if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
+ if (bt_state && rtlpriv->btcoexist.reg_bt_sco == 3) {
if ((ratio_tx < 30) && (ratio_pri < 30))
cur_service_type = BT_IDLE;
@@ -1577,17 +1574,17 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
else
cur_service_type = BT_OTHER_ACTION;
- if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
- rtlpcipriv->bt_coexist.bt_service = cur_service_type;
+ if (cur_service_type != rtlpriv->btcoexist.bt_service) {
+ rtlpriv->btcoexist.bt_service = cur_service_type;
bt_state = bt_state |
- ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+ ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
- ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
+ ((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
/* Add interrupt migration when bt is not ini
* idle state (no traffic). */
- if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
+ if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
rtl_write_word(rtlpriv, 0x504, 0x0ccc);
rtl_write_byte(rtlpriv, 0x506, 0x54);
rtl_write_byte(rtlpriv, 0x507, 0x54);
@@ -1626,80 +1623,77 @@ static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
-
- if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
- } else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
- } else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
- if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
+
+ if (rtlpriv->btcoexist.bt_service == BT_OTHERBUSY) {
+ rtlpriv->btcoexist.bt_edca_ul = 0x5ea72b;
+ rtlpriv->btcoexist.bt_edca_dl = 0x5ea72b;
+ } else if (rtlpriv->btcoexist.bt_service == BT_BUSY) {
+ rtlpriv->btcoexist.bt_edca_ul = 0x5eb82f;
+ rtlpriv->btcoexist.bt_edca_dl = 0x5eb82f;
+ } else if (rtlpriv->btcoexist.bt_service == BT_SCO) {
+ if (rtlpriv->btcoexist.ratio_tx > 160) {
+ rtlpriv->btcoexist.bt_edca_ul = 0x5ea72f;
+ rtlpriv->btcoexist.bt_edca_dl = 0x5ea72f;
} else {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
+ rtlpriv->btcoexist.bt_edca_ul = 0x5ea32b;
+ rtlpriv->btcoexist.bt_edca_dl = 0x5ea42b;
}
} else {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+ rtlpriv->btcoexist.bt_edca_ul = 0;
+ rtlpriv->btcoexist.bt_edca_dl = 0;
}
- if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
- (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
+ if ((rtlpriv->btcoexist.bt_service != BT_IDLE) &&
+ (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
(rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
- (rtlpcipriv->bt_coexist.bt_rssi_state &
+ (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_BG_EDCA_LOW)) {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
+ rtlpriv->btcoexist.bt_edca_ul = 0x5eb82b;
+ rtlpriv->btcoexist.bt_edca_dl = 0x5eb82b;
}
}
static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
/* Only enable HW BT coexist when BT in "Busy" state. */
if (rtlpriv->mac80211.vendor == PEER_CISCO &&
- rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
+ rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else {
- if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
- (rtlpcipriv->bt_coexist.bt_rssi_state &
+ if ((rtlpriv->btcoexist.bt_service == BT_BUSY) &&
+ (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- } else if ((rtlpcipriv->bt_coexist.bt_service ==
+ } else if ((rtlpriv->btcoexist.bt_service ==
BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
WIRELESS_MODE_N_24G) &&
- (rtlpcipriv->bt_coexist.bt_rssi_state &
+ (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
+ } else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
}
}
- if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
+ if (rtlpriv->btcoexist.bt_service == BT_PAN)
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
else
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
- if (rtlpcipriv->bt_coexist.bt_rssi_state &
+ if (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER) {
rtl92c_bt_set_normal(hw);
} else {
- rtlpcipriv->bt_coexist.bt_edca_ul = 0;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+ rtlpriv->btcoexist.bt_edca_ul = 0;
+ rtlpriv->btcoexist.bt_edca_dl = 0;
}
- if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
+ if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A,
0x1e,
@@ -1707,12 +1701,12 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
} else {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A, 0x1e, 0xf0,
- rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
+ rtlpriv->btcoexist.bt_rfreg_origin_1e);
}
if (!rtlpriv->dm.dynamic_txpower_enable) {
- if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
- if (rtlpcipriv->bt_coexist.bt_rssi_state &
+ if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
+ if (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_TXPOWER_LOW) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_BT2;
@@ -1732,37 +1726,34 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tmp1byte = 0;
if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) &&
- rtlpcipriv->bt_coexist.bt_coexistence)
+ rtlpriv->btcoexist.bt_coexistence)
tmp1byte |= BIT(5);
- if (rtlpcipriv->bt_coexist.bt_cur_state) {
- if (rtlpcipriv->bt_coexist.bt_ant_isolation)
+ if (rtlpriv->btcoexist.bt_cur_state) {
+ if (rtlpriv->btcoexist.bt_ant_isolation)
rtl92c_bt_ant_isolation(hw, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
- rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
+ rtlpriv->btcoexist.bt_rfreg_origin_1e);
- rtlpcipriv->bt_coexist.bt_edca_ul = 0;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+ rtlpriv->btcoexist.bt_edca_ul = 0;
+ rtlpriv->btcoexist.bt_edca_dl = 0;
}
}
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
bool wifi_connect_change;
bool bt_state_change;
bool rssi_state_change;
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
-
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
bt_state_change = rtl92c_bt_state_change(hw);
rssi_state_change = rtl92c_bt_rssi_state_change(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 7d152466152b..c7a77467b20e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -27,6 +27,7 @@
#include "../pci.h"
#include "../base.h"
#include "../core.h"
+#include "../efuse.h"
#include "../rtl8192ce/reg.h"
#include "../rtl8192ce/def.h"
#include "fw_common.h"
@@ -68,63 +69,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
-static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *)buffer;
- u32 *pu4byteptr = (u32 *)buffer;
- u32 i, offset, blockcount, remainsize;
-
- blockcount = size / blocksize;
- remainsize = size % blocksize;
-
- for (i = 0; i < blockcount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4byteptr + i));
- }
-
- if (remainsize) {
- offset = blockcount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainsize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferptr + i));
- }
- }
-}
-
-static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl92c_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
-
- *pfwlen = fwlen;
-}
-
static void _rtl92c_write_fw(struct ieee80211_hw *hw,
enum version_8192c version, u8 *buffer, u32 size)
{
@@ -140,30 +84,28 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
u32 page, offset;
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
- _rtl92c_fill_dummy(bufferptr, &size);
+ rtl_fill_dummy(bufferptr, &size);
pageNums = size / FW_8192C_PAGE_SIZE;
remainsize = size % FW_8192C_PAGE_SIZE;
- if (pageNums > 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater then 4\n");
- }
+ if (pageNums > 4)
+ pr_err("Page numbers should not greater then 4\n");
for (page = 0; page < pageNums; page++) {
offset = page * FW_8192C_PAGE_SIZE;
- _rtl92c_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192C_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
}
if (remainsize) {
offset = pageNums * FW_8192C_PAGE_SIZE;
page = pageNums;
- _rtl92c_fw_page_write(hw, page, (bufferptr + offset),
- remainsize);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ remainsize);
}
} else {
- _rtl92c_fw_block_write(hw, buffer, size);
+ rtl_fw_block_write(hw, buffer, size);
}
}
@@ -180,15 +122,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_ChkSum_rpt)));
if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n",
+ value32);
goto exit;
}
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
@@ -198,20 +135,15 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
- err = 0;
- goto exit;
- }
+ if (value32 & WINTINI_RDY)
+ return 0;
mdelay(FW_8192C_POLLING_DELAY);
} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
+ pr_err("Polling FW ready fail! REG_MCUFWDL:0x%08x.\n",
+ value32);
exit:
return err;
@@ -250,13 +182,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
_rtl92c_enable_fw_download(hw, false);
err = _rtl92c_fw_free_to_go(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Firmware is ready to run!\n");
- }
+ if (err)
+ pr_err("Firmware is not ready to run!\n");
return 0;
}
@@ -327,8 +254,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
while (!bwrite_sucess) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Write H2C fail because no trigger for FW INT!\n");
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
break;
}
@@ -485,8 +411,8 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8192c-common: return H2C cmd because of Fw download fail!!!\n");
return;
}
@@ -510,7 +436,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
while (u1b_tmp & BIT(2)) {
delay--;
if (delay == 0) {
- RT_ASSERT(false, "8051 reset fail.\n");
+ WARN_ONCE(true, "rtl8192c-common: 8051 reset fail.\n");
break;
}
udelay(50);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 94dd25cf1ca8..7c6e5d91439d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset)
{
- RT_ASSERT(false, "deprecated!\n");
+ WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_read deprecated!\n");
return 0;
}
EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
@@ -86,7 +86,7 @@ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data)
{
- RT_ASSERT(false, "deprecated!\n");
+ WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_write deprecated!\n");
}
EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
@@ -104,7 +104,7 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
offset &= 0x3f;
newoffset = offset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ pr_err("return all one\n");
return 0xFFFFFFFF;
}
tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -152,7 +152,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ pr_err("stop\n");
return;
}
offset &= 0x3f;
@@ -209,7 +209,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
if (rtlphy->rf_type == RF_1T2R) {
@@ -222,13 +222,13 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power =
@@ -745,8 +745,8 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if (rtlphy->set_bwmode_inprogress)
return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14),
+ "rtl8192c-common: WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
@@ -792,7 +792,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "rtl8192c-common: cmdtable cannot be NULL.\n");
return false;
}
@@ -837,8 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8192c-common: illegal channel for Zebra: %d\n", channel);
_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -860,8 +860,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd = &postcommoncmd[*step];
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Invalid 'stage' = %d, Check it!\n", *stage);
+ pr_err("Invalid 'stage' = %d, Check it!\n",
+ *stage);
return true;
}
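
Note that RT_ASSERT() complains when its condition is false, while WARN_ONCE() complains when its condition is true, so every converted call inverts the predicate; for the range checks above that is just De Morgan's law. A sketch of the equivalence, taken from the channel check in this hunk:

    /* old: warn unless the channel is in the valid 1..14 range */
    RT_ASSERT((channel >= 1 && channel <= 14),
              "illegal channel for Zebra: %d\n", channel);

    /* new: warn when the channel is out of range -- same set of cases */
    WARN_ONCE((channel < 1 || channel > 14),
              "rtl8192c-common: illegal channel for Zebra: %d\n", channel);
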
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 4483d40ecad1..9956026bae0a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -140,8 +140,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -149,7 +148,6 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -277,8 +275,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 *p_regtoset = NULL;
u8 index = 0;
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type ==
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type ==
BT_CSR_BC4))
p_regtoset = regtoset_bt;
else
@@ -364,9 +362,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~AcmHw_VoqEn);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- e_aci);
+ pr_err("switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -551,8 +548,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
break; }
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %d not processed\n", variable);
+ pr_err("switch case %d not processed\n", variable);
break;
}
}
@@ -573,9 +569,8 @@ static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -659,26 +654,25 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpci->up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
- rtl92ce_sw_led_on(hw, pLed0);
+ rtl92ce_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
- rtl92ce_sw_led_on(hw, pLed0);
+ rtl92ce_sw_led_on(hw, pled0);
else
- rtl92ce_sw_led_off(hw, pLed0);
+ rtl92ce_sw_led_off(hw, pled0);
}
static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -687,7 +681,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
u16 retry;
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
- if (rtlpcipriv->bt_coexist.bt_coexistence) {
+ if (rtlpriv->btcoexist.bt_coexistence) {
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
@@ -696,7 +690,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
- if (rtlpcipriv->bt_coexist.bt_coexistence) {
+ if (rtlpriv->btcoexist.bt_coexistence) {
u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
u4b_tmp &= (~0x00024800);
@@ -730,7 +724,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
udelay(2);
- if (rtlpcipriv->bt_coexist.bt_coexistence) {
+ if (rtlpriv->btcoexist.bt_coexistence) {
bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd;
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp);
}
@@ -802,7 +796,6 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u8 reg_bw_opmode;
u32 reg_prsr;
@@ -832,8 +825,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
else
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
@@ -852,8 +845,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
} else {
@@ -861,8 +854,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
}
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
else
rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
@@ -963,7 +956,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
goto exit;
}
@@ -1128,8 +1121,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Chip Version ID: %s\n", versionid);
+ pr_info("Chip Version ID: %s\n", versionid);
switch (version & 0x3) {
case CHIP_88C:
@@ -1143,8 +1135,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
break;
default:
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "ERROR RF_Type is set!!\n");
+ pr_err("ERROR RF_Type is set!!\n");
break;
}
@@ -1193,8 +1184,7 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
"Set Network type to Mesh Point!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not supported!\n", type);
+ pr_err("Network type %d not supported!\n", type);
return 1;
}
@@ -1292,7 +1282,7 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8192ce: invalid aci: %d !\n", aci);
break;
}
}
@@ -1320,7 +1310,6 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 u1b_tmp;
u32 u4b_tmp;
@@ -1338,9 +1327,9 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) {
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8))) {
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 |
(u1b_tmp << 8));
} else {
@@ -1352,7 +1341,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
if (!IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
- if (rtlpcipriv->bt_coexist.bt_coexistence) {
+ if (rtlpriv->btcoexist.bt_coexistence) {
u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
u4b_tmp |= 0x03824800;
rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
@@ -1731,12 +1720,11 @@ exit:
static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -1780,7 +1768,7 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl92ce_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
_rtl92ce_hal_customized_behavior(hw);
}
@@ -1789,7 +1777,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1845,12 +1832,12 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
break;
}
- if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
- (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
- (rtlpcipriv->bt_coexist.bt_cur_state) &&
- (rtlpcipriv->bt_coexist.bt_ant_isolation) &&
- ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ||
- (rtlpcipriv->bt_coexist.bt_service == BT_BUSY)))
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+ (rtlpriv->btcoexist.bt_cur_state) &&
+ (rtlpriv->btcoexist.bt_ant_isolation) &&
+ ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
+ (rtlpriv->btcoexist.bt_service == BT_BUSY)))
ratr_value &= 0x0fffcfc0;
else
ratr_value &= 0x0FFFFFFF;
@@ -2152,8 +2139,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", enc_algo);
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2171,9 +2158,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
@@ -2246,65 +2231,64 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
- rtlpcipriv->bt_coexist.bt_coexistence =
- rtlpcipriv->bt_coexist.eeprom_bt_coexist;
- rtlpcipriv->bt_coexist.bt_ant_num =
- rtlpcipriv->bt_coexist.eeprom_bt_ant_num;
- rtlpcipriv->bt_coexist.bt_coexist_type =
- rtlpcipriv->bt_coexist.eeprom_bt_type;
-
- if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
- rtlpcipriv->bt_coexist.bt_ant_isolation =
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->btcoexist.bt_coexistence =
+ rtlpriv->btcoexist.eeprom_bt_coexist;
+ rtlpriv->btcoexist.bt_ant_num =
+ rtlpriv->btcoexist.eeprom_bt_ant_num;
+ rtlpriv->btcoexist.bt_coexist_type =
+ rtlpriv->btcoexist.eeprom_bt_type;
+
+ if (rtlpriv->btcoexist.reg_bt_iso == 2)
+ rtlpriv->btcoexist.bt_ant_isolation =
+ rtlpriv->btcoexist.eeprom_bt_ant_isol;
else
- rtlpcipriv->bt_coexist.bt_ant_isolation =
- rtlpcipriv->bt_coexist.reg_bt_iso;
-
- rtlpcipriv->bt_coexist.bt_radio_shared_type =
- rtlpcipriv->bt_coexist.eeprom_bt_radio_shared;
-
- if (rtlpcipriv->bt_coexist.bt_coexistence) {
-
- if (rtlpcipriv->bt_coexist.reg_bt_sco == 1)
- rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION;
- else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2)
- rtlpcipriv->bt_coexist.bt_service = BT_SCO;
- else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4)
- rtlpcipriv->bt_coexist.bt_service = BT_BUSY;
- else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5)
- rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY;
+ rtlpriv->btcoexist.bt_ant_isolation =
+ rtlpriv->btcoexist.reg_bt_iso;
+
+ rtlpriv->btcoexist.bt_radio_shared_type =
+ rtlpriv->btcoexist.eeprom_bt_radio_shared;
+
+ if (rtlpriv->btcoexist.bt_coexistence) {
+ if (rtlpriv->btcoexist.reg_bt_sco == 1)
+ rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION;
+ else if (rtlpriv->btcoexist.reg_bt_sco == 2)
+ rtlpriv->btcoexist.bt_service = BT_SCO;
+ else if (rtlpriv->btcoexist.reg_bt_sco == 4)
+ rtlpriv->btcoexist.bt_service = BT_BUSY;
+ else if (rtlpriv->btcoexist.reg_bt_sco == 5)
+ rtlpriv->btcoexist.bt_service = BT_OTHERBUSY;
else
- rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
+ rtlpriv->btcoexist.bt_service = BT_IDLE;
- rtlpcipriv->bt_coexist.bt_edca_ul = 0;
- rtlpcipriv->bt_coexist.bt_edca_dl = 0;
- rtlpcipriv->bt_coexist.bt_rssi_state = 0xff;
+ rtlpriv->btcoexist.bt_edca_ul = 0;
+ rtlpriv->btcoexist.bt_edca_dl = 0;
+ rtlpriv->btcoexist.bt_rssi_state = 0xff;
}
}
void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
bool auto_load_fail, u8 *hwinfo)
{
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 val;
if (!auto_load_fail) {
- rtlpcipriv->bt_coexist.eeprom_bt_coexist =
+ rtlpriv->btcoexist.eeprom_bt_coexist =
((hwinfo[RF_OPTION1] & 0xe0) >> 5);
val = hwinfo[RF_OPTION4];
- rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
- rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
- rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
+ rtlpriv->btcoexist.eeprom_bt_type = ((val & 0xe) >> 1);
+ rtlpriv->btcoexist.eeprom_bt_ant_num = (val & 0x1);
+ rtlpriv->btcoexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
+ rtlpriv->btcoexist.eeprom_bt_radio_shared =
((val & 0x20) >> 5);
} else {
- rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
- rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
- rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
- rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
+ rtlpriv->btcoexist.eeprom_bt_coexist = 0;
+ rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE;
+ rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2;
+ rtlpriv->btcoexist.eeprom_bt_ant_isol = 0;
+ rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
}
rtl8192ce_bt_var_init(hw);
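
The hwinfo[] parsing above packs several BT-coexistence fields into single EEPROM bytes; the bit positions can be read straight off the masks. A commented restatement of the RF_OPTION4 decode, using the field names from this hunk (the layout is only what the masks imply, not taken from a datasheet):

    u8 val = hwinfo[RF_OPTION4];

    rtlpriv->btcoexist.eeprom_bt_type         = (val & 0x0e) >> 1; /* bits 3:1 */
    rtlpriv->btcoexist.eeprom_bt_ant_num      =  val & 0x01;       /* bit 0    */
    rtlpriv->btcoexist.eeprom_bt_ant_isol     = (val & 0x10) >> 4; /* bit 4    */
    rtlpriv->btcoexist.eeprom_bt_radio_shared = (val & 0x20) >> 5; /* bit 5    */
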
@@ -2312,14 +2296,14 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
/* 0:Low, 1:High, 2:From Efuse. */
- rtlpcipriv->bt_coexist.reg_bt_iso = 2;
+ rtlpriv->btcoexist.reg_bt_iso = 2;
/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
- rtlpcipriv->bt_coexist.reg_bt_sco = 3;
+ rtlpriv->btcoexist.reg_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
- rtlpcipriv->bt_coexist.reg_bt_sco = 0;
+ rtlpriv->btcoexist.reg_bt_sco = 0;
}
@@ -2327,23 +2311,22 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u8 u1_tmp;
- if (rtlpcipriv->bt_coexist.bt_coexistence &&
- ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
- rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) {
+ if (rtlpriv->btcoexist.bt_coexistence &&
+ ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
+ rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) {
- if (rtlpcipriv->bt_coexist.bt_ant_isolation)
+ if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
BIT_OFFSET_LEN_MASK_32(0, 1);
u1_tmp = u1_tmp |
- ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+ ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
- ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ?
+ ((rtlpriv->btcoexist.bt_service == BT_SCO) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index 833193b751f7..7edf5af9046e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -57,8 +57,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -80,7 +79,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain)
+ if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -92,8 +91,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_info("switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -101,24 +99,26 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
- _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl92ce_sw_led_on(hw, pLed0);
+ rtl92ce_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl92ce_sw_led_off(hw, pLed0);
+ rtl92ce_sw_led_off(hw, pled0);
break;
default:
break;
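
Both the PCI (rtl8192ce) and USB (rtl8192cu) hunks stop reaching through their bus-specific private structures for LED and BT-coexistence state; it now lives in struct rtl_priv, presumably so the two buses can share the same helpers. A sketch of the access-path change, using the led_opendrain flag seen in these files:

    /* before: per-bus containers, a different one for each driver */
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);    /* PCI build */
    pcipriv->ledctl.led_opendrain = true;
    /* ...or, on the USB driver: rtl_usbpriv(hw)->ledctl.led_opendrain */

    /* after: one location, identical code on both buses */
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    rtlpriv->ledctl.led_opendrain = true;
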
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index d1b6a8fe7b6a..7c6d7fc1ef9a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -297,10 +297,10 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpath);
+ pr_info("Incorrect rfpath %#x\n", rfpath);
break;
default:
+ pr_info("switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -340,8 +340,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_info("unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
break;
}
@@ -365,8 +364,8 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -546,8 +545,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpwr_state);
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
index 7cae6350437c..e68ed7f37c79 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
@@ -51,8 +51,7 @@ void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 691ddef1ae28..bcbb0c60f1f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+ char *fw_name;
rtl8192ce_bt_reg_init(hw);
@@ -130,8 +130,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -158,14 +156,18 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw\n");
+ pr_err("Can't alloc buffer for fw\n");
return 1;
}
/* request fw */
- if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU.bin";
+ else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ else
+ fw_name = "rtlwifi/rtl8192cfw.bin";
rtlpriv->max_fw_size = 0x4000;
pr_info("Using firmware %s\n", fw_name);
@@ -173,8 +175,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
@@ -249,7 +250,8 @@ static struct rtl_mod_params rtl92ce_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
@@ -366,7 +368,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin");
module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92ce_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92ce_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
@@ -374,7 +377,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
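
The single debug module parameter becomes a pair: an integer debug_level and a 64-bit debug_mask, both writable at runtime (0644). How the two gate a message is not shown in this hunk; below is a minimal sketch of the kind of check the logging core would need, with the field names taken from the mod_params initializer above. The helper name is made up for illustration and is not the driver's real API:

    /* hypothetical helper -- illustrates the level + component-mask gate */
    static bool rtl_dbg_enabled(struct rtl_priv *rtlpriv, u64 comp, int level)
    {
        return (rtlpriv->cfg->mod_params->debug_level >= level) &&
               (rtlpriv->cfg->mod_params->debug_mask & comp);
    }

Loading the module with, e.g., debug_level=2 debug_mask=0xffffffff would then restore roughly the verbosity the removed RT_TRACE levels used to provide.
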
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 2ab4a00246cc..3616ba21959d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -670,7 +670,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -690,7 +690,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
@@ -710,7 +710,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -726,7 +726,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 5c7da0cfc684..f95a64507f17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -393,12 +393,11 @@ exit:
static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- usb_priv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -452,8 +451,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
break;
}
if (pollingCount++ > 100) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
+ pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
return -ENODEV;
}
} while (true);
@@ -486,8 +484,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
break;
}
if (pollingCount++ > 1000) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
+ pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
return -ENODEV;
}
} while (true);
@@ -687,7 +684,6 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
u8 queue_sel)
{
u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
if (!wmm_enable) { /* typical setting */
beQ = QUEUE_LOW;
@@ -705,8 +701,7 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
hiQ = QUEUE_HIGH;
}
_rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
- queue_sel);
+ pr_info("Tx queue select :0x%02x..\n", queue_sel);
}
static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
@@ -765,8 +760,7 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
break;
}
rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
- hq_sele);
+ pr_info("Tx queue select :0x%02x..\n", hq_sele);
}
static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
@@ -848,8 +842,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
err = _rtl92cu_init_power_on(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to init power on!\n");
+ pr_err("Failed to init power on!\n");
return err;
}
if (!wmm_enable) {
@@ -860,8 +853,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
: WMM_CHIP_A_TX_PAGE_BOUNDARY;
}
if (false == rtl92c_init_llt_table(hw, boundary)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to init LLT Table!\n");
+ pr_err("Failed to init LLT Table!\n");
return -EINVAL;
}
_rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
@@ -986,7 +978,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
+ pr_err("init mac failed!\n");
goto exit;
}
err = rtl92c_download_fw(hw);
@@ -1099,8 +1091,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
udelay(50);
}
if (retry_cnts >= 100) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "#####=> 8051 reset failed!.........................\n");
+ pr_err("8051 reset failed!.........................\n");
/* if 8051 reset fail, reset MAC. */
rtl_write_byte(rtlpriv,
REG_SYS_FUNC_EN + 1,
@@ -1340,8 +1331,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not supported!\n", type);
+ pr_err("Network type %d not supported!\n", type);
goto error_out;
}
rtl_write_byte(rtlpriv, MSR, bt_msr);
@@ -1555,8 +1545,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -1790,7 +1779,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u4b_ac_param);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n",
+ WARN_ONCE(true, "rtl8192cu: invalid aci: %d !\n",
e_aci);
break;
}
@@ -1926,8 +1915,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index c6240813ff7b..66d2784de67d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -57,8 +57,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -78,7 +77,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (usbpriv->ledctl.led_opendrain)
+ if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -90,8 +89,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = false;
@@ -99,16 +98,18 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
- _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
- _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
- _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
- _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0);
+ _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1);
}
static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index cf212f694db5..1b124eade846 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -157,9 +157,8 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
- address, _LLT_OP_VALUE(value));
+ pr_err("Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
+ address, _LLT_OP_VALUE(value));
status = false;
break;
}
@@ -262,8 +261,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "illegal switch case\n");
+ pr_err("illegal switch case\n");
enc_algo = CAM_TKIP;
break;
}
@@ -280,9 +278,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index f35f435c094e..f068dd5317a7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -274,8 +274,7 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpath);
+ pr_err("switch case %#x not processed\n", rfpath);
break;
default:
break;
@@ -314,8 +313,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
switch (rtlphy->current_chan_bw) {
@@ -336,8 +335,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -509,8 +508,8 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
_rtl92c_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpwr_state);
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index 5e3183024aa0..9cff6bc4049c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -51,8 +51,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index b84e13ac6ead..96c923b3feb4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -61,15 +61,13 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->cfg->mod_params->sw_crypto =
rtlpriv->cfg->mod_params->sw_crypto;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw\n");
+ pr_err("Can't alloc buffer for fw\n");
return 1;
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
@@ -158,13 +156,16 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
static struct rtl_mod_params rtl92cu_mod_params = {
.sw_crypto = 0,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92cu_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92cu_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92cu_mod_params.debug_mask, ullong, 0644);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 1ea878fa7901..1611e42479d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -241,7 +241,7 @@ u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
break;
default:
hw_queue_index = RTL_TXQ_BE;
- RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n",
+ WARN_ONCE(true, "rtl8192cu: QSLT_BE queue, skb_queue:%d\n",
mac80211_queue_index);
break;
}
@@ -477,14 +477,14 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc)
*/
static void _rtl_tx_desc_checksum(u8 *txdesc)
{
- u16 *ptr = (u16 *)txdesc;
+ __le16 *ptr = (__le16 *)txdesc;
u16 checksum = 0;
u32 index;
/* Clear first */
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
- checksum = checksum ^ (*(ptr + index));
+ checksum = checksum ^ le16_to_cpu(*(ptr + index));
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
}
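
The checksum loop XORs the first sixteen 16-bit words of the descriptor, which are little-endian on the wire; treating them as __le16 and converting each word with le16_to_cpu() makes the result identical on big-endian hosts, while on little-endian hosts the conversion compiles to nothing. A self-contained sketch of the same computation:

    /* sketch: 16-bit XOR checksum over the first 32 bytes of a LE buffer */
    static u16 txdesc_checksum(const u8 *txdesc)
    {
        const __le16 *ptr = (const __le16 *)txdesc;
        u16 checksum = 0;
        u32 index;

        for (index = 0; index < 16; index++)
            checksum ^= le16_to_cpu(ptr[index]);

        return checksum;
    }
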
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index df88e39301c2..487eec89bc29 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -92,129 +92,107 @@ struct rx_drv_info_92c {
u8 reserve:4;
} __packed;
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__bits))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
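
The local SHIFT_AND_MASK_LE()/SET_BITS_OFFSET_LE() helpers deleted above are replaced by LE_BITS_TO_4BYTE()/SET_BITS_TO_LE_4BYTE() in the hunks that follow; judging from the removed definitions, the replacements must read and update a little-endian 32-bit word in the same way. A sketch of the equivalent open-coded operations (the real macro bodies live in the shared rtlwifi headers and are not shown in this patch):

    /* mask of the low "bits" bits, valid for 1..32 as in BIT_LEN_MASK_32 */
    #define LEN_MASK_32(bits)  (0xffffffffu >> (32 - (bits)))

    /* read "bits" bits starting at "shift" from a le32 word at pdesc */
    static u32 get_le_bits(const u8 *pdesc, unsigned int shift, unsigned int bits)
    {
        return (le32_to_cpu(*(const __le32 *)pdesc) >> shift) & LEN_MASK_32(bits);
    }

    /* write val into the same bit field, keeping the word little-endian */
    static void set_le_bits(u8 *pdesc, unsigned int shift, unsigned int len, u32 val)
    {
        u32 word = le32_to_cpu(*(__le32 *)pdesc);

        word &= ~(LEN_MASK_32(len) << shift);
        word |= (val & LEN_MASK_32(len)) << shift;
        *(__le32 *)pdesc = cpu_to_le32(word);
    }
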
/* macros to read various fields in RX descriptor */
/* DWORD 0 */
#define GET_RX_DESC_PKT_LEN(__rxdesc) \
- SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+ LE_BITS_TO_4BYTE((__rxdesc), 0, 14)
#define GET_RX_DESC_CRC32(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 14, 1)
#define GET_RX_DESC_ICV(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 15, 1)
#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+ LE_BITS_TO_4BYTE(__rxdesc, 16, 4)
#define GET_RX_DESC_SECURITY(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+ LE_BITS_TO_4BYTE(__rxdesc, 20, 3)
#define GET_RX_DESC_QOS(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 23, 1)
#define GET_RX_DESC_SHIFT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+ LE_BITS_TO_4BYTE(__rxdesc, 24, 2)
#define GET_RX_DESC_PHY_STATUS(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 26, 1)
#define GET_RX_DESC_SWDEC(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 27, 1)
#define GET_RX_DESC_LAST_SEG(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 28, 1)
#define GET_RX_DESC_FIRST_SEG(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 29, 1)
#define GET_RX_DESC_EOR(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 30, 1)
#define GET_RX_DESC_OWN(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+ LE_BITS_TO_4BYTE(__rxdesc, 31, 1)
/* DWORD 1 */
#define GET_RX_DESC_MACID(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 0, 5)
#define GET_RX_DESC_TID(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 5, 4)
#define GET_RX_DESC_PAGGR(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 14, 1)
#define GET_RX_DESC_FAGGR(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 15, 1)
#define GET_RX_DESC_A1_FIT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 16, 4)
#define GET_RX_DESC_A2_FIT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 20, 4)
#define GET_RX_DESC_PAM(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 24, 1)
#define GET_RX_DESC_PWR(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 25, 1)
#define GET_RX_DESC_MORE_DATA(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 26, 1)
#define GET_RX_DESC_MORE_FRAG(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 27, 1)
#define GET_RX_DESC_TYPE(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 28, 2)
#define GET_RX_DESC_MC(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 30, 1)
#define GET_RX_DESC_BC(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 4, 31, 1)
/* DWORD 2 */
#define GET_RX_DESC_SEQ(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+ LE_BITS_TO_4BYTE(__rxdesc + 8, 0, 12)
#define GET_RX_DESC_FRAG(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+ LE_BITS_TO_4BYTE(__rxdesc + 8, 12, 4)
#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+ LE_BITS_TO_4BYTE(__rxdesc + 8, 16, 8)
#define GET_RX_DESC_NEXT_IND(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 8, 30, 1)
/* DWORD 3 */
#define GET_RX_DESC_RX_MCS(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 0, 6)
#define GET_RX_DESC_RX_HT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 6, 1)
#define GET_RX_DESC_AMSDU(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 7, 1)
#define GET_RX_DESC_SPLCP(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 8, 1)
#define GET_RX_DESC_BW(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 9, 1)
#define GET_RX_DESC_HTC(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 10, 1)
#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 11, 1)
#define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 12, 1)
#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 13, 1)
#define GET_RX_DESC_HWPC_ERR(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 14, 1)
#define GET_RX_DESC_HWPC_IND(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 15, 1)
#define GET_RX_DESC_IV0(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+ LE_BITS_TO_4BYTE(__rxdesc + 12, 16, 16)
/* DWORD 4 */
#define GET_RX_DESC_IV1(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+ LE_BITS_TO_4BYTE(__rxdesc + 16, 0, 32)
/* DWORD 5 */
#define GET_RX_DESC_TSFL(__rxdesc) \
- SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
+ LE_BITS_TO_4BYTE(__rxdesc + 20, 0, 32)
/*======================= tx desc ============================================*/
@@ -222,182 +200,182 @@ struct rx_drv_info_92c {
/* Dword 0 */
#define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 0, 16, __value)
#define SET_TX_DESC_OFFSET(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 16, 8, __value)
#define SET_TX_DESC_BMC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 24, 1, __value)
#define SET_TX_DESC_HTC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 25, 1, __value)
#define SET_TX_DESC_LAST_SEG(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 26, 1, __value)
#define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 27, 1, __value)
#define SET_TX_DESC_LINIP(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 28, 1, __value)
#define SET_TX_DESC_NO_ACM(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 29, 1, __value)
#define SET_TX_DESC_GF(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 30, 1, __value)
#define SET_TX_DESC_OWN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value)
/* Dword 1 */
#define SET_TX_DESC_MACID(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value)
#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 5, 1, __value)
#define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 6, 1, __value)
#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 7, 1, __value)
#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 8, 5, __value)
#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 13, 1, __value)
#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 14, 1, __value)
#define SET_TX_DESC_PIFS(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 15, 1, __value)
#define SET_TX_DESC_RATE_ID(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 20, 1, __value)
#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 21, 1, __value)
#define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 22, 2, __value)
#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 26, 5, __value)
/* Dword 2 */
#define SET_TX_DESC_RTS_RC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 0, 6, __value)
#define SET_TX_DESC_DATA_RC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 6, 6, __value)
#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 14, 2, __value)
#define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 17, 1, __value)
#define SET_TX_DESC_RAW(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 18, 1, __value)
#define SET_TX_DESC_CCX(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 19, 1, __value)
#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 20, 3, __value)
#define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 24, 1, __value)
#define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 25, 1, __value)
#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 26, 2, __value)
#define SET_TX_DESC_TX_ANTL(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 28, 2, __value)
#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 8, 30, 2, __value)
/* Dword 3 */
#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 12, 0, 8, __value)
#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 12, 8, 8, __value)
#define SET_TX_DESC_SEQ(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 12, 16, 12, __value)
#define SET_TX_DESC_PKT_ID(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 12, 28, 4, __value)
/* Dword 4 */
#define SET_TX_DESC_RTS_RATE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 0, 5, __value)
#define SET_TX_DESC_AP_DCFE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 5, 1, __value)
#define SET_TX_DESC_QOS(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 6, 1, __value)
#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 7, 1, __value)
#define SET_TX_DESC_USE_RATE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 8, 1, __value)
#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 9, 1, __value)
#define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 10, 1, __value)
#define SET_TX_DESC_CTS2SELF(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 11, 1, __value)
#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 12, 1, __value)
#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 13, 1, __value)
#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 18, 1, __value)
#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 19, 1, __value)
#define SET_TX_DESC_DATA_SC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 20, 2, __value)
#define SET_TX_DESC_DATA_STBC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 22, 2, __value)
#define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 24, 1, __value)
#define SET_TX_DESC_DATA_BW(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 25, 1, __value)
#define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 26, 1, __value)
#define SET_TX_DESC_RTS_BW(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 27, 1, __value)
#define SET_TX_DESC_RTS_SC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 28, 2, __value)
#define SET_TX_DESC_RTS_STBC(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 16, 30, 2, __value)
/* Dword 5 */
#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+ SET_BITS_TO_LE_4BYTE(__pdesc + 20, 0, 6, __val)
#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+ SET_BITS_TO_LE_4BYTE(__pdesc + 20, 6, 1, __val)
#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+ SET_BITS_TO_LE_4BYTE(__pdesc + 20, 7, 1, __val)
#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 20, 8, 5, __value)
#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 20, 13, 4, __value)
#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 20, 17, 1, __value)
#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 20, 18, 6, __value)
#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 20, 24, 8, __value)
/* Dword 6 */
#define SET_TX_DESC_TXAGC_A(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 0, 5, __value)
#define SET_TX_DESC_TXAGC_B(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 5, 5, __value)
#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 10, 1, __value)
#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 11, 5, __value)
#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 16, 4, __value)
#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 20, 4, __value)
#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 24, 4, __value)
#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 24, 28, 4, __value)
/* Dword 7 */
#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 0, 16, __value)
#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 16, 4, __value)
#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 20, 4, __value)
#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value)
#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
- SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value)
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
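Note on the rename above: the SET_TX_DESC_* wrappers only change which bit-field helper they expand to. As a rough illustration of the read-modify-write both names imply on one little-endian dword of the descriptor (this sketch is not the in-tree definition, just the assumed shape):

/* Illustrative only: clear the field of width 'nbits' starting at bit
 * 'start' inside one little-endian 32-bit word, then insert the value.
 */
static inline void example_set_bits_to_le_4byte(u8 *addr, u8 start, u8 nbits,
                                                u32 value)
{
        __le32 *p = (__le32 *)addr;
        u32 dword = le32_to_cpu(*p);
        u32 mask = nbits >= 32 ? 0xffffffffU : (1U << nbits) - 1;

        dword &= ~(mask << start);
        dword |= (value & mask) << start;
        *p = cpu_to_le32(dword);
}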
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 17f6903c14bb..88faeab2574f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
+#include "../efuse.h"
#include "reg.h"
#include "def.h"
#include "fw.h"
@@ -59,86 +60,31 @@ static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
-static void _rtl92d_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *) buffer;
- u32 *pu4BytePtr = (u32 *) buffer;
- u32 i, offset, blockCount, remainSize;
-
- blockCount = size / blocksize;
- remainSize = size % blocksize;
- for (i = 0; i < blockCount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
- if (remainSize) {
- offset = blockCount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS +
- offset + i), *(bufferptr + i));
- }
- }
-}
-
-static void _rtl92d_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl92d_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
- *pfwlen = fwlen;
-}
-
static void _rtl92d_write_fw(struct ieee80211_hw *hw,
enum version_8192d version, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = buffer;
- u32 pagenums, remainSize;
+ u8 *bufferptr = buffer;
+ u32 pagenums, remainsize;
u32 page, offset;
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
- _rtl92d_fill_dummy(bufferPtr, &size);
+ rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8192D_PAGE_SIZE;
- remainSize = size % FW_8192D_PAGE_SIZE;
- if (pagenums > 8) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater then 8\n");
- }
+ remainsize = size % FW_8192D_PAGE_SIZE;
+ if (pagenums > 8)
+ pr_err("Page numbers should not greater then 8\n");
for (page = 0; page < pagenums; page++) {
offset = page * FW_8192D_PAGE_SIZE;
- _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
- FW_8192D_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192D_PAGE_SIZE);
}
- if (remainSize) {
+ if (remainsize) {
offset = pagenums * FW_8192D_PAGE_SIZE;
page = pagenums;
- _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
- remainSize);
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
}
}
@@ -153,13 +99,10 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
} while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
(!(value32 & FWDL_ChkSum_rpt)));
if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x\n",
- value32);
+ pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
+ value32);
return -EIO;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32);
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
@@ -182,7 +125,7 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
udelay(50);
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
- RT_ASSERT((delay > 0), "8051 reset failed!\n");
+ WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"=====> 8051 reset success (%d)\n", delay);
}
@@ -326,13 +269,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
value &= (~BIT(5));
rtl_write_byte(rtlpriv, 0x1f, value);
spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "fw is not ready to run!\n");
- goto exit;
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n");
- }
+ if (err)
+ pr_err("fw is not ready to run!\n");
exit:
err = _rtl92d_fw_init(hw);
return err;
@@ -407,8 +345,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
while (!bwrite_success) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Write H2C fail because no trigger for FW INT!\n");
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
break;
}
boxnum = rtlhal->last_hmeboxnum;
@@ -430,8 +367,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", boxnum);
+ pr_err("switch case %#x not processed\n",
+ boxnum);
break;
}
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
@@ -507,8 +444,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
boxcontent[idx]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", cmd_len);
+ pr_err("switch case %#x not processed\n",
+ cmd_len);
break;
}
bwrite_success = true;
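The driver-local _rtl92d_fill_dummy()/_rtl92d_fw_block_write()/_rtl92d_fw_page_write() helpers removed above are replaced by shared rtl_fill_dummy() and rtl_fw_page_write(), which is why ../efuse.h is now included. Judging from the deleted code, the padding step simply zero-fills the firmware image up to the next 4-byte boundary; a minimal sketch of that behaviour (assuming the shared helper does the same):

static void example_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
{
        u32 fwlen = *pfwlen;

        while (fwlen % 4)       /* pad to a dword boundary with zero bytes */
                pfwbuf[fwlen++] = 0;

        *pfwlen = fwlen;
}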
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index fcb14c5db172..cf28d25c551f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -163,8 +163,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -358,9 +357,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- e_aci);
+ pr_err("switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -500,8 +498,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
}
@@ -520,9 +517,8 @@ static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -618,19 +614,19 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
static void _rtl92de_gen_refresh_led_state(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpci->up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
- rtl92de_sw_led_on(hw, pLed0);
+ rtl92de_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
- rtl92de_sw_led_on(hw, pLed0);
+ rtl92de_sw_led_on(hw, pled0);
else
- rtl92de_sw_led_off(hw, pLed0);
+ rtl92de_sw_led_off(hw, pled0);
}
static bool _rtl92de_init_mac(struct ieee80211_hw *hw)
@@ -920,7 +916,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
/* rtlpriv->intf_ops->disable_aspm(hw); */
rtstatus = _rtl92de_init_mac(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
return err;
@@ -1119,11 +1115,8 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not supported!\n", type);
+ pr_err("Network type %d not supported!\n", type);
return 1;
- break;
-
}
rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
@@ -1732,7 +1725,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
break;
default:
chipver |= CHIP_92D_D_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n");
+ pr_err("Unknown CUT!\n");
break;
}
rtlpriv->rtlhal.version = chipver;
@@ -1816,7 +1809,7 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl92de_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
return;
}
@@ -2169,8 +2162,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", enc_algo);
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2186,9 +2179,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
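A pattern repeated throughout this series: RT_TRACE(..., COMP_ERR, DBG_EMERG, ...) error prints become plain pr_err(), so these messages are no longer filtered by the driver's debug settings and always reach the log at KERN_ERR. Taken from the hunk above:

        /* old: routed through the rtlwifi debug machinery */
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 "switch case %#x not processed\n", variable);
        /* new: unconditional kernel error message */
        pr_err("switch case %#x not processed\n", variable);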
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index c22b8a215c87..8851038c9eba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -66,8 +66,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -76,7 +76,6 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -89,7 +88,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain)
+ if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -101,8 +100,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = false;
@@ -110,24 +109,26 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92de_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
- _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl92de_sw_led_on(hw, pLed0);
+ rtl92de_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl92de_sw_led_off(hw, pLed0);
+ rtl92de_sw_led_off(hw, pled0);
break;
default:
break;
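The LED bookkeeping also moves: sw_led0/sw_led1 and led_opendrain now live in rtl_priv's ledctl rather than in the PCI-specific rtl_pci_priv, presumably so that code which is not PCI-aware can reach them. After the change the access pattern is simply (example_led_access is a hypothetical wrapper, the two lookups are from the hunks above):

static void example_led_access(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);                /* was rtl_pcipriv(hw) */
        struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;       /* was pcipriv->ledctl.sw_led0 */

        rtl92de_sw_led_on(hw, pled0);
}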
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 424f54babd03..de98d88199d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -716,7 +716,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -731,13 +731,13 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -833,8 +833,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpath);
+ pr_err("switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -987,8 +986,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
switch (rtlphy->current_chan_bw) {
@@ -1019,8 +1018,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -2700,7 +2699,7 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL\n");
+ WARN_ONCE(true, "rtl8192de: cmdtable cannot be NULL\n");
return false;
}
if (cmdtableidx >= cmdtablesz)
@@ -2842,9 +2841,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rtl92d_phy_reload_iqk_setting(hw, channel);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ pr_err("switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
@@ -2893,17 +2891,17 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
* 5G and 2.4G band. */
if (channel <= 14)
return 0;
- RT_ASSERT((channel > 14), "5G but channel<=14\n");
+ WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n");
break;
case BAND_ON_2_4G:
/* Get first channel error when change between
* 5G and 2.4G band. */
if (channel > 14)
return 0;
- RT_ASSERT((channel <= 14), "2G but channel>14\n");
+ WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n");
break;
default:
- RT_ASSERT(false, "Invalid WirelessMode(%#x)!!\n",
+ WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n",
rtlpriv->mac80211.mode);
break;
}
@@ -2956,9 +2954,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
rtl92d_dm_write_dig(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ pr_err("switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2988,8 +2985,8 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", iotype);
+ pr_err("switch case %#x not processed\n",
+ iotype);
break;
}
} while (false);
@@ -3176,8 +3173,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
_rtl92d_phy_set_rfsleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpwr_state);
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
bresult = false;
break;
}
@@ -3336,7 +3333,7 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
}
}
if (i == 200)
- RT_ASSERT(false, "Another mac power off over time\n");
+ WARN_ONCE(true, "rtl8192de: Another mac power off over time\n");
}
}
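The RT_ASSERT() conversions deserve a note: RT_ASSERT(cond, ...) complained when cond was false, while WARN_ONCE(cond, ...) warns once, with a backtrace, when cond is true, so every conversion inverts the condition. From the hunk above:

        RT_ASSERT((channel > 14), "5G but channel<=14\n");              /* old */
        WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n");  /* new */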
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 9dc9e915513e..021d3c538ac2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -63,8 +63,7 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 2d65e4095292..16132c66e5e1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -140,8 +140,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -171,8 +169,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw\n");
+ pr_err("Can't alloc buffer for fw\n");
return 1;
}
@@ -185,8 +182,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
@@ -256,7 +252,8 @@ static struct rtl_mod_params rtl92de_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
static const struct rtl_hal_cfg rtl92de_hal_cfg = {
@@ -366,15 +363,17 @@ MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin");
module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92de_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92de_mod_params.debug_level, int, 0644);
module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(debug_mask, rtl92de_mod_params.debug_mask, ullong, 0644);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
@@ -402,7 +401,7 @@ static int __init rtl92de_module_init(void)
ret = pci_register_driver(&rtl92de_driver);
if (ret)
- RT_ASSERT(false, "No device found\n");
+ WARN_ONCE(true, "rtl8192de: No device found\n");
return ret;
}
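The single 'debug' module parameter becomes two run-time-writable knobs: debug_level (verbosity 0-5) and debug_mask (a per-component bit mask). A sketch of how the pair is presumably consumed by the trace helpers; the check shown is an assumption, not the core's actual code:

static bool example_dbg_enabled(struct rtl_priv *rtlpriv, u64 comp, int level)
{
        return (rtlpriv->cfg->mod_params->debug_mask & comp) &&
               level <= rtlpriv->cfg->mod_params->debug_level;
}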
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 5fb37564957c..5c9c8741134f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -794,7 +794,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -814,7 +814,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
@@ -834,7 +834,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -848,7 +848,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
ret = GET_RX_DESC_PKT_LEN(pdesc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index b3f6a9ed15d4..9fec345a42a0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -27,6 +27,7 @@
#include "../pci.h"
#include "../base.h"
#include "../core.h"
+#include "../efuse.h"
#include "reg.h"
#include "def.h"
#include "fw.h"
@@ -48,64 +49,6 @@ static void _rtl92ee_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
-static void _rtl92ee_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *)buffer;
- u32 *pu4byteptr = (u32 *)buffer;
- u32 i, offset, blockcount, remainsize;
-
- blockcount = size / blocksize;
- remainsize = size % blocksize;
-
- for (i = 0; i < blockcount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4byteptr + i));
- }
-
- if (remainsize) {
- offset = blockcount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainsize; i++) {
- rtl_write_byte(rtlpriv,
- (FW_8192C_START_ADDRESS + offset + i),
- *(bufferptr + i));
- }
- }
-}
-
-static void _rtl92ee_fw_page_write(struct ieee80211_hw *hw, u32 page,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8)(page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-
- _rtl92ee_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92ee_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8)(fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
-
- *pfwlen = fwlen;
-}
-
static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
enum version_8192e version,
u8 *buffer, u32 size)
@@ -117,28 +60,25 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size);
- _rtl92ee_fill_dummy(bufferptr, &size);
+ rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8192C_PAGE_SIZE;
remainsize = size % FW_8192C_PAGE_SIZE;
- if (pagenums > 8) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater then 8\n");
- }
+ if (pagenums > 8)
+ pr_err("Page numbers should not greater then 8\n");
for (page = 0; page < pagenums; page++) {
offset = page * FW_8192C_PAGE_SIZE;
- _rtl92ee_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192C_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
udelay(2);
}
if (remainsize) {
offset = pagenums * FW_8192C_PAGE_SIZE;
page = pagenums;
- _rtl92ee_fw_page_write(hw, page, (bufferptr + offset),
- remainsize);
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
}
}
@@ -155,15 +95,10 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
+ value32);
goto exit;
}
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
@@ -174,21 +109,15 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw)
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD ,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x. count = %d\n",
- value32, counter);
- err = 0;
- goto exit;
- }
+ if (value32 & WINTINI_RDY)
+ return 0;
udelay(FW_8192C_POLLING_DELAY*10);
} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n",
- value32, counter);
+ pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n",
+ value32, counter);
exit:
return err;
@@ -240,13 +169,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
_rtl92ee_enable_fw_download(hw, false);
err = _rtl92ee_fw_free_to_go(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD ,
- "Firmware is ready to run!\n");
- }
return 0;
}
@@ -462,8 +384,8 @@ void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8192ee: error H2C cmd because of Fw download fail!!!\n");
return;
}
@@ -842,8 +764,8 @@ static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw,
rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state);
}
-static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
- u8 c2h_cmd_len, u8 *tmp_buf)
+void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+ u8 c2h_cmd_len, u8 *tmp_buf)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -898,5 +820,14 @@ void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len)
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE,
"[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
- _rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+ switch (c2h_cmd_id) {
+ case C2H_8192E_BT_INFO:
+ case C2H_8192E_BT_MP:
+ rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+ break;
+ default:
+ rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+ tmp_buf);
+ break;
+ }
}
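Two BT-coexistence C2H events are now deferred instead of being parsed in the receive path: they are queued with rtl_c2hcmd_enqueue(), and the parser is made non-static so it can be called back later through the .c2h_content_parsing hook wired into rtl_hal_ops further below. A hypothetical call on the consuming side; only the callback name comes from this patch, the dispatcher itself is an assumption:

static void example_c2h_dispatch(struct ieee80211_hw *hw, u8 cmd_id,
                                 u8 cmd_len, u8 *buf)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);

        if (rtlpriv->cfg->ops->c2h_content_parsing)
                rtlpriv->cfg->ops->c2h_content_parsing(hw, cmd_id, cmd_len, buf);
}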
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index 069da1e7e80a..72da3f92f02c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -185,5 +185,6 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus);
void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len);
-
+void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+ u8 c2h_cmd_len, u8 *tmp_buf);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index ebf663e1a81a..56ca7f5351ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -735,9 +735,8 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw)
static void _rtl92ee_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpriv->rtlhal.up_first_time)
return;
@@ -1006,7 +1005,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a);
/* Note Data sheet don't define */
- rtl_write_word(rtlpriv, 0x4C7, 0x80);
+ rtl_write_byte(rtlpriv, 0x4C7, 0x80);
rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
@@ -1320,7 +1319,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x65, 1);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
return err;
}
@@ -1485,8 +1484,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not support!\n", type);
+ pr_err("Network type %d not support!\n", type);
return 1;
}
@@ -1582,7 +1580,7 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8192ee: invalid aci: %d !\n", aci);
break;
}
}
@@ -2167,10 +2165,9 @@ exit:
static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"RT Customized ID: 0x%02X\n", rtlhal->oem_id);
@@ -2206,7 +2203,7 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl92ee_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
_rtl92ee_hal_customized_behavior(hw);
@@ -2484,9 +2481,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 47da05dd3076..96c64785108b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -99,26 +99,26 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92ee_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
- _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
- _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+ _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl92ee_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl92ee_sw_led_on(hw, pLed0);
+ rtl92ee_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl92ee_sw_led_off(hw, pLed0);
+ rtl92ee_sw_led_off(hw, pled0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 5ad7e753c357..8b072ee8e0d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -170,7 +170,7 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
offset &= 0xff;
newoffset = offset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ pr_err("return all one\n");
return 0xFFFFFFFF;
}
tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -214,7 +214,7 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ pr_err("stop\n");
return;
}
offset &= 0xff;
@@ -650,7 +650,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -662,12 +662,12 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
}
_rtl92ee_phy_txpower_by_rate_configuration(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -1176,7 +1176,7 @@ static u8 _rtl92ee_phy_get_ratesection_intxpower_byrate(enum radio_path path,
rate_section = 7;
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n");
break;
}
return rate_section;
@@ -1239,7 +1239,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw,
shift = 24;
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n");
break;
}
@@ -1675,8 +1675,7 @@ void rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -1717,8 +1716,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -1742,8 +1741,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1811,8 +1810,8 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if (rtlphy->set_bwmode_inprogress)
return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14),
+ "rtl8192ee: WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
@@ -1860,8 +1859,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8192ee: illegal channel for Zebra: %d\n", channel);
_rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT,
@@ -1884,8 +1883,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd = &postcommoncmd[*step];
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Invalid 'stage' = %d, Check it!\n" , *stage);
+ pr_err("Invalid 'stage' = %d, Check it!\n",
+ *stage);
return true;
}
@@ -1948,7 +1947,7 @@ static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "rtl8192ee: cmdtable cannot be NULL.\n");
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
index 73716c07d433..bc76a91da762 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
@@ -55,8 +55,7 @@ void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 46b605de36e7..48820bc497d8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -133,8 +133,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
0);
rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -165,8 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw\n");
+ pr_err("Can't alloc buffer for fw\n");
return 1;
}
@@ -179,8 +176,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
@@ -252,6 +248,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
.fill_h2c_cmd = rtl92ee_fill_h2c_cmd,
.get_btc_status = rtl92ee_get_btc_status,
.rx_command_packet = rtl92ee_rx_command_packet,
+ .c2h_content_parsing = rtl92ee_c2h_content_parsing,
};
static struct rtl_mod_params rtl92ee_mod_params = {
@@ -260,7 +257,8 @@ static struct rtl_mod_params rtl92ee_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = true,
.msi_support = true,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
@@ -370,7 +368,8 @@ MODULE_DESCRIPTION("Realtek 8192EE 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192eefw.bin");
module_param_named(swenc, rtl92ee_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92ee_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92ee_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92ee_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444);
@@ -382,7 +381,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 2d48ccd02ac8..07440e9a8ca2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -991,8 +991,9 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false,
- "ERR rxdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8192ee: ERR rxdesc :%d not processed\n",
+ desc_name);
break;
}
}
@@ -1011,8 +1012,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1);
break;
default:
- RT_ASSERT(false,
- "ERR txdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8192ee: ERR txdesc :%d not processed\n",
+ desc_name);
break;
}
} else {
@@ -1027,8 +1029,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(pdesc);
break;
default:
- RT_ASSERT(false,
- "ERR rxdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8192ee: ERR rxdesc :%d not processed\n",
+ desc_name);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 32f9207b5cf5..1922e78ad6bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -113,8 +113,7 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
case RF_2T2R:
return 0x22;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
- rtlphy->rf_type);
+ pr_err("Unknown RF type(%x)\n", rtlphy->rf_type);
break;
}
return 0x22;
@@ -168,9 +167,7 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
_rtl92s_fw_set_rqpn(hw);
if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Size over FIRMWARE_CODE_SIZE!\n");
-
+ pr_err("Size over FIRMWARE_CODE_SIZE!\n");
return false;
}
@@ -239,9 +236,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
} while (pollingcnt--);
if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
- cpustatus);
+ pr_err("FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
+ cpustatus);
goto status_check_fail;
}
break;
@@ -257,17 +253,15 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
} while (pollingcnt--);
if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
- cpustatus);
+ pr_err("FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
+ cpustatus);
goto status_check_fail;
}
/* Turn On CPU */
rtstatus = _rtl92s_firmware_enable_cpu(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Enable CPU fail!\n");
+ pr_err("Enable CPU fail!\n");
goto status_check_fail;
}
break;
@@ -282,9 +276,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
} while (pollingcnt--);
if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling DMEM code done fail ! cpustatus(%#x)\n",
- cpustatus);
+ pr_err("Polling DMEM code done fail ! cpustatus(%#x)\n",
+ cpustatus);
goto status_check_fail;
}
@@ -308,9 +301,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
(pollingcnt <= 0)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling Load Firmware ready fail ! cpustatus(%x)\n",
- cpustatus);
+ pr_err("Polling Load Firmware ready fail ! cpustatus(%x)\n",
+ cpustatus);
goto status_check_fail;
}
@@ -331,8 +323,7 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Unknown status check!\n");
+ pr_err("Unknown status check!\n");
rtstatus = false;
break;
}
@@ -380,8 +371,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
/* 2. Retrieve IMEM image. */
if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
sizeof(firmware->fw_imem))) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "memory for data image is less than IMEM required\n");
+ pr_err("memory for data image is less than IMEM required\n");
goto fail;
} else {
puc_mappedfile += fwhdr_size;
@@ -393,8 +383,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
/* 3. Retriecve EMEM image. */
if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "memory for data image is less than EMEM required\n");
+ pr_err("memory for data image is less than EMEM required\n");
goto fail;
} else {
puc_mappedfile += firmware->fw_imem_len;
@@ -428,8 +417,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unexpected Download step!!\n");
+ pr_err("Unexpected Download step!!\n");
goto fail;
}
@@ -438,14 +426,14 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
ul_filelength);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
+ pr_err("fail!\n");
goto fail;
}
/* <3> Check whether load FW process is ready */
rtstatus = _rtl92s_firmware_checkready(hw, fwstatus);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
+ pr_err("rtl8192se: firmware fail!\n");
goto fail;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 26e06b2837c3..ba1bd782238b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -75,11 +75,9 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HAL_DEF_WOWLAN:
break;
- default: {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
- break;
- }
+ default:
+ pr_err("switch case %#x not processed\n", variable);
+ break;
}
}
@@ -294,9 +292,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~AcmHw_VoqEn);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- e_aci);
+ pr_err("switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -431,8 +428,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", variable);
+ pr_err("switch case %#x not processed\n", variable);
break;
}
@@ -745,9 +741,8 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
} while (pollingcnt--);
if (pollingcnt <= 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n",
- tmpu1b);
+ pr_err("Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n",
+ tmpu1b);
tmpu1b = rtl_read_byte(rtlpriv, CMDR);
rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN));
udelay(2);
@@ -758,13 +753,12 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
/* After MACIO reset,we must refresh LED state. */
if ((ppsc->rfoff_reason == RF_CHANGE_BY_IPS) ||
(ppsc->rfoff_reason == 0)) {
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
enum rf_pwrstate rfpwr_state_toset;
rfpwr_state_toset = _rtl92se_rf_onoff_detect(hw);
if (rfpwr_state_toset == ERFON)
- rtl92se_sw_led_on(hw, pLed0);
+ rtl92se_sw_led_on(hw, pled0);
}
}
@@ -1004,7 +998,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
if (!rtl92s_phy_mac_config(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
+ pr_err("MAC Config failed\n");
err = rtstatus;
goto exit;
}
@@ -1024,7 +1018,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
if (!rtl92s_phy_bb_config(hw)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
+ pr_err("BB Config failed\n");
err = rtstatus;
goto exit;
}
@@ -1194,8 +1188,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not supported!\n", type);
+ pr_err("Network type %d not supported!\n", type);
return 1;
}
@@ -1251,7 +1244,7 @@ void rtl92se_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8192se: invalid aci: %d !\n", aci);
break;
}
}
@@ -1401,16 +1394,15 @@ static void _rtl92se_gen_refreshledstate(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpci->up_first_time == 1)
return;
if (rtlpriv->psc.rfoff_reason == RF_CHANGE_BY_IPS)
- rtl92se_sw_led_on(hw, pLed0);
+ rtl92se_sw_led_on(hw, pled0);
else
- rtl92se_sw_led_off(hw, pLed0);
+ rtl92se_sw_led_off(hw, pled0);
}
@@ -1685,8 +1677,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
break;
case EEPROM_93C46:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!\n");
+ pr_err("RTL819X Not boot from eeprom, check it !!\n");
return;
default:
@@ -2030,7 +2021,7 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl92se_read_adapter_info(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
rtlefuse->autoload_failflag = true;
}
}
@@ -2463,8 +2454,8 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", enc_algo);
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2481,9 +2472,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv,
- COMP_SEC, DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 870007801f6b..33c307aca911 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -38,9 +38,10 @@ static void _rtl92se_init_led(struct ieee80211_hw *hw,
void rtl92se_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
- _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -63,8 +64,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -73,7 +74,6 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv;
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
rtlpriv = rtl_priv(hw);
@@ -89,7 +89,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain)
+ if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1)));
else
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
@@ -99,8 +99,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = false;
@@ -109,16 +109,17 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
static void _rtl92se_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl92se_sw_led_on(hw, pLed0);
+ rtl92se_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl92se_sw_led_off(hw, pLed0);
+ rtl92se_sw_led_off(hw, pled0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index fcb9216af82d..86cb853f7169 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -235,7 +235,6 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
u8 operation)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (!is_hal_stop(rtlhal)) {
@@ -247,8 +246,7 @@ void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
rtl92s_phy_set_fw_cmd(hw, FW_CMD_RESUME_DM_BY_SCAN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown operation\n");
+ pr_err("Unknown operation\n");
break;
}
}
@@ -288,8 +286,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -313,8 +311,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x18);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -330,7 +328,7 @@ static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL\n");
+ WARN_ONCE(true, "rtl8192se: cmdtable cannot be NULL\n");
return false;
}
@@ -374,8 +372,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "invalid channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8192se: invalid channel for Zebra: %d\n", channel);
_rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -437,9 +435,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ pr_err("switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -644,8 +641,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
_rtl92se_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpwr_state);
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
bresult = false;
break;
}
@@ -937,8 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
goto phy_BB8190_Config_ParaFile_Fail;
}
@@ -951,8 +947,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
+ pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
goto phy_BB8190_Config_ParaFile_Fail;
}
@@ -1077,12 +1072,10 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw)
(rtlphy->rf_type == RF_1T2R && rf_num != 2) ||
(rtlphy->rf_type == RF_2T2R && rf_num != 2) ||
(rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "RF_Type(%x) does not match RF_Num(%x)!!\n",
- rtlphy->rf_type, rf_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
- "path1 0x%x, path2 0x%x, pathmap 0x%x\n",
- path1, path2, pathmap);
+ pr_err("RF_Type(%x) does not match RF_Num(%x)!!\n",
+ rtlphy->rf_type, rf_num);
+ pr_err("path1 0x%x, path2 0x%x, pathmap 0x%x\n",
+ path1, path2, pathmap);
}
return rtstatus;
@@ -1221,7 +1214,7 @@ void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw)
} while (--pollingcnt);
if (pollingcnt == 0)
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Set FW Cmd fail!!\n");
+ pr_err("Set FW Cmd fail!!\n");
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index bd2fa7735866..ea5b8ec45ec9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -523,8 +523,7 @@ void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 998cefbd7e89..2006b09ea74f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -96,8 +96,7 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
return;
}
if (firmware->size > rtlpriv->max_fw_size) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is too big!\n");
+ pr_err("Firmware is too big!\n");
rtlpriv->max_fw_size = 0;
release_firmware(firmware);
return;
@@ -179,8 +178,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->first_init = true;
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -218,8 +215,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
@@ -299,7 +295,8 @@ static struct rtl_mod_params rtl92se_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
};
/* Because memory R/W bursting will cause system hang/crash
@@ -418,7 +415,8 @@ MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin");
module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92se_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92se_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92se_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
@@ -426,7 +424,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 9a5a11399221..12cef01e593b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -583,7 +583,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -603,7 +603,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
SET_RX_STATUS_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
@@ -623,7 +623,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -639,7 +639,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index e5505387260b..a954a87b0ed9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -99,8 +99,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!bwrite_sucess) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Write H2C fail because no trigger for FW INT!\n");
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
break;
}
@@ -123,8 +122,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", boxnum);
+ pr_err("switch case %#x not processed\n",
+ boxnum);
break;
}
@@ -229,8 +228,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", cmd_len);
+ pr_err("switch case %#x not processed\n",
+ cmd_len);
break;
}
@@ -259,8 +258,8 @@ void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8723ae: error H2C cmd because of Fw download fail!!!\n");
return;
}
memset(tmp_cmdbuf, 0, 8);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f8be0bd7e326..859c045bd37c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -570,9 +570,8 @@ static bool _rtl8723e_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -665,9 +664,8 @@ static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw)
static void _rtl8723e_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpriv->rtlhal.up_first_time)
return;
@@ -961,7 +959,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl8712e_init_mac(hw);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
goto exit;
}
@@ -1107,8 +1105,7 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
"Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Chip Version ID: Unknown. Bug?\n");
+ pr_err("Chip Version ID: Unknown. Bug?\n");
break;
}
@@ -1157,8 +1154,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not support!\n", type);
+ pr_err("Network type %d not support!\n", type);
return 1;
break;
}
@@ -1256,7 +1252,7 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8723ae: invalid aci: %d !\n", aci);
break;
}
}
@@ -1793,13 +1789,12 @@ exit:
static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -1852,7 +1847,7 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
} else {
rtlefuse->autoload_failflag = true;
_rtl8723e_read_adapter_info(hw, false);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
_rtl8723e_hal_customized_behavior(hw);
}
@@ -2245,9 +2240,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id =
rtl_cam_get_free_entry(hw, p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index 77c10047cb20..d567b0df0e9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -58,8 +58,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -68,7 +68,6 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -81,7 +80,7 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain) {
+ if (rtlpriv->ledctl.led_opendrain) {
ledcfg &= 0x90; /* Set to software control. */
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -100,8 +99,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = false;
@@ -109,24 +108,26 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8723e_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
- _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl8723e_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
- rtl8723e_sw_led_on(hw, pLed0);
+ rtl8723e_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
- rtl8723e_sw_led_off(hw, pLed0);
+ rtl8723e_sw_led_off(hw, pled0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 17b58cb32d55..5cf29f5a4b54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -133,7 +133,7 @@ static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data)
{
- RT_ASSERT(false, "deprecated!\n");
+ WARN_ONCE(true, "rtl8723ae: _rtl8723e_phy_fw_rf_serial_write deprecated!\n");
}
static void _rtl8723e_phy_bb_config_1t(struct ieee80211_hw *hw)
@@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -227,13 +227,13 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
_rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -749,8 +749,7 @@ void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -791,8 +790,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -816,8 +815,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -885,8 +884,8 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if (rtlphy->set_bwmode_inprogress)
return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14),
+ "rtl8723ae: WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
@@ -954,8 +953,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8723ae: illegal channel for Zebra: %d\n", channel);
rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -977,8 +976,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd = &postcommoncmd[*step];
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Invalid 'stage' = %d, Check it!\n", *stage);
+ pr_err("Invalid 'stage' = %d, Check it!\n",
+ *stage);
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
index 422771778e03..89958b64b52d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
@@ -51,8 +51,7 @@ void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index c51a9e8234e9..7bf9f2557920 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -145,8 +145,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
(u32)(PHIMR_RXFOVW |
0);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -172,8 +170,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x6000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw.\n");
+ pr_err("Can't alloc buffer for fw.\n");
return 1;
}
@@ -186,8 +183,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
return 0;
@@ -270,7 +266,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
.msi_support = false,
.disable_watchdog = false,
};
@@ -384,7 +381,8 @@ MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin");
module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8723e_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8723e_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
@@ -396,7 +394,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index e93125ebed81..c9838f52a7ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -617,7 +617,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -636,7 +636,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
@@ -656,7 +656,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n",
desc_name);
break;
}
@@ -672,7 +672,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(pdesc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index 8c5c27ce8e05..c7ee9ba5e26e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -97,8 +97,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!bwrite_sucess) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Write H2C fail because no trigger for FW INT!\n");
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
break;
}
@@ -121,8 +120,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", boxnum);
+ pr_err("switch case %#x not processed\n",
+ boxnum);
break;
}
@@ -194,8 +193,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", cmd_len);
+ pr_err("switch case %#x not processed\n",
+ cmd_len);
break;
}
@@ -224,8 +223,8 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8723be: error H2C cmd because of Fw download fail!!!\n");
return;
}
@@ -586,9 +585,9 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
(u8 *)p2p_ps_offload);
}
-static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
- u8 c2h_cmd_id,
- u8 c2h_cmd_len, u8 *tmp_buf)
+void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
+ u8 c2h_cmd_id,
+ u8 c2h_cmd_len, u8 *tmp_buf)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -636,5 +635,15 @@ void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len)
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE,
"[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
- _rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+ switch (c2h_cmd_id) {
+ case C2H_8723B_BT_INFO:
+ case C2H_8723B_BT_MP:
+ rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+ break;
+
+ default:
+ rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+ tmp_buf);
+ break;
+ }
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 067429669bda..c652fa1339a7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -148,5 +148,6 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus);
void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len);
-
+void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+ u8 c2h_cmd_len, u8 *tmp_buf);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index aba60c3145c5..1acbfb86472c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -747,9 +747,8 @@ static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -810,9 +809,8 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpriv->rtlhal.up_first_time)
return;
@@ -1383,7 +1381,7 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw)
}
rtstatus = _rtl8723be_init_mac(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
goto exit;
}
@@ -1532,8 +1530,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not support!\n", type);
+ pr_err("Network type %d not support!\n", type);
return 1;
}
@@ -1631,7 +1628,7 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8723be: invalid aci: %d !\n", aci);
break;
}
}
@@ -2022,6 +2019,37 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
}
+static u8 _rtl8723be_read_package_type(struct ieee80211_hw *hw)
+{
+ u8 package_type;
+ u8 value;
+
+ efuse_power_switch(hw, false, true);
+ if (!efuse_one_byte_read(hw, 0x1FB, &value))
+ value = 0;
+ efuse_power_switch(hw, false, false);
+
+ switch (value & 0x7) {
+ case 0x4:
+ package_type = PACKAGE_TFBGA79;
+ break;
+ case 0x5:
+ package_type = PACKAGE_TFBGA90;
+ break;
+ case 0x6:
+ package_type = PACKAGE_QFN68;
+ break;
+ case 0x7:
+ package_type = PACKAGE_TFBGA80;
+ break;
+ default:
+ package_type = PACKAGE_DEFAULT;
+ break;
+ }
+
+ return package_type;
+}
+
static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
bool pseudo_test)
{
@@ -2080,6 +2108,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->autoload_failflag,
hwinfo);
+ rtlhal->package_type = _rtl8723be_read_package_type(hw);
+
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
@@ -2197,13 +2227,12 @@ exit:
static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -2247,7 +2276,7 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl8723be_read_adapter_info(hw, false);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
_rtl8723be_hal_customized_behavior(hw);
}
@@ -2584,9 +2613,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
entry_id = rtl_cam_get_free_entry(hw,
p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC,
- DBG_EMERG,
- "Can not find free hw security cam entry\n");
+ pr_err("Can not find free hw security cam entry\n");
return;
}
} else {
@@ -2657,16 +2684,23 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
value = hwinfo[EEPROM_RF_BT_SETTING_8723B];
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ rtlpriv->btcoexist.btc_info.single_ant_path =
+ (value & 0x40); /*0xc3[6]*/
} else {
rtlpriv->btcoexist.btc_info.btcoexist = 0;
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ rtlpriv->btcoexist.btc_info.single_ant_path = 0;
}
/* override ant_num / ant_path */
- if (mod_params->ant_sel)
+ if (mod_params->ant_sel) {
rtlpriv->btcoexist.btc_info.ant_num =
(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+
+ rtlpriv->btcoexist.btc_info.single_ant_path =
+ (mod_params->ant_sel == 1 ? 0 : 1);
+ }
}
void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 497913eb3b37..4f7890d62c21 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -57,8 +57,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -80,7 +79,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain) {
+ if (rtlpriv->ledctl.led_opendrain) {
ledcfg &= 0x90; /* Set to software control. */
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -99,8 +98,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", pled->ledpin);
+ pr_err("switch case %#x not processed\n",
+ pled->ledpin);
break;
}
pled->ledon = false;
@@ -108,16 +107,18 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
- _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 3cc2232f25ca..ab0f39e46e1b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
_rtl8723be_phy_init_tx_power_by_rate(hw);
@@ -478,13 +478,13 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
}
phy_txpower_by_rate_config(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -939,7 +939,7 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n");
break;
}
@@ -1004,7 +1004,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
shift = 24;
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n");
break;
}
tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
@@ -1249,8 +1249,7 @@ void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -1291,8 +1290,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -1316,8 +1315,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1387,8 +1386,8 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if (rtlphy->set_bwmode_inprogress)
return 0;
- RT_ASSERT((rtlphy->current_channel <= 14),
- "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14),
+ "rtl8723be: WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
@@ -1438,8 +1437,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14),
- "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14),
+ "rtl8723be: illegal channel for Zebra: %d\n", channel);
rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
MAX_RFDEPENDCMD_CNT,
@@ -1462,8 +1461,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd = &postcommoncmd[*step];
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Invalid 'stage' = %d, Check it!\n", *stage);
+ pr_err("Invalid 'stage' = %d, Check it!\n",
+ *stage);
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 78f4f18d87b5..48491454b878 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -51,8 +51,7 @@ void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtlphy->rfreg_chnlval[0]);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 847644d1f5f5..92dbfa8f297f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -144,8 +144,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
HSIMR_RON_INT_EN |
0);
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -179,8 +177,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw.\n");
+ pr_err("Can't alloc buffer for fw.\n");
return 1;
}
@@ -190,8 +187,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request firmware!\n");
+ pr_err("Failed to request firmware!\n");
return 1;
}
return 0;
@@ -264,6 +260,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = {
.get_btc_status = rtl8723be_get_btc_status,
.rx_command_packet = rtl8723be_rx_command_packet,
.is_fw_header = is_fw_header,
+ .c2h_content_parsing = rtl8723be_c2h_content_parsing,
};
static struct rtl_mod_params rtl8723be_mod_params = {
@@ -273,7 +270,8 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.fwctrl_lps = true,
.msi_support = false,
.disable_watchdog = false,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
.ant_sel = 0,
};
@@ -388,7 +386,8 @@ MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8723be_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
@@ -401,7 +400,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog,
"Set to 1 to disable the watchdog (default 0)\n");
MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 2175aecbb8f4..6f65003a895a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -666,8 +666,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
- desc_name);
+ WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n",
+ desc_name);
break;
}
} else {
@@ -685,8 +685,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
- desc_name);
+ WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not process\n",
+ desc_name);
break;
}
}
@@ -705,8 +705,8 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
break;
default:
- RT_ASSERT(false, "ERR txdesc :%d not process\n",
- desc_name);
+ WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not process\n",
+ desc_name);
break;
}
} else {
@@ -721,7 +721,7 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(pdesc);
break;
default:
- RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n",
desc_name);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 6e518625edbe..ac573d69f6d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -26,6 +26,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
+#include "../efuse.h"
#include "fw_common.h"
#include <linux/module.h>
@@ -53,65 +54,6 @@ void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
-void rtl8723_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *)buffer;
- u32 *pu4byteptr = (u32 *)buffer;
- u32 i, offset, blockcount, remainsize;
-
- blockcount = size / blocksize;
- remainsize = size % blocksize;
-
- for (i = 0; i < blockcount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4byteptr + i));
- }
- if (remainsize) {
- offset = blockcount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainsize; i++) {
- rtl_write_byte(rtlpriv,
- (FW_8192C_START_ADDRESS + offset + i),
- *(bufferptr + i));
- }
- }
-}
-EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
-
-void rtl8723_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- rtl8723_fw_block_write(hw, buffer, size);
-}
-EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
-
-void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
- *pfwlen = fwlen;
-}
-EXPORT_SYMBOL(rtl8723_fill_dummy);
-
void rtl8723_write_fw(struct ieee80211_hw *hw,
enum version_8723e version,
u8 *buffer, u32 size, u8 max_page)
@@ -123,26 +65,25 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
- rtl8723_fill_dummy(bufferptr, &size);
+ rtl_fill_dummy(bufferptr, &size);
page_nums = size / FW_8192C_PAGE_SIZE;
remain_size = size % FW_8192C_PAGE_SIZE;
if (page_nums > max_page) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater than %d\n", max_page);
+ pr_err("Page numbers should not greater than %d\n",
+ max_page);
}
for (page = 0; page < page_nums; page++) {
offset = page * FW_8192C_PAGE_SIZE;
- rtl8723_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192C_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
}
if (remain_size) {
offset = page_nums * FW_8192C_PAGE_SIZE;
page = page_nums;
- rtl8723_fw_page_write(hw, page, (bufferptr + offset),
- remain_size);
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size);
}
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
}
@@ -209,14 +150,10 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
(!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= max_count) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report fail ! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("chksum report fail ! REG_MCUFWDL:0x%08x .\n",
+ value32);
goto exit;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
@@ -239,9 +176,8 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
} while (counter++ < max_count);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
+ value32);
exit:
return err;
@@ -293,13 +229,8 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
rtl8723_enable_fw_download(hw, false);
err = rtl8723_fw_free_to_go(hw, is_8723be, max_count);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Firmware is ready to run!\n");
- }
+ if (err)
+ pr_err("Firmware is not ready to run!\n");
return 0;
}
EXPORT_SYMBOL_GPL(rtl8723_download_fw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
index 8ea372d1626e..77c25a976233 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
@@ -28,7 +28,6 @@
#define REG_SYS_FUNC_EN 0x0002
#define REG_MCUFWDL 0x0080
-#define FW_8192C_START_ADDRESS 0x1000
#define FW_8192C_PAGE_SIZE 4096
#define FW_8723A_POLLING_TIMEOUT_COUNT 1000
#define FW_8723B_POLLING_TIMEOUT_COUNT 6000
@@ -84,10 +83,6 @@ enum rtl8723be_cmd {
void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
-void rtl8723_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size);
-void rtl8723_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size);
void rtl8723_write_fw(struct ieee80211_hw *hw,
enum version_8723e version,
u8 *buffer, u32 size, u8 max_page);
@@ -95,6 +90,5 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count);
int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count);
bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb);
-void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index 75cbd1509b52..43d24e1ee5e6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -99,7 +99,7 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
offset &= 0xff;
newoffset = offset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ pr_err("return all one\n");
return 0xFFFFFFFF;
}
tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -147,7 +147,7 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ pr_err("stop\n");
return;
}
offset &= 0xff;
@@ -283,7 +283,7 @@ bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "rtl8723-common: cmdtable cannot be NULL.\n");
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index bdfd444955d2..32900c51f024 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -604,8 +604,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
rtl_dm_dig->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ pr_debug("rtl8821ae: Not connected to any AP\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index b665446351a4..a504dfae4ed3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -27,6 +27,7 @@
#include "../pci.h"
#include "../base.h"
#include "../core.h"
+#include "../efuse.h"
#include "reg.h"
#include "def.h"
#include "fw.h"
@@ -51,63 +52,6 @@ static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
-static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blocksize = sizeof(u32);
- u8 *bufferptr = (u8 *)buffer;
- u32 *pu4byteptr = (u32 *)buffer;
- u32 i, offset, blockcount, remainsize;
-
- blockcount = size / blocksize;
- remainsize = size % blocksize;
-
- for (i = 0; i < blockcount; i++) {
- offset = i * blocksize;
- rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset),
- *(pu4byteptr + i));
- }
-
- if (remainsize) {
- offset = blockcount * blocksize;
- bufferptr += offset;
- for (i = 0; i < remainsize; i++) {
- rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS +
- offset + i), *(bufferptr + i));
- }
- }
-}
-
-static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8)(page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl8821ae_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 fwlen = *pfwlen;
- u8 remain = (u8)(fwlen % 4);
-
- remain = (remain == 0) ? 0 : (4 - remain);
-
- while (remain > 0) {
- pfwbuf[fwlen] = 0;
- fwlen++;
- remain--;
- }
-
- *pfwlen = fwlen;
-}
-
static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
enum version_8821ae version,
u8 *buffer, u32 size)
@@ -119,27 +63,24 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
- _rtl8821ae_fill_dummy(bufferptr, &size);
+ rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8821AE_PAGE_SIZE;
remainsize = size % FW_8821AE_PAGE_SIZE;
- if (pagenums > 8) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not greater then 8\n");
- }
+ if (pagenums > 8)
+ pr_err("Page numbers should not greater then 8\n");
for (page = 0; page < pagenums; page++) {
offset = page * FW_8821AE_PAGE_SIZE;
- _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset),
- FW_8821AE_PAGE_SIZE);
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8821AE_PAGE_SIZE);
}
if (remainsize) {
offset = pagenums * FW_8821AE_PAGE_SIZE;
page = pagenums;
- _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset),
- remainsize);
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
}
}
@@ -161,10 +102,6 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
value32);
goto exit;
}
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
value32 |= MCUFWDL_RDY;
value32 &= ~WINTINI_RDY;
@@ -175,20 +112,14 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
counter = 0;
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
- err = 0;
- goto exit;
- }
+ if (value32 & WINTINI_RDY)
+ return 0;
udelay(FW_8821AE_POLLING_DELAY);
} while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
- value32);
+ pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
+ value32);
exit:
return err;
@@ -510,8 +441,8 @@ void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false,
- "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true,
+ "rtl8821ae: error H2C cmd because of Fw download fail!!!\n");
return;
}
@@ -1809,9 +1740,9 @@ static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw,
rtl8821ae_dm_update_init_rate(hw, rate);
}
-static void _rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
- u8 c2h_cmd_id, u8 c2h_cmd_len,
- u8 *tmp_buf)
+void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
+ u8 c2h_cmd_id, u8 c2h_cmd_len,
+ u8 *tmp_buf)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1853,5 +1784,15 @@ void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer,
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
"[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
- _rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+
+ switch (c2h_cmd_id) {
+ case C2H_8812_BT_INFO:
+ rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+ break;
+
+ default:
+ rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+ tmp_buf);
+ break;
+ }
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index 8f5b4aade3c9..90a98ed879f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -329,4 +329,7 @@ void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw,
void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw);
void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw,
u8 *buffer, u8 length);
+void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
+ u8 c2h_cmd_id, u8 c2h_cmd_len,
+ u8 *tmp_buf);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 1281ebe0c30a..363d2f28da1f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -822,9 +822,8 @@ static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to polling write LLT done at address %d!\n",
- address);
+ pr_err("Failed to polling write LLT done at address %d!\n",
+ address);
status = false;
break;
}
@@ -891,9 +890,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (rtlpriv->rtlhal.up_first_time)
@@ -1128,7 +1126,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_word(rtlpriv, read_addr);
+ ret = rtl_read_byte(rtlpriv, read_addr);
}
return ret;
}
@@ -1927,7 +1925,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_init_mac(hw);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ pr_err("Init MAC failed\n");
err = 1;
return err;
}
@@ -2174,8 +2172,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
"Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Network type %d not support!\n", type);
+ pr_err("Network type %d not support!\n", type);
return 1;
}
@@ -2249,7 +2246,7 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
break;
default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ WARN_ONCE(true, "rtl8821ae: invalid aci: %d !\n", aci);
break;
}
}
@@ -2601,11 +2598,10 @@ static u8 _rtl8821ae_get_chnl_group(u8 chnl)
group = 12;
else if (173 <= chnl && chnl <= 177)
group = 13;
- else
- /*RT_TRACE(rtlpriv, COMP_EFUSE,DBG_LOUD,
- "5G, Channel %d in Group not found\n",chnl);*/
- RT_ASSERT(!COMP_EFUSE,
- "5G, Channel %d in Group not found\n", chnl);
+ else
+ WARN_ONCE(true,
+ "rtl8821ae: 5G, Channel %d in Group not found\n",
+ chnl);
}
return group;
}
@@ -3101,7 +3097,6 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID,
EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
@@ -3196,7 +3191,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
"SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type);
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
if (rtlhal->oem_id == RT_CID_DEFAULT) {
switch (rtlefuse->eeprom_oemid) {
@@ -3227,10 +3222,10 @@ exit:
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
- pcipriv->ledctl.led_opendrain = true;
+ rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -3276,7 +3271,7 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
_rtl8821ae_read_adapter_info(hw, false);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ pr_err("Autoload ERR!!\n");
}
/*hal_ReadRFType_8812A()*/
/* _rtl8821ae_hal_customized_behavior(hw); */
@@ -3951,8 +3946,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
if (mac->opmode == NL80211_IFTYPE_AP) {
entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
if (entry_id >= TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
- "Can not find free hwsecurity cam entry\n");
+ pr_err("an not find free hwsecurity cam entry\n");
return;
}
} else {
@@ -4135,8 +4129,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
count++;
} while (tmp && count < 100);
- RT_ASSERT((count < 100),
- "Write wake up frame mask FAIL %d value!\n", tmp);
+ WARN_ONCE((count >= 100),
+ "rtl8821ae: Write wake up frame mask FAIL %d value!\n",
+ tmp);
}
/* Disable Rx packet buffer access. */
rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index fcb3b28c6b8f..405c7541b386 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -101,7 +101,6 @@ void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -114,7 +113,7 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain) {
+ if (rtlpriv->ledctl.led_opendrain) {
ledcfg &= 0x90; /* Set to software control. */
rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -143,7 +142,6 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
u16 ledreg = REG_LEDCFG1;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
switch (pled->ledpin) {
case LED_PIN_LED0:
@@ -163,7 +161,7 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
"In SwLedOff,LedAddr:%X LEDPIN=%d\n",
ledreg, pled->ledpin);
/*Open-drain arrangement for controlling the LED*/
- if (pcipriv->ledctl.led_opendrain) {
+ if (rtlpriv->ledctl.led_opendrain) {
u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
ledreg &= 0xd0; /* Set to software control.*/
@@ -182,17 +180,17 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
- _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
- _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+ _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+ _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (ledaction) {
@@ -200,15 +198,15 @@ static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
- rtl8812ae_sw_led_on(hw, pLed0);
+ rtl8812ae_sw_led_on(hw, pled0);
else
- rtl8821ae_sw_led_on(hw, pLed0);
+ rtl8821ae_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
- rtl8812ae_sw_led_off(hw, pLed0);
+ rtl8812ae_sw_led_off(hw, pled0);
else
- rtl8821ae_sw_led_off(hw, pLed0);
+ rtl8821ae_sw_led_off(hw, pled0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 5dad402171c2..8da874cbec1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -215,7 +215,6 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool is_pi_mode = false;
u32 retvalue = 0;
@@ -223,7 +222,7 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
/* 2009/06/17 MH We can not execute IO for power
save or other accident mode.*/
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ pr_err("return all one\n");
return 0xFFFFFFFF;
}
/* <20120809, Kordan> CCA OFF(when entering),
@@ -284,7 +283,7 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
u32 newoffset;
if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ pr_err("stop\n");
return;
}
offset &= 0xff;
@@ -989,7 +988,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
s8 temp_pwrlmt = 0;
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
temp_pwrlmt = rtlphy->txpwr_limit_5g[regulation]
@@ -1164,7 +1163,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
_rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw);
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
/* obtain the base dBm values in 2.4G band
@@ -1220,7 +1219,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
}
}
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
/* obtain the base dBm values in 5G band
@@ -1297,7 +1296,7 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
"=====> _rtl8821ae_phy_init_txpower_limit()!\n");
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1306,7 +1305,7 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
= MAX_POWER_INDEX;
}
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1665,7 +1664,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+ pr_err("Write BB Reg Fail!!\n");
return false;
}
_rtl8821ae_phy_init_tx_power_by_rate(hw);
@@ -1674,7 +1673,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
@@ -1688,7 +1687,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_AGC_TAB);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ pr_err("AGC Table Fail\n");
return false;
}
rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -1870,8 +1869,8 @@ static u8 _rtl8821ae_get_rate_section_index(u32 regaddr)
else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
index = (u8)((regaddr - 0xE20) / 4);
else
- RT_ASSERT(!COMP_INIT,
- "Invalid RegAddr 0x%x\n", regaddr);
+ WARN_ONCE(true,
+ "rtl8821ae: Invalid RegAddr 0x%x\n", regaddr);
return index;
}
@@ -2064,8 +2063,7 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_C:
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpath);
+ pr_err("switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2132,8 +2130,7 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_B:
case RF90_PATH_C:
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpath);
+ pr_err("switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2322,7 +2319,7 @@ static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
rate_section = 11;
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n");
break;
}
@@ -2588,7 +2585,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
shift = 24;
break;
default:
- RT_ASSERT(true, "Rate_Section is Illegal\n");
+ WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n");
break;
}
@@ -3336,8 +3333,7 @@ void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
(u8 *)&iotype);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
+ pr_err("Unknown Scan Backup operation.\n");
break;
}
}
@@ -3378,8 +3374,7 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER)
sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "SCMapping: Not Correct Primary40MHz Setting\n");
+ pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
(mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
@@ -3394,16 +3389,14 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
(mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "SCMapping: Not Correct Primary40MHz Setting\n");
+ pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER)
sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER)
sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "SCMapping: Not Correct Primary40MHz Setting\n");
+ pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
}
return (sc_set_40 << 4) | sc_set_20;
}
@@ -3479,8 +3472,8 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
break;
}
@@ -4660,8 +4653,8 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", iotype);
+ pr_err("switch case %#x not processed\n",
+ iotype);
break;
}
} while (false);
@@ -4704,9 +4697,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ pr_err("switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -4811,8 +4803,8 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case %#x not processed\n", rfpwr_state);
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
index c6ab957023e6..95489f41f8a0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
@@ -34,8 +34,6 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
switch (bandwidth) {
case HT_CHANNEL_WIDTH_20:
rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3);
@@ -50,8 +48,7 @@ void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "unknown bandwidth: %#X\n", bandwidth);
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 297938e0effd..77cf3b2cd3f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -160,8 +160,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET |
WAKE_ON_PATTERN_MATCH;
- /* for debug level */
- rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for LPS & IPS */
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -192,14 +190,12 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for fw.\n");
+ pr_err("Can't alloc buffer for fw.\n");
return 1;
}
rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.wowlan_firmware) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't alloc buffer for wowlan fw.\n");
+ pr_err("Can't alloc buffer for wowlan fw.\n");
return 1;
}
@@ -218,8 +214,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request normal firmware!\n");
+ pr_err("Failed to request normal firmware!\n");
return 1;
}
/*load wowlan firmware*/
@@ -229,8 +224,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Failed to request wowlan firmware!\n");
+ pr_err("Failed to request wowlan firmware!\n");
return 1;
}
return 0;
@@ -303,6 +297,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = {
.fill_h2c_cmd = rtl8821ae_fill_h2c_cmd,
.get_btc_status = rtl8821ae_get_btc_status,
.rx_command_packet = rtl8821ae_rx_command_packet,
+ .c2h_content_parsing = rtl8821ae_c2h_content_parsing,
.add_wowlan_pattern = rtl8821ae_add_wowlan_pattern,
};
@@ -313,7 +308,8 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
.fwctrl_lps = true,
.msi_support = true,
.int_clear = true,
- .debug = DBG_EMERG,
+ .debug_level = 0,
+ .debug_mask = 0,
.disable_watchdog = 0,
};
@@ -434,7 +430,8 @@ MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8821ae_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
@@ -447,7 +444,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 27727186ba5f..108098152cf3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -904,8 +904,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
break;
default:
- RT_ASSERT(false,
- "ERR txdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8821ae: ERR txdesc :%d not processed\n",
+ desc_name);
break;
}
} else {
@@ -923,8 +924,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
SET_RX_DESC_EOR(pdesc, 1);
break;
default:
- RT_ASSERT(false,
- "ERR rxdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8821ae: ERR rxdesc :%d not processed\n",
+ desc_name);
break;
}
}
@@ -943,8 +945,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
break;
default:
- RT_ASSERT(false,
- "ERR txdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8821ae: ERR txdesc :%d not processed\n",
+ desc_name);
break;
}
} else {
@@ -959,8 +962,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
ret = GET_RX_DESC_BUFF_ADDR(pdesc);
break;
default:
- RT_ASSERT(false,
- "ERR rxdesc :%d not process\n", desc_name);
+ WARN_ONCE(true,
+ "rtl8821ae: ERR rxdesc :%d not processed\n",
+ desc_name);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 49015b05f3d1..4d989b8ab185 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -421,14 +421,12 @@ static void _rtl_rx_completed(struct urb *urb);
static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb,
struct urb *urb, gfp_t gfp_mask)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
void *buf;
buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask,
&urb->transfer_dma);
if (!buf) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Failed to usb_alloc_coherent!!\n");
+ pr_err("Failed to usb_alloc_coherent!!\n");
return -ENOMEM;
}
@@ -613,8 +611,6 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
static void _rtl_rx_completed(struct urb *_urb)
{
struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
- struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
- struct rtl_priv *rtlpriv = rtl_priv(hw);
int err = 0;
if (unlikely(IS_USB_STOP(rtlusb)))
@@ -628,17 +624,15 @@ static void _rtl_rx_completed(struct urb *_urb)
struct ieee80211_hdr *hdr;
if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Too short packet from bulk IN! (len: %d)\n",
- size);
+ pr_err("Too short packet from bulk IN! (len: %d)\n",
+ size);
goto resubmit;
}
qlen = skb_queue_len(&rtlusb->rx_queue);
if (qlen >= __RX_SKB_MAX_QUEUED) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Pending RX skbuff queue full! (qlen: %d)\n",
- qlen);
+ pr_err("Pending RX skbuff queue full! (qlen: %d)\n",
+ qlen);
goto resubmit;
}
@@ -647,8 +641,7 @@ static void _rtl_rx_completed(struct urb *_urb)
skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
if (!skb) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Can't allocate skb for bulk IN!\n");
+ pr_err("Can't allocate skb for bulk IN!\n");
goto resubmit;
}
@@ -725,7 +718,6 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
struct urb *urb;
int err;
int i;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
WARN_ON(0 == rtlusb->rx_urb_num);
@@ -740,8 +732,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
if (err < 0) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Failed to prep_rx_urb!!\n");
+ pr_err("Failed to prep_rx_urb!!\n");
usb_free_urb(urb);
goto err_out;
}
@@ -827,19 +818,36 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct urb *urb;
/* should after adapter start and interrupt enable. */
set_hal_stop(rtlhal);
cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
/* Enable software */
SET_USB_STOP(rtlusb);
+
+ /* free pre-allocated URBs from rtl_usb_start() */
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+ tasklet_kill(&rtlusb->rx_work_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+
+ skb_queue_purge(&rtlusb->rx_queue);
+
+ while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ }
+
rtlpriv->cfg->ops->hw_disable(hw);
}
static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
{
int err;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
usb_anchor_urb(_urb, &rtlusb->tx_submitted);
@@ -847,8 +855,7 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
if (err < 0) {
struct sk_buff *skb;
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Failed to submit urb\n");
+ pr_err("Failed to submit urb\n");
usb_unanchor_urb(_urb);
skb = (struct sk_buff *)_urb->context;
kfree_skb(skb);
@@ -859,7 +866,6 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
struct sk_buff *skb)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct ieee80211_tx_info *txinfo;
@@ -870,8 +876,7 @@ static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
txinfo->flags |= IEEE80211_TX_STAT_ACK;
if (urb->status) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Urb has error status 0x%X\n", urb->status);
+ pr_err("Urb has error status 0x%X\n", urb->status);
goto out;
}
/* TODO: statistics */
@@ -919,7 +924,6 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
enum rtl_txq qnum)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
u32 ep_num;
struct urb *_urb = NULL;
@@ -927,8 +931,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "USB device is stopping...\n");
+ pr_err("USB device is stopping...\n");
kfree_skb(skb);
return;
}
@@ -936,8 +939,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
_skb = skb;
_urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
if (unlikely(!_urb)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't allocate urb. Drop skb!\n");
+ pr_err("Can't allocate urb. Drop skb!\n");
kfree_skb(skb);
return;
}
@@ -1059,7 +1061,7 @@ int rtl_usb_probe(struct usb_interface *intf,
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- RT_ASSERT(false, "ieee80211 alloc failed\n");
+ WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
@@ -1090,7 +1092,6 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->rtlhal.interface = INTF_USB;
rtlpriv->cfg = rtl_hal_cfg;
rtlpriv->intf_ops = &rtl_usb_ops;
- rtl_dbgp_flag_init(hw);
/* Init IO handler */
_rtl_usb_io_handler_init(&udev->dev, hw);
rtlpriv->cfg->ops->read_chip_version(hw);
@@ -1103,20 +1104,18 @@ int rtl_usb_probe(struct usb_interface *intf,
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't allocate sw for mac80211\n");
+ pr_err("Can't allocate sw for mac80211\n");
goto error_out;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+ pr_err("Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = ieee80211_register_hw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Can't register mac80211 hw.\n");
+ pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
goto error_out;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index a6d43d2ecd36..c91cec04bfaf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -146,8 +146,8 @@ struct rtl_usb {
};
struct rtl_usb_priv {
+ struct bt_coexist_info bt_coexist;
struct rtl_usb dev;
- struct rtl_led_ctl ledctl;
};
#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index dafe486f8448..65ef42b37651 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -157,8 +157,8 @@ enum rtl8192c_h2c_cmd {
#define MAX_REGULATION_NUM 4
#define MAX_RF_PATH_NUM 4
#define MAX_RATE_SECTION_NUM 6
-#define MAX_2_4G_BANDWITH_NUM 4
-#define MAX_5G_BANDWITH_NUM 4
+#define MAX_2_4G_BANDWIDTH_NUM 4
+#define MAX_5G_BANDWIDTH_NUM 4
#define MAX_RF_PATH 4
#define MAX_CHNL_GROUP_24G 6
#define MAX_CHNL_GROUP_5G 14
@@ -925,6 +925,14 @@ enum wolpattern_type {
UNKNOWN_TYPE = 4,
};
+enum package_type {
+ PACKAGE_DEFAULT,
+ PACKAGE_QFN68,
+ PACKAGE_TFBGA90,
+ PACKAGE_TFBGA80,
+ PACKAGE_TFBGA79
+};
+
struct octet_string {
u8 *octet;
u16 length;
@@ -1257,12 +1265,12 @@ struct rtl_phy {
u8 cur_bw40_txpwridx;
s8 txpwr_limit_2_4g[MAX_REGULATION_NUM]
- [MAX_2_4G_BANDWITH_NUM]
+ [MAX_2_4G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_2G]
[MAX_RF_PATH_NUM];
s8 txpwr_limit_5g[MAX_REGULATION_NUM]
- [MAX_5G_BANDWITH_NUM]
+ [MAX_5G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_5G]
[MAX_RF_PATH_NUM];
@@ -1509,6 +1517,7 @@ struct rtl_hal {
u32 version; /*version of chip */
u8 state; /*stop 0, start 1 */
u8 board_type;
+ u8 package_type;
u8 external_pa;
u8 pa_mode;
@@ -2193,6 +2202,8 @@ struct rtl_hal_ops {
struct rtl_wow_pattern *rtl_pattern,
u8 index);
u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
+ void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len,
+ u8 *val);
};
struct rtl_intf_ops {
@@ -2221,11 +2232,13 @@ struct rtl_intf_ops {
};
struct rtl_mod_params {
+ /* default: 0,0 */
+ u64 debug_mask;
/* default: 0 = using hardware encryption */
bool sw_crypto;
/* default: 0 = DBG_EMERG (0)*/
- int debug;
+ int debug_level;
/* default: 1 = using no linked power save */
bool inactiveps;
@@ -2306,6 +2319,7 @@ struct rtl_locks {
spinlock_t waitq_lock;
spinlock_t entry_list_lock;
spinlock_t usb_lock;
+ spinlock_t c2hcmd_lock;
/*FW clock change */
spinlock_t fw_ps_lock;
@@ -2335,6 +2349,7 @@ struct rtl_works {
struct workqueue_struct *rtl_wq;
struct delayed_work watchdog_wq;
struct delayed_work ips_nic_off_wq;
+ struct delayed_work c2hcmd_wq;
/* For SW LPS */
struct delayed_work ps_work;
@@ -2345,16 +2360,6 @@ struct rtl_works {
struct work_struct fill_h2c_cmd;
};
-struct rtl_debug {
- u32 dbgp_type[DBGP_TYPE_MAX];
- int global_debuglevel;
- u64 global_debugcomponents;
-
- /* add for proc debug */
- struct proc_dir_entry *proc_dir;
- char proc_name[20];
-};
-
#define MIMO_PS_STATIC 0
#define MIMO_PS_DYNAMIC 1
#define MIMO_PS_NOLIMIT 3
@@ -2462,6 +2467,7 @@ struct rtl_btc_info {
u8 bt_type;
u8 btcoexist;
u8 ant_num;
+ u8 single_ant_path;
};
struct bt_coexist_info {
@@ -2551,6 +2557,13 @@ struct proxim {
u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
};
+struct rtl_c2hcmd {
+ struct list_head list;
+ u8 tag;
+ u8 len;
+ u8 *val;
+};
+
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
@@ -2570,6 +2583,7 @@ struct rtl_priv {
struct rtl_dm dm;
struct rtl_security sec;
struct rtl_efuse efuse;
+ struct rtl_led_ctl ledctl;
struct rtl_ps_ctl psc;
struct rate_adaptive ra;
@@ -2583,7 +2597,9 @@ struct rtl_priv {
/* sta entry list for ap adhoc or mesh */
struct list_head entry_list;
- struct rtl_debug dbg;
+ /* c2hcmd list for kthread level access */
+ struct list_head c2hcmd_list;
+
int max_fw_size;
/*
@@ -2713,23 +2729,14 @@ enum bt_radio_shared {
(le32_to_cpu(_val))
/* Read data from memory */
-#define READEF1BYTE(_ptr) \
+#define READEF1BYTE(_ptr) \
EF1BYTE(*((u8 *)(_ptr)))
/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
+#define READEF2BYTE(_ptr) \
EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
+#define READEF4BYTE(_ptr) \
EF4BYTE(*(_ptr))
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val) \
- (*((u8 *)(_ptr))) = EF1BYTE(_val)
-/* Write le16 data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val) \
- (*((u16 *)(_ptr))) = EF2BYTE(_val)
-#define WRITEEF4BYTE(_ptr, _val) \
- (*((u32 *)(_ptr))) = EF2BYTE(_val)
-
/* Create a bit mask
* Examples:
* BIT_LEN_MASK_32(0) => 0x00000000
@@ -2810,14 +2817,14 @@ value to host byte ordering.*/
* Set subfield of little-endian 4-byte value to specified value.
*/
#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u32 *)(__pstart)) = \
- ( \
+ *((__le32 *)(__pstart)) = \
+ cpu_to_le32( \
LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
);
#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u16 *)(__pstart)) = \
- ( \
+ *((__le16 *)(__pstart)) = \
+ cpu_to_le16( \
LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 603c90470225..785334f7a538 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3187,7 +3187,7 @@ static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
return;
priv->last_cqm_event_rssi = rssi;
- cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL);
+ cfg80211_cqm_rssi_notify(usbdev->net, event, rssi, GFP_KERNEL);
}
#define DEVICE_POLLER_JIFFIES (HZ)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index dadaa73ab49d..e3216473aecb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -877,7 +877,7 @@ static void rsi_perform_cqm(struct rsi_common *common,
common->cqm_info.last_cqm_event_rssi = rssi;
rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event);
- ieee80211_cqm_rssi_notify(adapter->vifs[0], event, GFP_KERNEL);
+ ieee80211_cqm_rssi_notify(adapter->vifs[0], event, rssi, GFP_KERNEL);
return;
}
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index daf06a4f842e..a52224836a2b 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1019,7 +1019,7 @@ void cw1200_event_handler(struct work_struct *work)
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
- ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+ ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, rcpi_rssi,
GFP_KERNEL);
break;
}
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index d0593bc1f1a9..f5acd24d0e2b 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -150,7 +150,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
"ROAMING_TRIGGER_LOW_RSSI_EVENT");
ieee80211_cqm_rssi_notify(wl->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
}
if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
@@ -158,7 +158,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
"ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
ieee80211_cqm_rssi_notify(wl->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
}
}
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 7f672f6879d0..58e148d7bc7b 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -281,7 +281,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
}
if (value < 1 || value > 65535) {
- wl1271_warning("dyanmic_ps_timeout is not in valid range");
+ wl1271_warning("dynamic_ps_timeout is not in valid range");
return -ERANGE;
}
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 4b59f67724de..f2e90d223d94 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -129,7 +129,8 @@ void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
vif = wl12xx_wlvif_to_vif(wlvif);
if (event != wlvif->last_rssi_event)
- ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ ieee80211_cqm_rssi_notify(vif, event, metric,
+ GFP_KERNEL);
wlvif->last_rssi_event = event;
}
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e536aa01b937..a21fda910529 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3202,6 +3202,21 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
}
+
+ /*
+ * If interface in AP mode and created with allmulticast then disable
+ * the firmware filters so that all multicast packets are passed
+ * This is mandatory for MDNS based discovery protocols
+ */
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI) {
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ false,
+ NULL, 0);
+ if (ret < 0)
+ goto out_sleep;
+ }
+ }
}
/*
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 47fe7f96a242..287023ef4a78 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -81,13 +81,6 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
sdio_claim_host(func);
- if (unlikely(dump)) {
- printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
- print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
- DUMP_PREFIX_OFFSET, 16, 1,
- buf, len, false);
- }
-
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -107,6 +100,13 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
if (WARN_ON(ret))
dev_err(child->parent, "sdio read failed (%d)\n", ret);
+ if (unlikely(dump)) {
+ printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
+ print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+ }
+
return ret;
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3ce1f7da8647..530586be05b4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -113,10 +113,10 @@ struct xenvif_stats {
* A subset of struct net_device_stats that contains only the
* fields that are updated in netback.c for each queue.
*/
- unsigned int rx_bytes;
- unsigned int rx_packets;
- unsigned int tx_bytes;
- unsigned int tx_packets;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 tx_bytes;
+ u64 tx_packets;
/* Additional stats used by xenvif */
unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd29b7e9..a2d326760a72 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
work_done = xenvif_tx_action(queue, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
xenvif_napi_schedule_or_enable_events(queue);
}
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
- unsigned long rx_bytes = 0;
- unsigned long rx_packets = 0;
- unsigned long tx_bytes = 0;
- unsigned long tx_packets = 0;
+ u64 rx_bytes = 0;
+ u64 rx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 tx_packets = 0;
unsigned int index;
+ spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < num_queues; ++index) {
+ for (index = 0; index < vif->num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
}
out:
+ spin_unlock(&vif->lock);
+
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 47b481095d77..f9bcf4a665bc 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
+#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@@ -1622,11 +1623,12 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs if user has not
+ /* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
- xenvif_max_queues = num_online_cpus();
+ xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+ num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eaec9427..bb854f92f5a5 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
+ unsigned int queue_index;
+
xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+ spin_lock(&be->vif->lock);
+ vfree(be->vif->queues);
+ be->vif->num_queues = 0;
+ be->vif->queues = NULL;
+ spin_unlock(&be->vif->lock);
+
xenvif_disconnect_ctrl(be->vif);
}
}
@@ -723,7 +734,7 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
}
static void xen_net_rate_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
@@ -780,7 +791,7 @@ static void xen_unregister_credit_watch(struct xenvif *vif)
}
static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
struct xenvif *vif = container_of(watch, struct xenvif,
mcast_ctrl_watch);
@@ -855,8 +866,8 @@ static void unregister_hotplug_status_watch(struct backend_info *be)
}
static void hotplug_status_changed(struct xenbus_watch *watch,
- const char **vec,
- unsigned int vec_size)
+ const char *path,
+ const char *token)
{
struct backend_info *be = container_of(watch,
struct backend_info,
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a479cd99911d..6ffc482550c1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,7 @@
#include <xen/interface/grant_table.h>
/* Module parameters */
+#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@@ -281,6 +282,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
RING_IDX req_prod = queue->rx.req_prod_pvt;
int notify;
+ int err = 0;
if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
@@ -295,8 +297,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
- if (!skb)
+ if (!skb) {
+ err = -ENOMEM;
break;
+ }
id = xennet_rxidx(req_prod);
@@ -320,8 +324,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
- /* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ /* Try again later if there are not enough requests or skb allocation
+ * failed.
+ * Enough requests is quantified as the sum of newly created slots and
+ * the unconsumed slots at the backend.
+ */
+ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+ unlikely(err)) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
@@ -1051,7 +1060,7 @@ err:
if (work_done < budget) {
int more_to_do = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
if (more_to_do)
@@ -1073,8 +1082,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
+static void xennet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
struct netfront_info *np = netdev_priv(dev);
int cpu;
@@ -1105,8 +1114,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
tot->rx_errors = dev->stats.rx_errors;
tot->tx_dropped = dev->stats.tx_dropped;
-
- return tot;
}
static void xennet_release_tx_bufs(struct netfront_queue *queue)
@@ -1379,6 +1386,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
+ del_timer_sync(&queue->rx_refill_timer);
+
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1733,7 +1742,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
if (netif_running(info->netdev))
napi_disable(&queue->napi);
- del_timer_sync(&queue->rx_refill_timer);
netif_napi_del(&queue->napi);
}
@@ -1822,27 +1830,19 @@ static int talk_to_netback(struct xenbus_device *dev,
xennet_destroy_queues(info);
err = xennet_create_queues(info, &num_queues);
- if (err < 0)
- goto destroy_ring;
+ if (err < 0) {
+ xenbus_dev_fatal(dev, err, "creating queues");
+ kfree(info->queues);
+ info->queues = NULL;
+ goto out;
+ }
/* Create shared ring, alloc event channel -- for each queue */
for (i = 0; i < num_queues; ++i) {
queue = &info->queues[i];
err = setup_netfront(dev, queue, feature_split_evtchn);
- if (err) {
- /* setup_netfront() will tidy up the current
- * queue on error, but we need to clean up
- * those already allocated.
- */
- if (i > 0) {
- rtnl_lock();
- netif_set_real_num_tx_queues(info->netdev, i);
- rtnl_unlock();
- goto destroy_ring;
- } else {
- goto out;
- }
- }
+ if (err)
+ goto destroy_ring;
}
again:
@@ -1932,9 +1932,10 @@ abort_transaction_no_dev_fatal:
xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
- kfree(info->queues);
- info->queues = NULL;
+ xennet_destroy_queues(info);
out:
+ unregister_netdev(info->netdev);
+ xennet_free_netdev(info->netdev);
return err;
}
@@ -2164,11 +2165,12 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
- /* Allow as many queues as there are CPUs if user has not
+ /* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xennet_max_queues == 0)
- xennet_max_queues = num_online_cpus();
+ xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+ num_online_cpus());
return xenbus_register_frontend(&netfront_driver);
}
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index eca9688bf9d9..c00238491673 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -1629,6 +1629,28 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)
/* Skylake Xeon NTB */
+static int skx_poll_link(struct intel_ntb_dev *ndev)
+{
+ u16 reg_val;
+ int rc;
+
+ ndev->reg->db_iowrite(ndev->db_link_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+
+ rc = pci_read_config_word(ndev->ntb.pdev,
+ SKX_LINK_STATUS_OFFSET, &reg_val);
+ if (rc)
+ return 0;
+
+ if (reg_val == ndev->lnk_sta)
+ return 0;
+
+ ndev->lnk_sta = reg_val;
+
+ return 1;
+}
+
static u64 skx_db_ioread(void __iomem *mmio)
{
return ioread64(mmio);
@@ -2852,7 +2874,7 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
};
static const struct intel_ntb_reg skx_reg = {
- .poll_link = xeon_poll_link,
+ .poll_link = skx_poll_link,
.link_is_up = xeon_link_is_up,
.db_ioread = skx_db_ioread,
.db_iowrite = skx_db_iowrite,
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f81aa4b18d9f..02ca45fdd892 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1802,7 +1802,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
node = dev_to_node(&ndev->dev);
- free_queue = ffs(nt->qp_bitmap);
+ free_queue = ffs(nt->qp_bitmap_free);
if (!free_queue)
goto err;
@@ -2273,9 +2273,8 @@ module_init(ntb_transport_init);
static void __exit ntb_transport_exit(void)
{
- debugfs_remove_recursive(nt_debugfs_dir);
-
ntb_unregister_client(&ntb_transport_client);
bus_unregister(&ntb_transport_bus);
+ debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index e75d4fdc0866..434e1d474f33 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -265,6 +265,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
if (dma_submit_error(cookie))
goto err_set_unmap;
+ dmaengine_unmap_put(unmap);
+
atomic_inc(&pctx->dma_sync);
dma_async_issue_pending(chan);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088b375f..ce3e8dfa10ad 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev)
kfree(nsblk);
}
-static struct device_type namespace_io_device_type = {
+static const struct device_type namespace_io_device_type = {
.name = "nd_namespace_io",
.release = namespace_io_release,
};
-static struct device_type namespace_pmem_device_type = {
+static const struct device_type namespace_pmem_device_type = {
.name = "nd_namespace_pmem",
.release = namespace_pmem_release,
};
-static struct device_type namespace_blk_device_type = {
+static const struct device_type namespace_blk_device_type = {
.name = "nd_namespace_blk",
.release = namespace_blk_release,
};
@@ -957,25 +957,28 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
{
resource_size_t allocated = 0, available = 0;
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_mapping *nd_mapping;
struct nvdimm_drvdata *ndd;
struct nd_label_id label_id;
u32 flags = 0, remainder;
+ int rc, i, id = -1;
u8 *uuid = NULL;
- int rc, i;
- if (dev->driver || to_ndns(dev)->claim)
+ if (dev->driver || ndns->claim)
return -EBUSY;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid = nspm->uuid;
+ id = nspm->id;
} else if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
uuid = nsblk->uuid;
flags = NSLABEL_FLAG_LOCAL;
+ id = nsblk->id;
}
/*
@@ -1034,20 +1037,17 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
- } else if (is_namespace_blk(dev)) {
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
- /*
- * Try to delete the namespace if we deleted all of its
- * allocation, this is not the seed device for the
- * region, and it is not actively claimed by a btt
- * instance.
- */
- if (val == 0 && nd_region->ns_seed != dev
- && !nsblk->common.claim)
- nd_device_unregister(dev, ND_ASYNC);
}
+ /*
+ * Try to delete the namespace if we deleted all of its
+ * allocation, this is not the seed or 0th device for the
+ * region, and it is not actively claimed by a btt, pfn, or dax
+ * instance.
+ */
+ if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
+ nd_device_unregister(dev, ND_ASYNC);
+
return rc;
}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index a2ac9e641aa9..6c033c9a2f06 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
size = resource_size(&nsio->res);
npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
if (nd_pfn->mode == PFN_MODE_PMEM) {
- unsigned long memmap_size;
-
/*
* vmemmap_populate_hugepages() allocates the memmap array in
* HPAGE_SIZE chunks.
*/
- memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
- offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
- nd_pfn->align) - start;
+ offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+ max(nd_pfn->align, HPAGE_SIZE)) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K + dax_label_reserve,
nd_pfn->align) - start;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d7495bf1..5b536be5a12e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
kunmap_atomic(mem);
- return rc;
+ if (rc)
+ return -EIO;
+ return 0;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8a3c3e32a704..44a1a257e0b5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -208,18 +208,18 @@ EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags, int qid)
{
+ unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
struct request *req;
if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ req = blk_mq_alloc_request(q, op, flags);
} else {
- req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, op, flags,
qid ? qid - 1 : 0);
}
if (IS_ERR(req))
return req;
- req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
nvme_req(req)->cmd = cmd;
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
+ unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
struct nvme_dsm_range *range;
- unsigned int nr_bytes = blk_rq_bytes(req);
+ struct bio *bio;
- range = kmalloc(sizeof(*range), GFP_ATOMIC);
+ range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
if (!range)
return BLK_MQ_RQ_QUEUE_BUSY;
- range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ __rq_for_each_bio(bio, req) {
+ u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ n++;
+ }
+
+ if (WARN_ON_ONCE(n != segments)) {
+ kfree(range);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.nr = 0;
+ cmnd->dsm.nr = segments - 1;
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range);
+ req->special_vec.bv_len = sizeof(*range) * segments;
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_MQ_RQ_QUEUE_OK;
@@ -309,17 +321,27 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
{
int ret = BLK_MQ_RQ_QUEUE_OK;
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
- else if (req_op(req) == REQ_OP_DISCARD)
+ break;
+ case REQ_OP_DISCARD:
ret = nvme_setup_discard(ns, req, cmd);
- else
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
nvme_setup_rw(ns, req, cmd);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
cmd->common.command_id = req->tag;
-
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -784,6 +806,13 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
return nvme_sg_io(ns, (void __user *)arg);
#endif
default:
+#ifdef CONFIG_NVM
+ if (ns->ndev)
+ return nvme_nvm_ioctl(ns, cmd, arg);
+#endif
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(ns->ctrl->opal_dev, cmd,
+ (void __user *) arg);
return -ENOTTY;
}
}
@@ -861,6 +890,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
struct nvme_ctrl *ctrl = ns->ctrl;
u32 logical_block_size = queue_logical_block_size(ns->queue);
+ BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+ NVME_DSM_MAX_RANGES);
+
if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
ns->queue->limits.discard_zeroes_data = 1;
else
@@ -869,6 +901,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
+ blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -1051,6 +1084,28 @@ static const struct pr_ops nvme_pr_ops = {
.pr_clear = nvme_pr_clear,
};
+#ifdef CONFIG_BLK_SED_OPAL
+int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
+ bool send)
+{
+ struct nvme_ctrl *ctrl = data;
+ struct nvme_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (send)
+ cmd.common.opcode = nvme_admin_security_send;
+ else
+ cmd.common.opcode = nvme_admin_security_recv;
+ cmd.common.nsid = 0;
+ cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
+ cmd.common.cdw10[1] = cpu_to_le32(len);
+
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
+ ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
+}
+EXPORT_SYMBOL_GPL(nvme_sec_submit);
+#endif /* CONFIG_BLK_SED_OPAL */
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
@@ -1230,6 +1285,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
return -EIO;
}
+ ctrl->oacs = le16_to_cpu(id->oacs);
ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
atomic_set(&ctrl->abort_limit, id->acl + 1);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fcc9dcfdf675..fb51a8de9b29 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
return 0;
freq->sg_table.sgl = freq->first_sgl;
- ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
- freq->sg_table.sgl);
+ ret = sg_alloc_table_chained(&freq->sg_table,
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
if (ret)
return -ENOMEM;
op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
- WARN_ON(op->nents > rq->nr_phys_segments);
+ WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, dir);
@@ -1937,7 +1937,7 @@ nvme_fc_complete_rq(struct request *rq)
return;
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(rq))
error = rq->errors;
else
error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 588d4a34c083..21cac8523bd8 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -26,6 +26,8 @@
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
+#include <linux/sched/sysctl.h>
+#include <uapi/linux/lightnvm.h>
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
@@ -248,50 +250,48 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
struct nvm_id_group *dst;
- int i, end;
-
- end = min_t(u32, 4, nvm_id->cgrps);
-
- for (i = 0; i < end; i++) {
- src = &nvme_nvm_id->groups[i];
- dst = &nvm_id->groups[i];
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ if (nvme_nvm_id->cgrps != 1)
+ return -EINVAL;
+
+ src = &nvme_nvm_id->groups[0];
+ dst = &nvm_id->grp;
+
+ dst->mtype = src->mtype;
+ dst->fmtype = src->fmtype;
+ dst->num_ch = src->num_ch;
+ dst->num_lun = src->num_lun;
+ dst->num_pln = src->num_pln;
+
+ dst->num_pg = le16_to_cpu(src->num_pg);
+ dst->num_blk = le16_to_cpu(src->num_blk);
+ dst->fpg_sz = le16_to_cpu(src->fpg_sz);
+ dst->csecs = le16_to_cpu(src->csecs);
+ dst->sos = le16_to_cpu(src->sos);
+
+ dst->trdt = le32_to_cpu(src->trdt);
+ dst->trdm = le32_to_cpu(src->trdm);
+ dst->tprt = le32_to_cpu(src->tprt);
+ dst->tprm = le32_to_cpu(src->tprm);
+ dst->tbet = le32_to_cpu(src->tbet);
+ dst->tbem = le32_to_cpu(src->tbem);
+ dst->mpos = le32_to_cpu(src->mpos);
+ dst->mccap = le32_to_cpu(src->mccap);
+
+ dst->cpar = le16_to_cpu(src->cpar);
+
+ if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
+ memcpy(dst->lptbl.id, src->lptbl.id, 8);
+ dst->lptbl.mlc.num_pairs =
+ le16_to_cpu(src->lptbl.mlc.num_pairs);
+
+ if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
+ pr_err("nvm: number of MLC pairs not supported\n");
+ return -EINVAL;
}
+
+ memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
+ dst->lptbl.mlc.num_pairs);
}
return 0;
@@ -321,7 +321,6 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
nvm_id->ver_id = nvme_nvm_id->ver_id;
nvm_id->vmnt = nvme_nvm_id->vmnt;
- nvm_id->cgrps = nvme_nvm_id->cgrps;
nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
@@ -372,7 +371,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
}
/* Transform physical address to target address space */
- nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);
+ nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
ret = -EINTR;
@@ -485,7 +484,8 @@ static void nvme_nvm_end_io(struct request *rq, int error)
struct nvm_rq *rqd = rq->end_io_data;
rqd->ppa_status = nvme_req(rq)->result.u64;
- nvm_end_io(rqd, error);
+ rqd->error = error;
+ nvm_end_io(rqd);
kfree(nvme_req(rq)->cmd);
blk_mq_free_request(rq);
@@ -586,6 +586,224 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.max_phys_sect = 64,
};
+static void nvme_nvm_end_user_vio(struct request *rq, int error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ complete(waiting);
+}
+
+static int nvme_nvm_submit_user_cmd(struct request_queue *q,
+ struct nvme_ns *ns,
+ struct nvme_nvm_command *vcmd,
+ void __user *ubuf, unsigned int bufflen,
+ void __user *meta_buf, unsigned int meta_len,
+ void __user *ppa_buf, unsigned int ppa_len,
+ u32 *result, u64 *status, unsigned int timeout)
+{
+ bool write = nvme_is_write((struct nvme_command *)vcmd);
+ struct nvm_dev *dev = ns->ndev;
+ struct gendisk *disk = ns->disk;
+ struct request *rq;
+ struct bio *bio = NULL;
+ __le64 *ppa_list = NULL;
+ dma_addr_t ppa_dma;
+ __le64 *metadata = NULL;
+ dma_addr_t metadata_dma;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int ret;
+
+ rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
+ NVME_QID_ANY);
+ if (IS_ERR(rq)) {
+ ret = -ENOMEM;
+ goto err_cmd;
+ }
+
+ rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
+ rq->end_io_data = &wait;
+
+ if (ppa_buf && ppa_len) {
+ ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
+ if (!ppa_list) {
+ ret = -ENOMEM;
+ goto err_rq;
+ }
+ if (copy_from_user(ppa_list, (void __user *)ppa_buf,
+ sizeof(u64) * (ppa_len + 1))) {
+ ret = -EFAULT;
+ goto err_ppa;
+ }
+ vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
+ } else {
+ vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
+ }
+
+ if (ubuf && bufflen) {
+ ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
+ if (ret)
+ goto err_ppa;
+ bio = rq->bio;
+
+ if (meta_buf && meta_len) {
+ metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
+ &metadata_dma);
+ if (!metadata) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+
+ if (write) {
+ if (copy_from_user(metadata,
+ (void __user *)meta_buf,
+ meta_len)) {
+ ret = -EFAULT;
+ goto err_meta;
+ }
+ }
+ vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
+ }
+
+ if (!disk)
+ goto submit;
+
+ bio->bi_bdev = bdget_disk(disk, 0);
+ if (!bio->bi_bdev) {
+ ret = -ENODEV;
+ goto err_meta;
+ }
+ }
+
+submit:
+ blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
+
+ wait_for_completion_io(&wait);
+
+ ret = nvme_error_status(rq->errors);
+ if (result)
+ *result = rq->errors & 0x7ff;
+ if (status)
+ *status = le64_to_cpu(nvme_req(rq)->result.u64);
+
+ if (metadata && !ret && !write) {
+ if (copy_to_user(meta_buf, (void *)metadata, meta_len))
+ ret = -EFAULT;
+ }
+err_meta:
+ if (meta_buf && meta_len)
+ dma_pool_free(dev->dma_pool, metadata, metadata_dma);
+err_map:
+ if (bio) {
+ if (disk && bio->bi_bdev)
+ bdput(bio->bi_bdev);
+ blk_rq_unmap_user(bio);
+ }
+err_ppa:
+ if (ppa_buf && ppa_len)
+ dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
+err_rq:
+ blk_mq_free_request(rq);
+err_cmd:
+ return ret;
+}
+
+static int nvme_nvm_submit_vio(struct nvme_ns *ns,
+ struct nvm_user_vio __user *uvio)
+{
+ struct nvm_user_vio vio;
+ struct nvme_nvm_command c;
+ unsigned int length;
+ int ret;
+
+ if (copy_from_user(&vio, uvio, sizeof(vio)))
+ return -EFAULT;
+ if (vio.flags)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.ph_rw.opcode = vio.opcode;
+ c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
+ c.ph_rw.control = cpu_to_le16(vio.control);
+ c.ph_rw.length = cpu_to_le16(vio.nppas);
+
+ length = (vio.nppas + 1) << ns->lba_shift;
+
+ ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
+ (void __user *)(uintptr_t)vio.addr, length,
+ (void __user *)(uintptr_t)vio.metadata,
+ vio.metadata_len,
+ (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
+ &vio.result, &vio.status, 0);
+
+ if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
+ return -EFAULT;
+
+ return ret;
+}
+
+static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
+ struct nvm_passthru_vio __user *uvcmd)
+{
+ struct nvm_passthru_vio vcmd;
+ struct nvme_nvm_command c;
+ struct request_queue *q;
+ unsigned int timeout = 0;
+ int ret;
+
+ if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
+ return -EFAULT;
+ if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+ if (vcmd.flags)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = vcmd.opcode;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+ c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
+ /* cdw11-12 */
+ c.ph_rw.length = cpu_to_le16(vcmd.nppas);
+ c.ph_rw.control = cpu_to_le32(vcmd.control);
+ c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);
+
+ if (vcmd.timeout_ms)
+ timeout = msecs_to_jiffies(vcmd.timeout_ms);
+
+ q = admin ? ns->ctrl->admin_q : ns->queue;
+
+ ret = nvme_nvm_submit_user_cmd(q, ns,
+ (struct nvme_nvm_command *)&c,
+ (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
+ (void __user *)(uintptr_t)vcmd.metadata,
+ vcmd.metadata_len,
+ (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
+ &vcmd.result, &vcmd.status, timeout);
+
+ if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
+ return -EFAULT;
+
+ return ret;
+}
+
+int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case NVME_NVM_IOCTL_ADMIN_VIO:
+ return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
+ case NVME_NVM_IOCTL_IO_VIO:
+ return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
+ case NVME_NVM_IOCTL_SUBMIT_VIO:
+ return nvme_nvm_submit_vio(ns, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
struct request_queue *q = ns->queue;
@@ -622,7 +840,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
return 0;
id = &ndev->identity;
- grp = &id->groups[0];
+ grp = &id->grp;
attr = &dattr->attr;
if (strcmp(attr->name, "version") == 0) {
@@ -633,10 +851,9 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
} else if (strcmp(attr->name, "device_mode") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+ /* kept for compatibility */
} else if (strcmp(attr->name, "media_manager") == 0) {
- if (!ndev->mt)
- return scnprintf(page, PAGE_SIZE, "%s\n", "none");
- return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+ return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
} else if (strcmp(attr->name, "ppa_format") == 0) {
return scnprintf(page, PAGE_SIZE,
"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index aead6d08ed2c..14cfc6f7facb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,6 +19,7 @@
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
+#include <linux/sed-opal.h>
enum {
/*
@@ -125,6 +126,8 @@ struct nvme_ctrl {
struct list_head node;
struct ida ns_ida;
+ struct opal_dev *opal_dev;
+
char name[12];
char serial[20];
char model[40];
@@ -137,6 +140,7 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u16 oncs;
u16 vid;
+ u16 oacs;
atomic_t abort_limit;
u8 event_limit;
u8 vwc;
@@ -267,6 +271,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
+ bool send);
+
#define NVME_NR_AERS 1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
union nvme_result *res);
@@ -318,6 +325,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
int node)
@@ -335,6 +343,11 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
{
return 0;
}
+static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
#endif /* CONFIG_NVM */
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3faefabf339c..ddc51adb594d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -43,6 +43,7 @@
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
+#include <linux/sed-opal.h>
#include "nvme.h"
@@ -588,7 +589,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
*/
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
- req->cmd_type != REQ_TYPE_DRV_PRIV) {
+ !blk_rq_is_passthrough(req)) {
blk_mq_end_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
@@ -645,7 +646,7 @@ static void nvme_complete_rq(struct request *req)
return;
}
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(req))
error = req->errors;
else
error = nvme_error_status(req->errors);
@@ -895,12 +896,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_HANDLED;
}
- iod->aborted = 1;
-
if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
+ iod->aborted = 1;
memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
@@ -1178,6 +1178,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
+ dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
if (blk_mq_alloc_tag_set(&dev->admin_tagset))
@@ -1738,6 +1739,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
+ kfree(dev->ctrl.opal_dev);
kfree(dev);
}
@@ -1754,6 +1756,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
static void nvme_reset_work(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+ bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
@@ -1786,6 +1789,14 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
+ if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) {
+ dev->ctrl.opal_dev =
+ init_opal_dev(&dev->ctrl, &nvme_sec_submit);
+ }
+
+ if (was_suspend)
+ opal_unlock_from_suspend(dev->ctrl.opal_dev);
+
result = nvme_setup_io_queues(dev);
if (result)
goto out;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 557f29b1f1bb..a75e95d42b3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1423,7 +1423,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
struct nvme_command *cmd = nvme_req(rq)->cmd;
- if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+ if (!blk_rq_is_passthrough(rq) ||
cmd->common.opcode != nvme_fabrics_command ||
cmd->fabrics.fctype != nvme_fabrics_type_connect)
return false;
@@ -1471,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ if (req_op(rq) == REQ_OP_FLUSH)
flush = true;
ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
@@ -1522,7 +1522,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
return;
}
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(rq))
error = rq->errors;
else
error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index a5c09e703bd8..f49ae2758bb7 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -43,6 +43,7 @@
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
#include "nvme.h"
@@ -2347,12 +2348,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
- u8 cmd[BLK_MAX_CDB];
+ u8 cmd[16];
int retcode;
unsigned int opcode;
if (hdr->cmdp == NULL)
return -EMSGSIZE;
+ if (hdr->cmd_len > sizeof(cmd))
+ return -EINVAL;
if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
@@ -2451,8 +2454,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return -EFAULT;
if (hdr.interface_id != 'S')
return -EINVAL;
- if (hdr.cmd_len > BLK_MAX_CDB)
- return -EINVAL;
/*
* A positive return code means a NVMe status, which has been
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
{
struct nvmet_subsys *subsys = to_subsys(item);
+ nvmet_subsys_del_ctrls(subsys);
nvmet_subsys_put(subsys);
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
ctrl->cntlid, ctrl->kato);
- ctrl->ops->delete_ctrl(ctrl);
+ nvmet_ctrl_fatal_error(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ flush_work(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fatal_err_work);
+
ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
nvmet_subsys_put(subsys);
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
kfree(subsys);
}
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ ctrl->ops->delete_ctrl(ctrl);
+ mutex_unlock(&subsys->lock);
+}
+
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
kref_put(&subsys->ref, nvmet_subsys_free);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
struct fcnvme_ls_disconnect_acc *acc =
(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_tgt_queue *queue = NULL;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
- if (!assoc)
+ if (assoc) {
+ if (rqst->discon_cmd.scope ==
+ FCNVME_DISCONN_CONNECTION) {
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(
+ rqst->discon_cmd.id));
+ if (!queue) {
+ nvmet_fc_tgt_a_put(assoc);
+ ret = VERR_NO_CONN;
+ }
+ }
+ } else
ret = VERR_NO_ASSOC;
}
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
FCNVME_LS_DISCONNECT);
- if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
- queue = nvmet_fc_find_target_queue(tgtport,
- be64_to_cpu(rqst->discon_cmd.id));
- if (queue) {
- int qid = queue->qid;
+ /* are we to delete a Connection ID (queue) */
+ if (queue) {
+ int qid = queue->qid;
- nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_delete_target_queue(queue);
- /* release the get taken by find_target_queue */
- nvmet_fc_tgt_q_put(queue);
+ /* release the get taken by find_target_queue */
+ nvmet_fc_tgt_q_put(queue);
- /* tear association down if io queue terminated */
- if (!qid)
- del_assoc = true;
- }
+ /* tear association down if io queue terminated */
+ if (!qid)
+ del_assoc = true;
}
/* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa70071ae5..f3862e38f574 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req)
return;
}
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ if (blk_rq_is_passthrough(req))
error = req->errors;
else
error = nvme_error_status(req->errors);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
{
struct ib_recv_wr *bad_wr;
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
if (ndev->srq)
return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
first_wr = &rsp->send_wr;
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->n_rdma = 0;
cmd->req.port = queue->port;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d4bea3c797d6..84cf2f3f396c 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2112,7 +2112,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
continue;
/* Allocate an alias_prop with enough space for the stem */
- ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
if (!ap)
continue;
memset(ap, 0, sizeof(*ap) + len + 1);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c9b5cac03b36..82967b07f7be 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -738,9 +738,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *pathp;
int offset, rc = 0, depth = -1;
- for (offset = fdt_next_node(blob, -1, &depth);
- offset >= 0 && depth >= 0 && !rc;
- offset = fdt_next_node(blob, offset, &depth)) {
+ if (!blob)
+ return 0;
+
+ for (offset = fdt_next_node(blob, -1, &depth);
+ offset >= 0 && depth >= 0 && !rc;
+ offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
if (*pathp == '/')
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 262281bd68fa..0b2979816dbf 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -147,6 +147,7 @@ EXPORT_SYMBOL(of_mdio_parse_addr);
*/
static const struct of_device_id whitelist_phys[] = {
{ .compatible = "brcm,40nm-ephy" },
+ { .compatible = "broadcom,bcm5241" },
{ .compatible = "marvell,88E1111", },
{ .compatible = "marvell,88e1116", },
{ .compatible = "marvell,88e1118", },
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index dd6d4ccb41e4..3858b87fd0bb 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
- printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+ pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
// printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d0836751..f1b633bce525 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
xgene_msi_hwirq_alloc, NULL);
- if (rc)
+ if (rc < 0)
goto err_cpuhp;
pci_xgene_online = rc;
rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed19994c1e9..af8f6e92e885 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
- /* get iATU unroll support */
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
- dev_dbg(pp->dev, "iATU unroll: %s\n",
- pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 10c9c0ba8ff2..ec0b4c11ccd9 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot)
pciehp_green_led_blink(p_slot);
/* Check link training status */
- pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot)
if (retval != -EEXIST)
goto err_exit;
}
- pm_runtime_put(&ctrl->pcie->port->dev);
pciehp_green_led_on(p_slot);
pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
- pm_runtime_put(&ctrl->pcie->port->dev);
set_slot_off(ctrl, p_slot);
return retval;
}
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot)
int retval;
struct controller *ctrl = p_slot->ctrl;
- pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_unconfigure_device(p_slot);
- pm_runtime_put(&ctrl->pcie->port->dev);
if (retval)
return retval;
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 56efaf72d08e..d2961ef39a3a 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -155,7 +155,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent)
pnv_php_detach_device_nodes(dn);
of_node_put(dn);
- refcount = atomic_read(&dn->kobj.kref.refcount);
+ refcount = kref_read(&dn->kobj.kref);
if (refcount != 1)
pr_warn("Invalid refcount %d on <%s>\n",
refcount, of_node_full_name(dn));
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 50c5003295ca..7f73bacf13ed 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
affd = &msi_default_affd;
+
+ if (affd->pre_vectors + affd->post_vectors > min_vecs)
+ return -EINVAL;
+
+ /*
+ * If there aren't any vectors left after applying the pre/post
+ * vectors don't bother with assigning affinity.
+ */
+ if (affd->pre_vectors + affd->post_vectors == min_vecs)
+ affd = NULL;
} else {
if (WARN_ON(affd))
affd = NULL;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1cd6aca..63d8e18fb6b1 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int pci_bus_num_vf(struct device *dev)
+{
+ return pci_num_vf(to_pci_dev(dev));
+}
+
struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = {
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
+ .num_vf = pci_bus_num_vf,
};
EXPORT_SYMBOL(pci_bus_type);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a881c0d3d2e8..7904d02ffdb9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug ports handled by firmware in System Management Mode
+ * Hotplug interrupts cannot be delivered if the link is down,
+ * so parents of a hotplug port must stay awake. In addition,
+ * hotplug ports handled by firmware in System Management Mode
* may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ * For simplicity, disallow in general for now.
*/
- if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ if (bridge->is_hotplug_bridge)
return false;
if (pci_bridge_d3_force)
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
!pci_pme_capable(dev, PCI_D3cold)) ||
/* If it is a bridge it must be allowed to go to D3. */
- !pci_power_manageable(dev) ||
-
- /* Hotplug interrupts cannot be delivered if the link is down. */
- dev->is_hotplug_bridge)
+ !pci_power_manageable(dev))
*d3cold_ok = false;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..3dd8bcbb3011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return NULL;
+
INIT_LIST_HEAD(&link->sibling);
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+ /*
+ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+ * hierarchies.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ link->root = link;
+ } else {
struct pcie_link_state *parent;
+
parent = pdev->bus->parent->self->link_state;
if (!parent) {
kfree(link);
return NULL;
}
+
link->parent = parent;
+ link->root = link->parent->root;
list_add(&link->link, &parent->children);
}
- /* Setup a pointer to the root port link */
- if (!link->parent)
- link->root = link;
- else
- link->root = link->parent->root;
list_add(&link->sibling, &link_list);
pdev->link_state = link;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 717529331dac..2dd1c68e6de8 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -433,6 +433,17 @@ static int pcie_pme_resume(struct pcie_device *srv)
return 0;
}
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv: PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+ pcie_pme_suspend(srv);
+ free_irq(srv->irq, srv);
+ kfree(get_service_data(srv));
+}
+
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -441,6 +452,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
+ .remove = pcie_pme_remove,
};
/**
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..204960e70333 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
+
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
/*
- * A Root Port is always the upstream end of a Link. No PCIe
- * component has two Links. Two Links are connected by a Switch
- * that has a Port on each Link and internal logic to connect the
- * two Ports.
+ * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+ * of a Link. No PCIe component has two Links. Two Links are
+ * connected by a Switch that has a Port on each Link and internal
+ * logic to connect the two Ports.
*/
type = pci_pcie_type(pdev);
- if (type == PCI_EXP_TYPE_ROOT_PORT)
+ if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE)
pdev->has_secondary_link = 1;
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 429d34c348b9..e42909524dee 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -345,7 +345,7 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
void pci_destroy_slot(struct pci_slot *slot)
{
dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
- slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
+ slot->number, kref_read(&slot->kobj.kref) - 1);
mutex_lock(&pci_slot_mutex);
kobject_put(&slot->kobj);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 54044a8ecbd7..8f8c2af45781 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -8,9 +8,16 @@ config PINCTRL
menu "Pin controllers"
depends on PINCTRL
+config GENERIC_PINCTRL_GROUPS
+ bool
+
config PINMUX
bool "Support pin multiplexing controllers" if COMPILE_TEST
+config GENERIC_PINMUX_FUNCTIONS
+ bool
+ select PINMUX
+
config PINCONF
bool "Support pin configuration controllers" if COMPILE_TEST
@@ -159,8 +166,8 @@ config PINCTRL_ROCKCHIP
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
- select PINMUX
- select PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
help
This selects the device tree based generic pinctrl driver.
@@ -293,6 +300,7 @@ source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/stm32/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
source "drivers/pinctrl/tegra/Kconfig"
+source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 25d50a86981d..a251f439626f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
+obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-$(CONFIG_PINCTRL_MTK) += mediatek/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index a21b071ff290..7de596e2b9d4 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -43,9 +43,18 @@
* Not all pins have their signals defined (yet).
*/
+#define D6 0
+SSSF_PIN_DECL(D6, GPIOA0, MAC1LINK, SIG_DESC_SET(SCU80, 0));
+
+#define B5 1
+SSSF_PIN_DECL(B5, GPIOA1, MAC2LINK, SIG_DESC_SET(SCU80, 1));
+
#define A4 2
SSSF_PIN_DECL(A4, GPIOA2, TIMER3, SIG_DESC_SET(SCU80, 2));
+#define E6 3
+SSSF_PIN_DECL(E6, GPIOA3, TIMER4, SIG_DESC_SET(SCU80, 3));
+
#define I2C9_DESC SIG_DESC_SET(SCU90, 22)
#define C5 4
@@ -80,6 +89,26 @@ MS_PIN_DECL(D5, GPIOA7, MDIO2, TIMER8);
FUNC_GROUP_DECL(TIMER8, D5);
FUNC_GROUP_DECL(MDIO2, A3, D5);
+#define J21 8
+SSSF_PIN_DECL(J21, GPIOB0, SALT1, SIG_DESC_SET(SCU80, 8));
+
+#define J20 9
+SSSF_PIN_DECL(J20, GPIOB1, SALT2, SIG_DESC_SET(SCU80, 9));
+
+#define H18 10
+SSSF_PIN_DECL(H18, GPIOB2, SALT3, SIG_DESC_SET(SCU80, 10));
+
+#define F18 11
+SSSF_PIN_DECL(F18, GPIOB3, SALT4, SIG_DESC_SET(SCU80, 11));
+
+#define E19 12
+SIG_EXPR_DECL(LPCRST, LPCRST, SIG_DESC_SET(SCU80, 12));
+SIG_EXPR_DECL(LPCRST, LPCRSTS, SIG_DESC_SET(HW_STRAP1, 14));
+SIG_EXPR_LIST_DECL_DUAL(LPCRST, LPCRST, LPCRSTS);
+SS_PIN_DECL(E19, GPIOB4, LPCRST);
+
+FUNC_GROUP_DECL(LPCRST, E19);
+
#define H19 13
#define H19_DESC SIG_DESC_SET(SCU80, 13)
SIG_EXPR_LIST_DECL_SINGLE(LPCPD, LPCPD, H19_DESC);
@@ -92,6 +121,19 @@ FUNC_GROUP_DECL(LPCSMI, H19);
#define H20 14
SSSF_PIN_DECL(H20, GPIOB6, LPCPME, SIG_DESC_SET(SCU80, 14));
+#define E18 15
+SIG_EXPR_LIST_DECL_SINGLE(EXTRST, EXTRST,
+ SIG_DESC_SET(SCU80, 15),
+ SIG_DESC_BIT(SCU90, 31, 0),
+ SIG_DESC_SET(SCU3C, 3));
+SIG_EXPR_LIST_DECL_SINGLE(SPICS1, SPICS1,
+ SIG_DESC_SET(SCU80, 15),
+ SIG_DESC_SET(SCU90, 31));
+MS_PIN_DECL(E18, GPIOB7, EXTRST, SPICS1);
+
+FUNC_GROUP_DECL(EXTRST, E18);
+FUNC_GROUP_DECL(SPICS1, E18);
+
#define SD1_DESC SIG_DESC_SET(SCU90, 0)
#define I2C10_DESC SIG_DESC_SET(SCU90, 23)
@@ -170,6 +212,62 @@ MS_PIN_DECL(D16, GPIOD1, SD2CMD, GPID0OUT);
FUNC_GROUP_DECL(GPID0, A18, D16);
+#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
+
+#define B17 26
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
+MS_PIN_DECL(B17, GPIOD2, SD2DAT0, GPID2IN);
+
+#define A17 27
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
+MS_PIN_DECL(A17, GPIOD3, SD2DAT1, GPID2OUT);
+
+FUNC_GROUP_DECL(GPID2, B17, A17);
+
+#define GPID4_DESC SIG_DESC_SET(SCU8C, 10)
+
+#define C16 28
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT2, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4IN, GPID4, GPID);
+MS_PIN_DECL(C16, GPIOD4, SD2DAT2, GPID4IN);
+
+#define B16 29
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT3, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4OUT, GPID4, GPID);
+MS_PIN_DECL(B16, GPIOD5, SD2DAT3, GPID4OUT);
+
+FUNC_GROUP_DECL(GPID4, C16, B16);
+
+#define GPID6_DESC SIG_DESC_SET(SCU8C, 11)
+
+#define A16 30
+SIG_EXPR_LIST_DECL_SINGLE(SD2CD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6IN, GPID6, GPID);
+MS_PIN_DECL(A16, GPIOD6, SD2CD, GPID6IN);
+
+#define E15 31
+SIG_EXPR_LIST_DECL_SINGLE(SD2WP, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6OUT, GPID6, GPID);
+MS_PIN_DECL(E15, GPIOD7, SD2WP, GPID6OUT);
+
+FUNC_GROUP_DECL(GPID6, A16, E15);
+FUNC_GROUP_DECL(SD2, A18, D16, B17, A17, C16, B16, A16, E15);
+FUNC_GROUP_DECL(GPID, A18, D16, B17, A17, C16, B16, A16, E15);
+
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 22)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
#define GPIE2_DESC SIG_DESC_SET(SCU8C, 13)
@@ -266,6 +364,15 @@ MS_PIN_DECL(B19, GPIOF1, NDCD4, SIOPBI);
FUNC_GROUP_DECL(NDCD4, B19);
FUNC_GROUP_DECL(SIOPBI, B19);
+#define A20 42
+SIG_EXPR_LIST_DECL_SINGLE(NDSR4, NDSR4, SIG_DESC_SET(SCU80, 26));
+SIG_EXPR_DECL(SIOPWRGD, SIOPWRGD, SIG_DESC_SET(SCUA4, 12));
+SIG_EXPR_DECL(SIOPWRGD, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWRGD, SIOPWRGD, ACPI);
+MS_PIN_DECL(A20, GPIOF2, NDSR4, SIOPWRGD);
+FUNC_GROUP_DECL(NDSR4, A20);
+FUNC_GROUP_DECL(SIOPWRGD, A20);
+
#define D17 43
SIG_EXPR_LIST_DECL_SINGLE(NRI4, NRI4, SIG_DESC_SET(SCU80, 27));
SIG_EXPR_DECL(SIOPBO, SIOPBO, SIG_DESC_SET(SCUA4, 14));
@@ -275,7 +382,17 @@ MS_PIN_DECL(D17, GPIOF3, NRI4, SIOPBO);
FUNC_GROUP_DECL(NRI4, D17);
FUNC_GROUP_DECL(SIOPBO, D17);
-FUNC_GROUP_DECL(ACPI, B19, D17);
+#define B18 44
+SSSF_PIN_DECL(B18, GPIOF4, NDTR4, SIG_DESC_SET(SCU80, 28));
+
+#define A19 45
+SIG_EXPR_LIST_DECL_SINGLE(NDTS4, NDTS4, SIG_DESC_SET(SCU80, 29));
+SIG_EXPR_DECL(SIOSCI, SIOSCI, SIG_DESC_SET(SCUA4, 15));
+SIG_EXPR_DECL(SIOSCI, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOSCI, SIOSCI, ACPI);
+MS_PIN_DECL(A19, GPIOF5, NDTS4, SIOSCI);
+FUNC_GROUP_DECL(NDTS4, A19);
+FUNC_GROUP_DECL(SIOSCI, A19);
#define E16 46
SSSF_PIN_DECL(E16, GPIOF6, TXD4, SIG_DESC_SET(SCU80, 30));
@@ -283,6 +400,34 @@ SSSF_PIN_DECL(E16, GPIOF6, TXD4, SIG_DESC_SET(SCU80, 30));
#define C17 47
SSSF_PIN_DECL(C17, GPIOF7, RXD4, SIG_DESC_SET(SCU80, 31));
+#define A14 48
+SSSF_PIN_DECL(A14, GPIOG0, SGPSCK, SIG_DESC_SET(SCU84, 0));
+
+#define E13 49
+SSSF_PIN_DECL(E13, GPIOG1, SGPSLD, SIG_DESC_SET(SCU84, 1));
+
+#define D13 50
+SSSF_PIN_DECL(D13, GPIOG2, SGPSI0, SIG_DESC_SET(SCU84, 2));
+
+#define C13 51
+SSSF_PIN_DECL(C13, GPIOG3, SGPSI1, SIG_DESC_SET(SCU84, 3));
+
+#define B13 52
+SIG_EXPR_LIST_DECL_SINGLE(OSCCLK, OSCCLK, SIG_DESC_SET(SCU2C, 1));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST1, WDTRST1, SIG_DESC_SET(SCU84, 4));
+MS_PIN_DECL(B13, GPIOG4, OSCCLK, WDTRST1);
+
+FUNC_GROUP_DECL(OSCCLK, B13);
+FUNC_GROUP_DECL(WDTRST1, B13);
+
+#define Y21 53
+SIG_EXPR_LIST_DECL_SINGLE(USBCKI, USBCKI, SIG_DESC_SET(HW_STRAP1, 23));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST2, WDTRST2, SIG_DESC_SET(SCU84, 5));
+MS_PIN_DECL(Y21, GPIOG5, USBCKI, WDTRST2);
+
+FUNC_GROUP_DECL(USBCKI, Y21);
+FUNC_GROUP_DECL(WDTRST2, Y21);
+
#define AA22 54
SSSF_PIN_DECL(AA22, GPIOG6, FLBUSY, SIG_DESC_SET(SCU84, 6));
@@ -292,7 +437,7 @@ SSSF_PIN_DECL(U18, GPIOG7, FLWP, SIG_DESC_SET(SCU84, 7));
#define UART6_DESC SIG_DESC_SET(SCU90, 7)
#define ROM16_DESC SIG_DESC_SET(SCU90, 6)
#define FLASH_WIDE SIG_DESC_SET(HW_STRAP1, 4)
-#define BOOT_SRC_NOR { HW_STRAP1, GENMASK(1, 0), 0, 0 }
+#define BOOT_SRC_NOR { ASPEED_IP_SCU, HW_STRAP1, GENMASK(1, 0), 0, 0 }
#define A8 56
SIG_EXPR_DECL(ROMD8, ROM16, ROM16_DESC);
@@ -352,6 +497,93 @@ MS_PIN_DECL(E7, GPIOH7, ROMD15, RXD6);
FUNC_GROUP_DECL(UART6, A8, C7, B7, A7, D7, B6, A6, E7);
+#define SPI1_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
+#define C22 64
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(C22, GPIOI0, SYSCS);
+
+#define G18 65
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(G18, GPIOI1, SYSCK);
+
+#define D19 66
+SIG_EXPR_DECL(SYSDO, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSDO, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSDO, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(D19, GPIOI2, SYSDO);
+
+#define C20 67
+SIG_EXPR_DECL(SYSDI, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSDI, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSDI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(C20, GPIOI3, SYSDI);
+
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B22 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(B22, GPIOI4, SPI1CS0, VBCS);
+
+#define G19 69
+SIG_EXPR_DECL(SPI1CK, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(G19, GPIOI5, SPI1CK, VBCK);
+
+#define C18 70
+SIG_EXPR_DECL(SPI1DO, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1DO, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1DO, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1DO, SIG_EXPR_PTR(SPI1DO, SPI1),
+ SIG_EXPR_PTR(SPI1DO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1DO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBDO, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(C18, GPIOI6, SPI1DO, VBDO);
+
+#define E20 71
+SIG_EXPR_DECL(SPI1DI, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1DI, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1DI, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1DI, SIG_EXPR_PTR(SPI1DI, SPI1),
+ SIG_EXPR_PTR(SPI1DI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1DI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBDI, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(E20, GPIOI7, SPI1DI, VBDI);
+
+FUNC_GROUP_DECL(SPI1, B22, G19, C18, E20);
+FUNC_GROUP_DECL(SPI1DEBUG, C22, G18, D19, C20, B22, G19, C18, E20);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C22, G18, D19, C20, B22, G19, C18, E20);
+FUNC_GROUP_DECL(VGABIOS_ROM, B22, G19, C18, E20);
+
+#define J5 72
+SSSF_PIN_DECL(J5, GPIOJ0, SGPMCK, SIG_DESC_SET(SCU84, 8));
+
+#define J4 73
+SSSF_PIN_DECL(J4, GPIOJ1, SGPMLD, SIG_DESC_SET(SCU84, 9));
+
+#define K5 74
+SSSF_PIN_DECL(K5, GPIOJ2, SGPMO, SIG_DESC_SET(SCU84, 10));
+
#define J3 75
SSSF_PIN_DECL(J3, GPIOJ3, SGPMI, SIG_DESC_SET(SCU84, 11));
@@ -418,9 +650,9 @@ FUNC_GROUP_DECL(I2C8, G5, F3);
#define U1 88
SSSF_PIN_DECL(U1, GPIOL0, NCTS1, SIG_DESC_SET(SCU84, 16));
-#define VPI18_DESC { SCU90, GENMASK(5, 4), 1, 0 }
-#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
-#define VPI30_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+#define VPI18_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 2, 0 }
+#define VPI30_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 3, 0 }
#define T5 89
#define T5_DESC SIG_DESC_SET(SCU84, 17)
@@ -496,6 +728,102 @@ SIG_EXPR_LIST_DECL_SINGLE(RXD1, RXD1, U5_DESC);
MS_PIN_DECL(U5, GPIOL7, VPIB1, RXD1);
FUNC_GROUP_DECL(RXD1, U5);
+#define V3 96
+#define V3_DESC SIG_DESC_SET(SCU84, 24)
+SIG_EXPR_DECL(VPIOB2, VPI18, VPI18_DESC, V3_DESC);
+SIG_EXPR_DECL(VPIOB2, VPI24, VPI24_DESC, V3_DESC);
+SIG_EXPR_DECL(VPIOB2, VPI30, VPI30_DESC, V3_DESC);
+SIG_EXPR_LIST_DECL(VPIOB2, SIG_EXPR_PTR(VPIOB2, VPI18),
+ SIG_EXPR_PTR(VPIOB2, VPI24),
+ SIG_EXPR_PTR(VPIOB2, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NCTS2, NCTS2, V3_DESC);
+MS_PIN_DECL(V3, GPIOM0, VPIOB2, NCTS2);
+FUNC_GROUP_DECL(NCTS2, V3);
+
+#define W2 97
+#define W2_DESC SIG_DESC_SET(SCU84, 25)
+SIG_EXPR_DECL(VPIOB3, VPI18, VPI18_DESC, W2_DESC);
+SIG_EXPR_DECL(VPIOB3, VPI24, VPI24_DESC, W2_DESC);
+SIG_EXPR_DECL(VPIOB3, VPI30, VPI30_DESC, W2_DESC);
+SIG_EXPR_LIST_DECL(VPIOB3, SIG_EXPR_PTR(VPIOB3, VPI18),
+ SIG_EXPR_PTR(VPIOB3, VPI24),
+ SIG_EXPR_PTR(VPIOB3, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDCD2, NDCD2, W2_DESC);
+MS_PIN_DECL(W2, GPIOM1, VPIOB3, NDCD2);
+FUNC_GROUP_DECL(NDCD2, W2);
+
+#define Y1 98
+#define Y1_DESC SIG_DESC_SET(SCU84, 26)
+SIG_EXPR_DECL(VPIOB4, VPI18, VPI18_DESC, Y1_DESC);
+SIG_EXPR_DECL(VPIOB4, VPI24, VPI24_DESC, Y1_DESC);
+SIG_EXPR_DECL(VPIOB4, VPI30, VPI30_DESC, Y1_DESC);
+SIG_EXPR_LIST_DECL(VPIOB4, SIG_EXPR_PTR(VPIOB4, VPI18),
+ SIG_EXPR_PTR(VPIOB4, VPI24),
+ SIG_EXPR_PTR(VPIOB4, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDSR2, NDSR2, Y1_DESC);
+MS_PIN_DECL(Y1, GPIOM2, VPIOB4, NDSR2);
+FUNC_GROUP_DECL(NDSR2, Y1);
+
+#define V4 99
+#define V4_DESC SIG_DESC_SET(SCU84, 27)
+SIG_EXPR_DECL(VPIOB5, VPI18, VPI18_DESC, V4_DESC);
+SIG_EXPR_DECL(VPIOB5, VPI24, VPI24_DESC, V4_DESC);
+SIG_EXPR_DECL(VPIOB5, VPI30, VPI30_DESC, V4_DESC);
+SIG_EXPR_LIST_DECL(VPIOB5, SIG_EXPR_PTR(VPIOB5, VPI18),
+ SIG_EXPR_PTR(VPIOB5, VPI24),
+ SIG_EXPR_PTR(VPIOB5, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRI2, NRI2, V4_DESC);
+MS_PIN_DECL(V4, GPIOM3, VPIOB5, NRI2);
+FUNC_GROUP_DECL(NRI2, V4);
+
+#define W3 100
+#define W3_DESC SIG_DESC_SET(SCU84, 28)
+SIG_EXPR_DECL(VPIOB6, VPI18, VPI18_DESC, W3_DESC);
+SIG_EXPR_DECL(VPIOB6, VPI24, VPI24_DESC, W3_DESC);
+SIG_EXPR_DECL(VPIOB6, VPI30, VPI30_DESC, W3_DESC);
+SIG_EXPR_LIST_DECL(VPIOB6, SIG_EXPR_PTR(VPIOB6, VPI18),
+ SIG_EXPR_PTR(VPIOB6, VPI24),
+ SIG_EXPR_PTR(VPIOB6, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDTR2, NDTR2, W3_DESC);
+MS_PIN_DECL(W3, GPIOM4, VPIOB6, NDTR2);
+FUNC_GROUP_DECL(NDTR2, W3);
+
+#define Y2 101
+#define Y2_DESC SIG_DESC_SET(SCU84, 29)
+SIG_EXPR_DECL(VPIOB7, VPI18, VPI18_DESC, Y2_DESC);
+SIG_EXPR_DECL(VPIOB7, VPI24, VPI24_DESC, Y2_DESC);
+SIG_EXPR_DECL(VPIOB7, VPI30, VPI30_DESC, Y2_DESC);
+SIG_EXPR_LIST_DECL(VPIOB7, SIG_EXPR_PTR(VPIOB7, VPI18),
+ SIG_EXPR_PTR(VPIOB7, VPI24),
+ SIG_EXPR_PTR(VPIOB7, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRTS2, NRTS2, Y2_DESC);
+MS_PIN_DECL(Y2, GPIOM5, VPIOB7, NRTS2);
+FUNC_GROUP_DECL(NRTS2, Y2);
+
+#define AA1 102
+#define AA1_DESC SIG_DESC_SET(SCU84, 30)
+SIG_EXPR_DECL(VPIOB8, VPI18, VPI18_DESC, AA1_DESC);
+SIG_EXPR_DECL(VPIOB8, VPI24, VPI24_DESC, AA1_DESC);
+SIG_EXPR_DECL(VPIOB8, VPI30, VPI30_DESC, AA1_DESC);
+SIG_EXPR_LIST_DECL(VPIOB8, SIG_EXPR_PTR(VPIOB8, VPI18),
+ SIG_EXPR_PTR(VPIOB8, VPI24),
+ SIG_EXPR_PTR(VPIOB8, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(TXD2, TXD2, AA1_DESC);
+MS_PIN_DECL(AA1, GPIOM6, VPIOB8, TXD2);
+FUNC_GROUP_DECL(TXD2, AA1);
+
+#define V5 103
+#define V5_DESC SIG_DESC_SET(SCU84, 31)
+SIG_EXPR_DECL(VPIOB9, VPI18, VPI18_DESC, V5_DESC);
+SIG_EXPR_DECL(VPIOB9, VPI24, VPI24_DESC, V5_DESC);
+SIG_EXPR_DECL(VPIOB9, VPI30, VPI30_DESC, V5_DESC);
+SIG_EXPR_LIST_DECL(VPIOB9, SIG_EXPR_PTR(VPIOB9, VPI18),
+ SIG_EXPR_PTR(VPIOB9, VPI24),
+ SIG_EXPR_PTR(VPIOB9, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(RXD2, RXD2, V5_DESC);
+MS_PIN_DECL(V5, GPIOM7, VPIOB9, RXD2);
+FUNC_GROUP_DECL(RXD2, V5);
+
#define W4 104
#define W4_DESC SIG_DESC_SET(SCU88, 0)
SIG_EXPR_LIST_DECL_SINGLE(VPIG0, VPI30, VPI30_DESC, W4_DESC);
@@ -580,10 +908,57 @@ SS_PIN_DECL(V6, GPIOO0, VPIG8);
SIG_EXPR_LIST_DECL_SINGLE(VPIG9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 9));
SS_PIN_DECL(Y5, GPIOO1, VPIG9);
-FUNC_GROUP_DECL(VPI18, T5, U3, V1, U4, V2, AA22, W5, Y4, AA3, AB2);
-FUNC_GROUP_DECL(VPI24, T5, U3, V1, U4, V2, AA22, W5, Y4, AA3, AB2, V6, Y5);
-FUNC_GROUP_DECL(VPI30, T5, U3, V1, U4, V2, W1, U5, W4, Y3, AA22, W5, Y4, AA3,
- AB2);
+#define AA4 114
+SIG_EXPR_LIST_DECL_SINGLE(VPIR0, VPI30, VPI30_DESC, SIG_DESC_SET(SCU88, 10));
+SS_PIN_DECL(AA4, GPIOO2, VPIR0);
+
+#define AB3 115
+SIG_EXPR_LIST_DECL_SINGLE(VPIR1, VPI30, VPI30_DESC, SIG_DESC_SET(SCU88, 11));
+SS_PIN_DECL(AB3, GPIOO3, VPIR1);
+
+#define W6 116
+SIG_EXPR_LIST_DECL_SINGLE(VPIR2, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 12));
+SS_PIN_DECL(W6, GPIOO4, VPIR2);
+
+#define AA5 117
+SIG_EXPR_LIST_DECL_SINGLE(VPIR3, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 13));
+SS_PIN_DECL(AA5, GPIOO5, VPIR3);
+
+#define AB4 118
+SIG_EXPR_LIST_DECL_SINGLE(VPIR4, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 14));
+SS_PIN_DECL(AB4, GPIOO6, VPIR4);
+
+#define V7 119
+SIG_EXPR_LIST_DECL_SINGLE(VPIR5, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 15));
+SS_PIN_DECL(V7, GPIOO7, VPIR5);
+
+#define Y6 120
+SIG_EXPR_LIST_DECL_SINGLE(VPIR6, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 16));
+SS_PIN_DECL(Y6, GPIOP0, VPIR6);
+
+#define AB5 121
+SIG_EXPR_LIST_DECL_SINGLE(VPIR7, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 17));
+SS_PIN_DECL(AB5, GPIOP1, VPIR7);
+
+#define W7 122
+SIG_EXPR_LIST_DECL_SINGLE(VPIR8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 18));
+SS_PIN_DECL(W7, GPIOP2, VPIR8);
+
+#define AA6 123
+SIG_EXPR_LIST_DECL_SINGLE(VPIR9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 19));
+SS_PIN_DECL(AA6, GPIOP3, VPIR9);
+
+FUNC_GROUP_DECL(VPI18, T5, U3, V1, U4, V2, V3, W2, Y1, V4, W3, Y2, AA1, V5,
+ AA22, W5, Y4, AA3, AB2);
+FUNC_GROUP_DECL(VPI24, T5, U3, V1, U4, V2, V3, W2, Y1, V4, W3, Y2, AA1, V5,
+ AA22, W5, Y4, AA3, AB2, V6, Y5, W6, AA5, AB4, V7, Y6, AB5, W7,
+ AA6);
+FUNC_GROUP_DECL(VPI30, T5, U3, V1, U4, V2, W1, U5, V3, W2, Y1, V4, W3, Y2, AA1,
+ V5, W4, Y3, AA22, W5, Y4, AA3, AB2, AA4, AB3);
+
+#define AB6 124
+SIG_EXPR_LIST_DECL_SINGLE(GPIOP4, GPIOP4);
+MS_PIN_DECL_(AB6, SIG_EXPR_LIST_PTR(GPIOP4));
#define Y7 125
SIG_EXPR_LIST_DECL_SINGLE(GPIOP5, GPIOP5);
@@ -619,6 +994,18 @@ SS_PIN_DECL(F5, GPIOQ3, SDA4);
FUNC_GROUP_DECL(I2C4, B1, F5);
+#define I2C14_DESC SIG_DESC_SET(SCU90, 27)
+
+#define H4 132
+SIG_EXPR_LIST_DECL_SINGLE(SCL14, I2C14, I2C14_DESC);
+SS_PIN_DECL(H4, GPIOQ4, SCL14);
+
+#define H3 133
+SIG_EXPR_LIST_DECL_SINGLE(SDA14, I2C14, I2C14_DESC);
+SS_PIN_DECL(H3, GPIOQ5, SDA14);
+
+FUNC_GROUP_DECL(I2C14, H4, H3);
+
#define DASH9028_DESC SIG_DESC_SET(SCU90, 28)
#define H2 134
@@ -641,11 +1028,11 @@ SSSF_PIN_DECL(Y22, GPIOR2, ROMCS3, SIG_DESC_SET(SCU88, 26));
#define U19 139
SSSF_PIN_DECL(U19, GPIOR3, ROMCS4, SIG_DESC_SET(SCU88, 27));
-#define VPOOFF0_DESC { SCU94, GENMASK(1, 0), 0, 0 }
-#define VPO12_DESC { SCU94, GENMASK(1, 0), 1, 0 }
-#define VPO24_DESC { SCU94, GENMASK(1, 0), 2, 0 }
-#define VPOOFF1_DESC { SCU94, GENMASK(1, 0), 3, 0 }
-#define VPO_OFF_12 { SCU94, 0x2, 0, 0 }
+#define VPOOFF0_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+#define VPO12_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 1, 0 }
+#define VPO24_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 2, 0 }
+#define VPOOFF1_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 3, 0 }
+#define VPO_OFF_12 { ASPEED_IP_SCU, SCU94, 0x2, 0, 0 }
#define VPO_24_OFF SIG_DESC_SET(SCU94, 1)
#define V21 140
@@ -776,13 +1163,6 @@ SIG_EXPR_LIST_DECL(ROMA23, SIG_EXPR_PTR(ROMA23, ROM8),
SIG_EXPR_LIST_DECL_SINGLE(VPOR5, VPO24, K18_DESC, VPO_24_OFF);
MS_PIN_DECL(K18, GPIOS7, ROMA23, VPOR5);
-FUNC_GROUP_DECL(ROM8, V20, U21, T19, V22, U20, R18, N21, L22, K18, W21, Y22,
- U19);
-FUNC_GROUP_DECL(ROM16, V20, U21, T19, V22, U20, R18, N21, L22, K18,
- A8, C7, B7, A7, D7, B6, A6, E7, W21, Y22, U19);
-FUNC_GROUP_DECL(VPO12, U21, T19, V22, U20);
-FUNC_GROUP_DECL(VPO24, U21, T19, V22, U20, L22, K18, V21, W22);
-
#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
#define A12 152
@@ -827,6 +1207,50 @@ SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD3, RGMII1);
MS_PIN_DECL_(A13, SIG_EXPR_LIST_PTR(GPIOT5), SIG_EXPR_LIST_PTR(DASHA13),
SIG_EXPR_LIST_PTR(RGMII1TXD3));
+#define RMII2_DESC SIG_DESC_BIT(HW_STRAP1, 7, 0)
+
+#define D9 158
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT6, GPIOT6, SIG_DESC_SET(SCUA0, 6));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXEN, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCK, RGMII2);
+MS_PIN_DECL_(D9, SIG_EXPR_LIST_PTR(GPIOT6), SIG_EXPR_LIST_PTR(RMII2TXEN),
+ SIG_EXPR_LIST_PTR(RGMII2TXCK));
+
+#define E9 159
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT7, GPIOT7, SIG_DESC_SET(SCUA0, 7));
+SIG_EXPR_LIST_DECL_SINGLE(DASHE9, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCTL, RGMII2);
+MS_PIN_DECL_(E9, SIG_EXPR_LIST_PTR(GPIOT7), SIG_EXPR_LIST_PTR(DASHE9),
+ SIG_EXPR_LIST_PTR(RGMII2TXCTL));
+
+#define A10 160
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU0, GPIOU0, SIG_DESC_SET(SCUA0, 8));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD0, RGMII2);
+MS_PIN_DECL_(A10, SIG_EXPR_LIST_PTR(GPIOU0), SIG_EXPR_LIST_PTR(RMII2TXD0),
+ SIG_EXPR_LIST_PTR(RGMII2TXD0));
+
+#define B10 161
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU1, GPIOU1, SIG_DESC_SET(SCUA0, 9));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD1, RGMII2);
+MS_PIN_DECL_(B10, SIG_EXPR_LIST_PTR(GPIOU1), SIG_EXPR_LIST_PTR(RMII2TXD1),
+ SIG_EXPR_LIST_PTR(RGMII2TXD1));
+
+#define C10 162
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU2, GPIOU2, SIG_DESC_SET(SCUA0, 10));
+SIG_EXPR_LIST_DECL_SINGLE(DASHC10, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD2, RGMII2);
+MS_PIN_DECL_(C10, SIG_EXPR_LIST_PTR(GPIOU2), SIG_EXPR_LIST_PTR(DASHC10),
+ SIG_EXPR_LIST_PTR(RGMII2TXD2));
+
+#define D10 163
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU3, GPIOU3, SIG_DESC_SET(SCUA0, 11));
+SIG_EXPR_LIST_DECL_SINGLE(DASHD10, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD3, RGMII2);
+MS_PIN_DECL_(D10, SIG_EXPR_LIST_PTR(GPIOU3), SIG_EXPR_LIST_PTR(DASHD10),
+ SIG_EXPR_LIST_PTR(RGMII2TXD3));
+
#define E11 164
SIG_EXPR_LIST_DECL_SINGLE(GPIOU4, GPIOU4, SIG_DESC_SET(SCUA0, 12));
SIG_EXPR_LIST_DECL_SINGLE(RMII1RCLK, RMII1, RMII1_DESC);
@@ -869,11 +1293,419 @@ SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD3, RGMII1);
MS_PIN_DECL_(E10, SIG_EXPR_LIST_PTR(GPIOV1), SIG_EXPR_LIST_PTR(RMII1RXER),
SIG_EXPR_LIST_PTR(RGMII1RXD3));
+#define C9 170
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV2, GPIOV2, SIG_DESC_SET(SCUA0, 18));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RCLK, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCK, RGMII2);
+MS_PIN_DECL_(C9, SIG_EXPR_LIST_PTR(GPIOV2), SIG_EXPR_LIST_PTR(RMII2RCLK),
+ SIG_EXPR_LIST_PTR(RGMII2RXCK));
+
+#define B9 171
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV3, GPIOV3, SIG_DESC_SET(SCUA0, 19));
+SIG_EXPR_LIST_DECL_SINGLE(DASHB9, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCTL, RGMII2);
+MS_PIN_DECL_(B9, SIG_EXPR_LIST_PTR(GPIOV3), SIG_EXPR_LIST_PTR(DASHB9),
+ SIG_EXPR_LIST_PTR(RGMII2RXCTL));
+
+#define A9 172
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV4, GPIOV4, SIG_DESC_SET(SCUA0, 20));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD0, RGMII2);
+MS_PIN_DECL_(A9, SIG_EXPR_LIST_PTR(GPIOV4), SIG_EXPR_LIST_PTR(RMII2RXD0),
+ SIG_EXPR_LIST_PTR(RGMII2RXD0));
+
+#define E8 173
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV5, GPIOV5, SIG_DESC_SET(SCUA0, 21));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD1, RGMII2);
+MS_PIN_DECL_(E8, SIG_EXPR_LIST_PTR(GPIOV5), SIG_EXPR_LIST_PTR(RMII2RXD1),
+ SIG_EXPR_LIST_PTR(RGMII2RXD1));
+
+#define D8 174
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV6, GPIOV6, SIG_DESC_SET(SCUA0, 22));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2CRSDV, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD2, RGMII2);
+MS_PIN_DECL_(D8, SIG_EXPR_LIST_PTR(GPIOV6), SIG_EXPR_LIST_PTR(RMII2CRSDV),
+ SIG_EXPR_LIST_PTR(RGMII2RXD2));
+
+#define C8 175
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV7, GPIOV7, SIG_DESC_SET(SCUA0, 23));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXER, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD3, RGMII2);
+MS_PIN_DECL_(C8, SIG_EXPR_LIST_PTR(GPIOV7), SIG_EXPR_LIST_PTR(RMII2RXER),
+ SIG_EXPR_LIST_PTR(RGMII2RXD3));
+
FUNC_GROUP_DECL(RMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
E10);
FUNC_GROUP_DECL(RGMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
E10);
+FUNC_GROUP_DECL(RMII2, D9, E9, A10, B10, C10, D10, C9, B9, A9, E8, D8, C8);
+FUNC_GROUP_DECL(RGMII2, D9, E9, A10, B10, C10, D10, C9, B9, A9, E8, D8, C8);
+
+#define L5 176
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW0, GPIOW0, SIG_DESC_SET(SCUA0, 24));
+SIG_EXPR_LIST_DECL_SINGLE(ADC0, ADC0);
+MS_PIN_DECL_(L5, SIG_EXPR_LIST_PTR(GPIOW0), SIG_EXPR_LIST_PTR(ADC0));
+FUNC_GROUP_DECL(ADC0, L5);
+
+#define L4 177
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW1, GPIOW1, SIG_DESC_SET(SCUA0, 25));
+SIG_EXPR_LIST_DECL_SINGLE(ADC1, ADC1);
+MS_PIN_DECL_(L4, SIG_EXPR_LIST_PTR(GPIOW1), SIG_EXPR_LIST_PTR(ADC1));
+FUNC_GROUP_DECL(ADC1, L4);
+
+#define L3 178
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW2, GPIOW2, SIG_DESC_SET(SCUA0, 26));
+SIG_EXPR_LIST_DECL_SINGLE(ADC2, ADC2);
+MS_PIN_DECL_(L3, SIG_EXPR_LIST_PTR(GPIOW2), SIG_EXPR_LIST_PTR(ADC2));
+FUNC_GROUP_DECL(ADC2, L3);
+
+#define L2 179
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW3, GPIOW3, SIG_DESC_SET(SCUA0, 27));
+SIG_EXPR_LIST_DECL_SINGLE(ADC3, ADC3);
+MS_PIN_DECL_(L2, SIG_EXPR_LIST_PTR(GPIOW3), SIG_EXPR_LIST_PTR(ADC3));
+FUNC_GROUP_DECL(ADC3, L2);
+
+#define L1 180
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW4, GPIOW4, SIG_DESC_SET(SCUA0, 28));
+SIG_EXPR_LIST_DECL_SINGLE(ADC4, ADC4);
+MS_PIN_DECL_(L1, SIG_EXPR_LIST_PTR(GPIOW4), SIG_EXPR_LIST_PTR(ADC4));
+FUNC_GROUP_DECL(ADC4, L1);
+
+#define M5 181
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW5, GPIOW5, SIG_DESC_SET(SCUA0, 29));
+SIG_EXPR_LIST_DECL_SINGLE(ADC5, ADC5);
+MS_PIN_DECL_(M5, SIG_EXPR_LIST_PTR(GPIOW5), SIG_EXPR_LIST_PTR(ADC5));
+FUNC_GROUP_DECL(ADC5, M5);
+
+#define M4 182
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW6, GPIOW6, SIG_DESC_SET(SCUA0, 30));
+SIG_EXPR_LIST_DECL_SINGLE(ADC6, ADC6);
+MS_PIN_DECL_(M4, SIG_EXPR_LIST_PTR(GPIOW6), SIG_EXPR_LIST_PTR(ADC6));
+FUNC_GROUP_DECL(ADC6, M4);
+
+#define M3 183
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW7, GPIOW7, SIG_DESC_SET(SCUA0, 31));
+SIG_EXPR_LIST_DECL_SINGLE(ADC7, ADC7);
+MS_PIN_DECL_(M3, SIG_EXPR_LIST_PTR(GPIOW7), SIG_EXPR_LIST_PTR(ADC7));
+FUNC_GROUP_DECL(ADC7, M3);
+
+#define M2 184
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX0, GPIOX0, SIG_DESC_SET(SCUA4, 0));
+SIG_EXPR_LIST_DECL_SINGLE(ADC8, ADC8);
+MS_PIN_DECL_(M2, SIG_EXPR_LIST_PTR(GPIOX0), SIG_EXPR_LIST_PTR(ADC8));
+FUNC_GROUP_DECL(ADC8, M2);
+
+#define M1 185
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX1, GPIOX1, SIG_DESC_SET(SCUA4, 1));
+SIG_EXPR_LIST_DECL_SINGLE(ADC9, ADC9);
+MS_PIN_DECL_(M1, SIG_EXPR_LIST_PTR(GPIOX1), SIG_EXPR_LIST_PTR(ADC9));
+FUNC_GROUP_DECL(ADC9, M1);
+
+#define N5 186
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX2, GPIOX2, SIG_DESC_SET(SCUA4, 2));
+SIG_EXPR_LIST_DECL_SINGLE(ADC10, ADC10);
+MS_PIN_DECL_(N5, SIG_EXPR_LIST_PTR(GPIOX2), SIG_EXPR_LIST_PTR(ADC10));
+FUNC_GROUP_DECL(ADC10, N5);
+
+#define N4 187
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX3, GPIOX3, SIG_DESC_SET(SCUA4, 3));
+SIG_EXPR_LIST_DECL_SINGLE(ADC11, ADC11);
+MS_PIN_DECL_(N4, SIG_EXPR_LIST_PTR(GPIOX3), SIG_EXPR_LIST_PTR(ADC11));
+FUNC_GROUP_DECL(ADC11, N4);
+
+#define N3 188
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX4, GPIOX4, SIG_DESC_SET(SCUA4, 4));
+SIG_EXPR_LIST_DECL_SINGLE(ADC12, ADC12);
+MS_PIN_DECL_(N3, SIG_EXPR_LIST_PTR(GPIOX4), SIG_EXPR_LIST_PTR(ADC12));
+FUNC_GROUP_DECL(ADC12, N3);
+
+#define N2 189
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX5, GPIOX5, SIG_DESC_SET(SCUA4, 5));
+SIG_EXPR_LIST_DECL_SINGLE(ADC13, ADC13);
+MS_PIN_DECL_(N2, SIG_EXPR_LIST_PTR(GPIOX5), SIG_EXPR_LIST_PTR(ADC13));
+FUNC_GROUP_DECL(ADC13, N2);
+
+#define N1 190
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX6, GPIOX6, SIG_DESC_SET(SCUA4, 6));
+SIG_EXPR_LIST_DECL_SINGLE(ADC14, ADC14);
+MS_PIN_DECL_(N1, SIG_EXPR_LIST_PTR(GPIOX6), SIG_EXPR_LIST_PTR(ADC14));
+FUNC_GROUP_DECL(ADC14, N1);
+
+#define P5 191
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX7, GPIOX7, SIG_DESC_SET(SCUA4, 7));
+SIG_EXPR_LIST_DECL_SINGLE(ADC15, ADC15);
+MS_PIN_DECL_(P5, SIG_EXPR_LIST_PTR(GPIOX7), SIG_EXPR_LIST_PTR(ADC15));
+FUNC_GROUP_DECL(ADC15, P5);
+
+#define C21 192
+SIG_EXPR_DECL(SIOS3, SIOS3, SIG_DESC_SET(SCUA4, 8));
+SIG_EXPR_DECL(SIOS3, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS3, SIOS3, ACPI);
+SS_PIN_DECL(C21, GPIOY0, SIOS3);
+FUNC_GROUP_DECL(SIOS3, C21);
+
+#define F20 193
+SIG_EXPR_DECL(SIOS5, SIOS5, SIG_DESC_SET(SCUA4, 9));
+SIG_EXPR_DECL(SIOS5, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS5, SIOS5, ACPI);
+SS_PIN_DECL(F20, GPIOY1, SIOS5);
+FUNC_GROUP_DECL(SIOS5, F20);
+
+#define G20 194
+SIG_EXPR_DECL(SIOPWREQ, SIOPWREQ, SIG_DESC_SET(SCUA4, 10));
+SIG_EXPR_DECL(SIOPWREQ, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWREQ, SIOPWREQ, ACPI);
+SS_PIN_DECL(G20, GPIOY2, SIOPWREQ);
+FUNC_GROUP_DECL(SIOPWREQ, G20);
+
+#define K20 195
+SIG_EXPR_DECL(SIOONCTRL, SIOONCTRL, SIG_DESC_SET(SCUA4, 11));
+SIG_EXPR_DECL(SIOONCTRL, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOONCTRL, SIOONCTRL, ACPI);
+SS_PIN_DECL(K20, GPIOY3, SIOONCTRL);
+FUNC_GROUP_DECL(SIOONCTRL, K20);
+
+FUNC_GROUP_DECL(ACPI, B19, A20, D17, A19, C21, F20, G20, K20);
+
+#define R22 200
+#define R22_DESC SIG_DESC_SET(SCUA4, 16)
+SIG_EXPR_DECL(ROMA2, ROM8, R22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA2, ROM16, R22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA2, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB0, VPO12, R22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB0, VPO24, R22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB0, VPOOFF1, R22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB0, SIG_EXPR_PTR(VPOB0, VPO12),
+ SIG_EXPR_PTR(VPOB0, VPO24), SIG_EXPR_PTR(VPOB0, VPOOFF1));
+MS_PIN_DECL(R22, GPIOZ0, ROMA2, VPOB0);
+
+#define P18 201
+#define P18_DESC SIG_DESC_SET(SCUA4, 17)
+SIG_EXPR_DECL(ROMA3, ROM8, P18_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA3, ROM16, P18_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA3, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB1, VPO12, P18_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB1, VPO24, P18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB1, VPOOFF1, P18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB1, SIG_EXPR_PTR(VPOB1, VPO12),
+ SIG_EXPR_PTR(VPOB1, VPO24), SIG_EXPR_PTR(VPOB1, VPOOFF1));
+MS_PIN_DECL(P18, GPIOZ1, ROMA3, VPOB1);
+
+#define P19 202
+#define P19_DESC SIG_DESC_SET(SCUA4, 18)
+SIG_EXPR_DECL(ROMA4, ROM8, P19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA4, ROM16, P19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA4, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB2, VPO12, P19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB2, VPO24, P19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF1, P19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB2, SIG_EXPR_PTR(VPOB2, VPO12),
+ SIG_EXPR_PTR(VPOB2, VPO24), SIG_EXPR_PTR(VPOB2, VPOOFF1));
+MS_PIN_DECL(P19, GPIOZ2, ROMA4, VPOB2);
+
+#define P20 203
+#define P20_DESC SIG_DESC_SET(SCUA4, 19)
+SIG_EXPR_DECL(ROMA5, ROM8, P20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA5, ROM16, P20_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA5, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB3, VPO12, P20_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB3, VPO24, P20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF1, P20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB3, SIG_EXPR_PTR(VPOB3, VPO12),
+ SIG_EXPR_PTR(VPOB3, VPO24), SIG_EXPR_PTR(VPOB3, VPOOFF1));
+MS_PIN_DECL(P20, GPIOZ3, ROMA5, VPOB3);
+
+#define P21 204
+#define P21_DESC SIG_DESC_SET(SCUA4, 20)
+SIG_EXPR_DECL(ROMA6, ROM8, P21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA6, ROM16, P21_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA6, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB4, VPO12, P21_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB4, VPO24, P21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF1, P21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB4, SIG_EXPR_PTR(VPOB4, VPO12),
+ SIG_EXPR_PTR(VPOB4, VPO24), SIG_EXPR_PTR(VPOB4, VPOOFF1));
+MS_PIN_DECL(P21, GPIOZ4, ROMA6, VPOB4);
+
+#define P22 205
+#define P22_DESC SIG_DESC_SET(SCUA4, 21)
+SIG_EXPR_DECL(ROMA7, ROM8, P22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA7, ROM16, P22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA7, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB5, VPO12, P22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB5, VPO24, P22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF1, P22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB5, SIG_EXPR_PTR(VPOB5, VPO12),
+ SIG_EXPR_PTR(VPOB5, VPO24), SIG_EXPR_PTR(VPOB5, VPOOFF1));
+MS_PIN_DECL(P22, GPIOZ5, ROMA7, VPOB5);
+
+#define M19 206
+#define M19_DESC SIG_DESC_SET(SCUA4, 22)
+SIG_EXPR_DECL(ROMA8, ROM8, M19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA8, ROM16, M19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA8, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB6, VPO12, M19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB6, VPO24, M19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF1, M19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB6, SIG_EXPR_PTR(VPOB6, VPO12),
+ SIG_EXPR_PTR(VPOB6, VPO24), SIG_EXPR_PTR(VPOB6, VPOOFF1));
+MS_PIN_DECL(M19, GPIOZ6, ROMA8, VPOB6);
+
+#define M20 207
+#define M20_DESC SIG_DESC_SET(SCUA4, 23)
+SIG_EXPR_DECL(ROMA9, ROM8, M20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA9, ROM16, M20_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA9, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB7, VPO12, M20_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB7, VPO24, M20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF1, M20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB7, SIG_EXPR_PTR(VPOB7, VPO12),
+ SIG_EXPR_PTR(VPOB7, VPO24), SIG_EXPR_PTR(VPOB7, VPOOFF1));
+MS_PIN_DECL(M20, GPIOZ7, ROMA9, VPOB7);
+
+#define M21 208
+#define M21_DESC SIG_DESC_SET(SCUA4, 24)
+SIG_EXPR_DECL(ROMA10, ROM8, M21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA10, ROM16, M21_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA10, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG0, VPO12, M21_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG0, VPO24, M21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG0, VPOOFF1, M21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG0, SIG_EXPR_PTR(VPOG0, VPO12),
+ SIG_EXPR_PTR(VPOG0, VPO24), SIG_EXPR_PTR(VPOG0, VPOOFF1));
+MS_PIN_DECL(M21, GPIOAA0, ROMA10, VPOG0);
+
+#define M22 209
+#define M22_DESC SIG_DESC_SET(SCUA4, 25)
+SIG_EXPR_DECL(ROMA11, ROM8, M22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA11, ROM16, M22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA11, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG1, VPO12, M22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG1, VPO24, M22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG1, VPOOFF1, M22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG1, SIG_EXPR_PTR(VPOG1, VPO12),
+ SIG_EXPR_PTR(VPOG1, VPO24), SIG_EXPR_PTR(VPOG1, VPOOFF1));
+MS_PIN_DECL(M22, GPIOAA1, ROMA11, VPOG1);
+
+#define L18 210
+#define L18_DESC SIG_DESC_SET(SCUA4, 26)
+SIG_EXPR_DECL(ROMA12, ROM8, L18_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA12, ROM16, L18_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA12, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG2, VPO12, L18_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG2, VPO24, L18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF1, L18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG2, SIG_EXPR_PTR(VPOG2, VPO12),
+ SIG_EXPR_PTR(VPOG2, VPO24), SIG_EXPR_PTR(VPOG2, VPOOFF1));
+MS_PIN_DECL(L18, GPIOAA2, ROMA12, VPOG2);
+
+#define L19 211
+#define L19_DESC SIG_DESC_SET(SCUA4, 27)
+SIG_EXPR_DECL(ROMA13, ROM8, L19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA13, ROM16, L19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA13, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG3, VPO12, L19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG3, VPO24, L19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF1, L19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG3, SIG_EXPR_PTR(VPOG3, VPO12),
+ SIG_EXPR_PTR(VPOG3, VPO24), SIG_EXPR_PTR(VPOG3, VPOOFF1));
+MS_PIN_DECL(L19, GPIOAA3, ROMA13, VPOG3);
+
+#define L20 212
+#define L20_DESC SIG_DESC_SET(SCUA4, 28)
+SIG_EXPR_DECL(ROMA14, ROM8, L20_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA14, ROM16, L20_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA14, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG4, VPO24, L20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF1, L20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG4, VPO24, VPOOFF1);
+MS_PIN_DECL(L20, GPIOAA4, ROMA14, VPOG4);
+
+#define L21 213
+#define L21_DESC SIG_DESC_SET(SCUA4, 29)
+SIG_EXPR_DECL(ROMA15, ROM8, L21_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA15, ROM16, L21_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA15, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG5, VPO24, L21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF1, L21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG5, VPO24, VPOOFF1);
+MS_PIN_DECL(L21, GPIOAA5, ROMA15, VPOG5);
+
+#define T18 214
+#define T18_DESC SIG_DESC_SET(SCUA4, 30)
+SIG_EXPR_DECL(ROMA16, ROM8, T18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA16, ROM16, T18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA16, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG6, VPO24, T18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF1, T18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG6, VPO24, VPOOFF1);
+MS_PIN_DECL(T18, GPIOAA6, ROMA16, VPOG6);
+
+#define N18 215
+#define N18_DESC SIG_DESC_SET(SCUA4, 31)
+SIG_EXPR_DECL(ROMA17, ROM8, N18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA17, ROM16, N18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA17, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG7, VPO24, N18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF1, N18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG7, VPO24, VPOOFF1);
+MS_PIN_DECL(N18, GPIOAA7, ROMA17, VPOG7);
+
+#define N19 216
+#define N19_DESC SIG_DESC_SET(SCUA8, 0)
+SIG_EXPR_DECL(ROMA18, ROM8, N19_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA18, ROM16, N19_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA18, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR0, VPO24, N19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR0, VPOOFF1, N19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR0, VPO24, VPOOFF1);
+MS_PIN_DECL(N19, GPIOAB0, ROMA18, VPOR0);
+
+#define M18 217
+#define M18_DESC SIG_DESC_SET(SCUA8, 1)
+SIG_EXPR_DECL(ROMA19, ROM8, M18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA19, ROM16, M18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA19, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR1, VPO24, M18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR1, VPOOFF1, M18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR1, VPO24, VPOOFF1);
+MS_PIN_DECL(M18, GPIOAB1, ROMA19, VPOR1);
+
+#define N22 218
+#define N22_DESC SIG_DESC_SET(SCUA8, 2)
+SIG_EXPR_DECL(ROMA20, ROM8, N22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA20, ROM16, N22_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA20, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR2, VPO24, N22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF1, N22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR2, VPO24, VPOOFF1);
+MS_PIN_DECL(N22, GPIOAB2, ROMA20, VPOR2);
+
+#define N20 219
+#define N20_DESC SIG_DESC_SET(SCUA8, 3)
+SIG_EXPR_DECL(ROMA21, ROM8, N20_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA21, ROM16, N20_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA21, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR3, VPO24, N20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF1, N20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR3, VPO24, VPOOFF1);
+MS_PIN_DECL(N20, GPIOAB3, ROMA21, VPOR3);
+
+FUNC_GROUP_DECL(ROM8, V20, U21, T19, V22, U20, R18, N21, L22, K18, W21, Y22,
+ U19, R22, P18, P19, P20, P21, P22, M19, M20, M21, M22, L18,
+ L19, L20, L21, T18, N18, N19, M18, N22, N20);
+FUNC_GROUP_DECL(ROM16, V20, U21, T19, V22, U20, R18, N21, L22, K18,
+ A8, C7, B7, A7, D7, B6, A6, E7, W21, Y22, U19, R22, P18, P19,
+ P20, P21, P22, M19, M20, M21, M22, L18, L19, L20, L21, T18,
+ N18, N19, M18, N22, N20);
+FUNC_GROUP_DECL(VPO12, U21, T19, V22, U20, R22, P18, P19, P20, P21, P22, M19,
+ M20, M21, M22, L18, L19, L20, L21, T18, N18, N19, M18, N22,
+ N20);
+FUNC_GROUP_DECL(VPO24, U21, T19, V22, U20, L22, K18, V21, W22, R22, P18, P19,
+ P20, P21, P22, M19, M20, M21, M22, L18, L19);
+
/* Note we account for GPIOY4-GPIOY7 even though they're not valid, thus 216
 * pins become 220.
*/
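The note just above is the only explanatory comment in this part of the file, and it is pure arithmetic: the descriptor array is sized by the highest pin number, so the four never-populated GPIOY4-GPIOY7 numbers (196-199, between K20 at 195 and R22 at 200 above) still occupy slots, leaving 216 real pins in a 220-entry table. Below is a hedged illustration of why that works with designated initializers; the macro is an assumption about the style ASPEED_PINCTRL_PIN uses, not its real body.

/*
 * Illustrative only: entries are keyed by pin number, so gaps in the
 * numbering simply leave zero-filled slots rather than shrinking the array.
 */
struct example_pin {
	unsigned int number;
	const char *name;
};

#define EXAMPLE_PIN(ball, nr)	[nr] = { .number = (nr), .name = #ball }

static const struct example_pin example_pins[220] = {
	EXAMPLE_PIN(K20, 195),	/* GPIOY3, last populated slot before the gap */
	EXAMPLE_PIN(R22, 200),	/* GPIOZ0, first populated slot after the gap */
	EXAMPLE_PIN(N20, 219),	/* GPIOAB3, highest number in the table */
};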
@@ -883,84 +1715,180 @@ FUNC_GROUP_DECL(RGMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
static struct pinctrl_pin_desc aspeed_g4_pins[ASPEED_G4_NR_PINS] = {
ASPEED_PINCTRL_PIN(A1),
+ ASPEED_PINCTRL_PIN(A10),
ASPEED_PINCTRL_PIN(A11),
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
+ ASPEED_PINCTRL_PIN(A14),
ASPEED_PINCTRL_PIN(A15),
+ ASPEED_PINCTRL_PIN(A16),
+ ASPEED_PINCTRL_PIN(A17),
ASPEED_PINCTRL_PIN(A18),
+ ASPEED_PINCTRL_PIN(A19),
ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A20),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
ASPEED_PINCTRL_PIN(A5),
ASPEED_PINCTRL_PIN(A6),
ASPEED_PINCTRL_PIN(A7),
ASPEED_PINCTRL_PIN(A8),
+ ASPEED_PINCTRL_PIN(A9),
+ ASPEED_PINCTRL_PIN(AA1),
ASPEED_PINCTRL_PIN(AA2),
ASPEED_PINCTRL_PIN(AA22),
ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(AA4),
+ ASPEED_PINCTRL_PIN(AA5),
+ ASPEED_PINCTRL_PIN(AA6),
ASPEED_PINCTRL_PIN(AA7),
ASPEED_PINCTRL_PIN(AB1),
ASPEED_PINCTRL_PIN(AB2),
+ ASPEED_PINCTRL_PIN(AB3),
+ ASPEED_PINCTRL_PIN(AB4),
+ ASPEED_PINCTRL_PIN(AB5),
+ ASPEED_PINCTRL_PIN(AB6),
ASPEED_PINCTRL_PIN(AB7),
ASPEED_PINCTRL_PIN(B1),
+ ASPEED_PINCTRL_PIN(B10),
ASPEED_PINCTRL_PIN(B11),
ASPEED_PINCTRL_PIN(B12),
+ ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
+ ASPEED_PINCTRL_PIN(B17),
+ ASPEED_PINCTRL_PIN(B18),
ASPEED_PINCTRL_PIN(B19),
ASPEED_PINCTRL_PIN(B2),
+ ASPEED_PINCTRL_PIN(B22),
ASPEED_PINCTRL_PIN(B3),
ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B5),
ASPEED_PINCTRL_PIN(B6),
ASPEED_PINCTRL_PIN(B7),
+ ASPEED_PINCTRL_PIN(B9),
ASPEED_PINCTRL_PIN(C1),
+ ASPEED_PINCTRL_PIN(C10),
ASPEED_PINCTRL_PIN(C11),
ASPEED_PINCTRL_PIN(C12),
+ ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
ASPEED_PINCTRL_PIN(C15),
+ ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C17),
+ ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
+ ASPEED_PINCTRL_PIN(C20),
+ ASPEED_PINCTRL_PIN(C21),
+ ASPEED_PINCTRL_PIN(C22),
ASPEED_PINCTRL_PIN(C3),
ASPEED_PINCTRL_PIN(C4),
ASPEED_PINCTRL_PIN(C5),
ASPEED_PINCTRL_PIN(C6),
ASPEED_PINCTRL_PIN(C7),
+ ASPEED_PINCTRL_PIN(C8),
+ ASPEED_PINCTRL_PIN(C9),
ASPEED_PINCTRL_PIN(D1),
+ ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D11),
ASPEED_PINCTRL_PIN(D12),
+ ASPEED_PINCTRL_PIN(D13),
ASPEED_PINCTRL_PIN(D14),
ASPEED_PINCTRL_PIN(D15),
ASPEED_PINCTRL_PIN(D16),
ASPEED_PINCTRL_PIN(D17),
ASPEED_PINCTRL_PIN(D18),
+ ASPEED_PINCTRL_PIN(D19),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D3),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
+ ASPEED_PINCTRL_PIN(D6),
ASPEED_PINCTRL_PIN(D7),
+ ASPEED_PINCTRL_PIN(D8),
+ ASPEED_PINCTRL_PIN(D9),
ASPEED_PINCTRL_PIN(E10),
ASPEED_PINCTRL_PIN(E11),
ASPEED_PINCTRL_PIN(E12),
+ ASPEED_PINCTRL_PIN(E13),
ASPEED_PINCTRL_PIN(E14),
+ ASPEED_PINCTRL_PIN(E15),
ASPEED_PINCTRL_PIN(E16),
+ ASPEED_PINCTRL_PIN(E18),
+ ASPEED_PINCTRL_PIN(E19),
ASPEED_PINCTRL_PIN(E2),
+ ASPEED_PINCTRL_PIN(E20),
ASPEED_PINCTRL_PIN(E3),
ASPEED_PINCTRL_PIN(E5),
+ ASPEED_PINCTRL_PIN(E6),
ASPEED_PINCTRL_PIN(E7),
+ ASPEED_PINCTRL_PIN(E8),
+ ASPEED_PINCTRL_PIN(E9),
+ ASPEED_PINCTRL_PIN(F18),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F3),
ASPEED_PINCTRL_PIN(F4),
ASPEED_PINCTRL_PIN(F5),
+ ASPEED_PINCTRL_PIN(G18),
+ ASPEED_PINCTRL_PIN(G19),
+ ASPEED_PINCTRL_PIN(G20),
ASPEED_PINCTRL_PIN(G5),
ASPEED_PINCTRL_PIN(H1),
+ ASPEED_PINCTRL_PIN(H18),
ASPEED_PINCTRL_PIN(H19),
ASPEED_PINCTRL_PIN(H2),
ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(H3),
+ ASPEED_PINCTRL_PIN(H4),
+ ASPEED_PINCTRL_PIN(J20),
+ ASPEED_PINCTRL_PIN(J21),
ASPEED_PINCTRL_PIN(J3),
+ ASPEED_PINCTRL_PIN(J4),
+ ASPEED_PINCTRL_PIN(J5),
ASPEED_PINCTRL_PIN(K18),
+ ASPEED_PINCTRL_PIN(K20),
+ ASPEED_PINCTRL_PIN(K5),
+ ASPEED_PINCTRL_PIN(L1),
+ ASPEED_PINCTRL_PIN(L18),
+ ASPEED_PINCTRL_PIN(L19),
+ ASPEED_PINCTRL_PIN(L2),
+ ASPEED_PINCTRL_PIN(L20),
+ ASPEED_PINCTRL_PIN(L21),
ASPEED_PINCTRL_PIN(L22),
+ ASPEED_PINCTRL_PIN(L3),
+ ASPEED_PINCTRL_PIN(L4),
+ ASPEED_PINCTRL_PIN(L5),
+ ASPEED_PINCTRL_PIN(M1),
+ ASPEED_PINCTRL_PIN(M18),
+ ASPEED_PINCTRL_PIN(M19),
+ ASPEED_PINCTRL_PIN(M2),
+ ASPEED_PINCTRL_PIN(M20),
+ ASPEED_PINCTRL_PIN(M21),
+ ASPEED_PINCTRL_PIN(M22),
+ ASPEED_PINCTRL_PIN(M3),
+ ASPEED_PINCTRL_PIN(M4),
+ ASPEED_PINCTRL_PIN(M5),
+ ASPEED_PINCTRL_PIN(N1),
+ ASPEED_PINCTRL_PIN(N18),
+ ASPEED_PINCTRL_PIN(N19),
+ ASPEED_PINCTRL_PIN(N2),
+ ASPEED_PINCTRL_PIN(N20),
ASPEED_PINCTRL_PIN(N21),
+ ASPEED_PINCTRL_PIN(N22),
+ ASPEED_PINCTRL_PIN(N3),
+ ASPEED_PINCTRL_PIN(N4),
+ ASPEED_PINCTRL_PIN(N5),
+ ASPEED_PINCTRL_PIN(P18),
+ ASPEED_PINCTRL_PIN(P19),
+ ASPEED_PINCTRL_PIN(P20),
+ ASPEED_PINCTRL_PIN(P21),
+ ASPEED_PINCTRL_PIN(P22),
+ ASPEED_PINCTRL_PIN(P5),
ASPEED_PINCTRL_PIN(R18),
+ ASPEED_PINCTRL_PIN(R22),
ASPEED_PINCTRL_PIN(T1),
+ ASPEED_PINCTRL_PIN(T18),
ASPEED_PINCTRL_PIN(T19),
ASPEED_PINCTRL_PIN(T2),
ASPEED_PINCTRL_PIN(T4),
@@ -979,28 +1907,61 @@ static struct pinctrl_pin_desc aspeed_g4_pins[ASPEED_G4_NR_PINS] = {
ASPEED_PINCTRL_PIN(V20),
ASPEED_PINCTRL_PIN(V21),
ASPEED_PINCTRL_PIN(V22),
+ ASPEED_PINCTRL_PIN(V3),
+ ASPEED_PINCTRL_PIN(V4),
+ ASPEED_PINCTRL_PIN(V5),
ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(V7),
ASPEED_PINCTRL_PIN(W1),
+ ASPEED_PINCTRL_PIN(W2),
ASPEED_PINCTRL_PIN(W21),
ASPEED_PINCTRL_PIN(W22),
+ ASPEED_PINCTRL_PIN(W3),
ASPEED_PINCTRL_PIN(W4),
ASPEED_PINCTRL_PIN(W5),
+ ASPEED_PINCTRL_PIN(W6),
+ ASPEED_PINCTRL_PIN(W7),
+ ASPEED_PINCTRL_PIN(Y1),
+ ASPEED_PINCTRL_PIN(Y2),
+ ASPEED_PINCTRL_PIN(Y21),
ASPEED_PINCTRL_PIN(Y22),
ASPEED_PINCTRL_PIN(Y3),
ASPEED_PINCTRL_PIN(Y4),
ASPEED_PINCTRL_PIN(Y5),
+ ASPEED_PINCTRL_PIN(Y6),
ASPEED_PINCTRL_PIN(Y7),
};
static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(ACPI),
+ ASPEED_PINCTRL_GROUP(ADC0),
+ ASPEED_PINCTRL_GROUP(ADC1),
+ ASPEED_PINCTRL_GROUP(ADC10),
+ ASPEED_PINCTRL_GROUP(ADC11),
+ ASPEED_PINCTRL_GROUP(ADC12),
+ ASPEED_PINCTRL_GROUP(ADC13),
+ ASPEED_PINCTRL_GROUP(ADC14),
+ ASPEED_PINCTRL_GROUP(ADC15),
+ ASPEED_PINCTRL_GROUP(ADC2),
+ ASPEED_PINCTRL_GROUP(ADC3),
+ ASPEED_PINCTRL_GROUP(ADC4),
+ ASPEED_PINCTRL_GROUP(ADC5),
+ ASPEED_PINCTRL_GROUP(ADC6),
+ ASPEED_PINCTRL_GROUP(ADC7),
+ ASPEED_PINCTRL_GROUP(ADC8),
+ ASPEED_PINCTRL_GROUP(ADC9),
ASPEED_PINCTRL_GROUP(BMCINT),
ASPEED_PINCTRL_GROUP(DDCCLK),
ASPEED_PINCTRL_GROUP(DDCDAT),
+ ASPEED_PINCTRL_GROUP(EXTRST),
ASPEED_PINCTRL_GROUP(FLACK),
ASPEED_PINCTRL_GROUP(FLBUSY),
ASPEED_PINCTRL_GROUP(FLWP),
+ ASPEED_PINCTRL_GROUP(GPID),
ASPEED_PINCTRL_GROUP(GPID0),
+ ASPEED_PINCTRL_GROUP(GPID2),
+ ASPEED_PINCTRL_GROUP(GPID4),
+ ASPEED_PINCTRL_GROUP(GPID6),
ASPEED_PINCTRL_GROUP(GPIE0),
ASPEED_PINCTRL_GROUP(GPIE2),
ASPEED_PINCTRL_GROUP(GPIE4),
@@ -1009,6 +1970,7 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(I2C11),
ASPEED_PINCTRL_GROUP(I2C12),
ASPEED_PINCTRL_GROUP(I2C13),
+ ASPEED_PINCTRL_GROUP(I2C14),
ASPEED_PINCTRL_GROUP(I2C3),
ASPEED_PINCTRL_GROUP(I2C4),
ASPEED_PINCTRL_GROUP(I2C5),
@@ -1018,25 +1980,37 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(I2C9),
ASPEED_PINCTRL_GROUP(LPCPD),
ASPEED_PINCTRL_GROUP(LPCPME),
- ASPEED_PINCTRL_GROUP(LPCPME),
+ ASPEED_PINCTRL_GROUP(LPCRST),
ASPEED_PINCTRL_GROUP(LPCSMI),
+ ASPEED_PINCTRL_GROUP(MAC1LINK),
+ ASPEED_PINCTRL_GROUP(MAC2LINK),
ASPEED_PINCTRL_GROUP(MDIO1),
ASPEED_PINCTRL_GROUP(MDIO2),
ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS2),
ASPEED_PINCTRL_GROUP(NCTS3),
ASPEED_PINCTRL_GROUP(NCTS4),
ASPEED_PINCTRL_GROUP(NDCD1),
+ ASPEED_PINCTRL_GROUP(NDCD2),
ASPEED_PINCTRL_GROUP(NDCD3),
ASPEED_PINCTRL_GROUP(NDCD4),
ASPEED_PINCTRL_GROUP(NDSR1),
+ ASPEED_PINCTRL_GROUP(NDSR2),
ASPEED_PINCTRL_GROUP(NDSR3),
+ ASPEED_PINCTRL_GROUP(NDSR4),
ASPEED_PINCTRL_GROUP(NDTR1),
+ ASPEED_PINCTRL_GROUP(NDTR2),
ASPEED_PINCTRL_GROUP(NDTR3),
+ ASPEED_PINCTRL_GROUP(NDTR4),
+ ASPEED_PINCTRL_GROUP(NDTS4),
ASPEED_PINCTRL_GROUP(NRI1),
+ ASPEED_PINCTRL_GROUP(NRI2),
ASPEED_PINCTRL_GROUP(NRI3),
ASPEED_PINCTRL_GROUP(NRI4),
ASPEED_PINCTRL_GROUP(NRTS1),
+ ASPEED_PINCTRL_GROUP(NRTS2),
ASPEED_PINCTRL_GROUP(NRTS3),
+ ASPEED_PINCTRL_GROUP(OSCCLK),
ASPEED_PINCTRL_GROUP(PWM0),
ASPEED_PINCTRL_GROUP(PWM1),
ASPEED_PINCTRL_GROUP(PWM2),
@@ -1046,7 +2020,9 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(PWM6),
ASPEED_PINCTRL_GROUP(PWM7),
ASPEED_PINCTRL_GROUP(RGMII1),
+ ASPEED_PINCTRL_GROUP(RGMII2),
ASPEED_PINCTRL_GROUP(RMII1),
+ ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(ROM16),
ASPEED_PINCTRL_GROUP(ROM8),
ASPEED_PINCTRL_GROUP(ROMCS1),
@@ -1054,21 +2030,48 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(ROMCS3),
ASPEED_PINCTRL_GROUP(ROMCS4),
ASPEED_PINCTRL_GROUP(RXD1),
+ ASPEED_PINCTRL_GROUP(RXD2),
ASPEED_PINCTRL_GROUP(RXD3),
ASPEED_PINCTRL_GROUP(RXD4),
+ ASPEED_PINCTRL_GROUP(SALT1),
+ ASPEED_PINCTRL_GROUP(SALT2),
+ ASPEED_PINCTRL_GROUP(SALT3),
+ ASPEED_PINCTRL_GROUP(SALT4),
ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SD2),
+ ASPEED_PINCTRL_GROUP(SGPMCK),
ASPEED_PINCTRL_GROUP(SGPMI),
+ ASPEED_PINCTRL_GROUP(SGPMLD),
+ ASPEED_PINCTRL_GROUP(SGPMO),
+ ASPEED_PINCTRL_GROUP(SGPSCK),
+ ASPEED_PINCTRL_GROUP(SGPSI0),
+ ASPEED_PINCTRL_GROUP(SGPSI1),
+ ASPEED_PINCTRL_GROUP(SGPSLD),
+ ASPEED_PINCTRL_GROUP(SIOONCTRL),
ASPEED_PINCTRL_GROUP(SIOPBI),
ASPEED_PINCTRL_GROUP(SIOPBO),
+ ASPEED_PINCTRL_GROUP(SIOPWREQ),
+ ASPEED_PINCTRL_GROUP(SIOPWRGD),
+ ASPEED_PINCTRL_GROUP(SIOS3),
+ ASPEED_PINCTRL_GROUP(SIOS5),
+ ASPEED_PINCTRL_GROUP(SIOSCI),
+ ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
+ ASPEED_PINCTRL_GROUP(SPICS1),
ASPEED_PINCTRL_GROUP(TIMER3),
+ ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
ASPEED_PINCTRL_GROUP(TXD1),
+ ASPEED_PINCTRL_GROUP(TXD2),
ASPEED_PINCTRL_GROUP(TXD3),
ASPEED_PINCTRL_GROUP(TXD4),
ASPEED_PINCTRL_GROUP(UART6),
+ ASPEED_PINCTRL_GROUP(USBCKI),
+ ASPEED_PINCTRL_GROUP(VGABIOS_ROM),
ASPEED_PINCTRL_GROUP(VGAHS),
ASPEED_PINCTRL_GROUP(VGAVS),
ASPEED_PINCTRL_GROUP(VPI18),
@@ -1076,17 +2079,40 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(VPI30),
ASPEED_PINCTRL_GROUP(VPO12),
ASPEED_PINCTRL_GROUP(VPO24),
+ ASPEED_PINCTRL_GROUP(WDTRST1),
+ ASPEED_PINCTRL_GROUP(WDTRST2),
};
static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(ACPI),
+ ASPEED_PINCTRL_FUNC(ADC0),
+ ASPEED_PINCTRL_FUNC(ADC1),
+ ASPEED_PINCTRL_FUNC(ADC10),
+ ASPEED_PINCTRL_FUNC(ADC11),
+ ASPEED_PINCTRL_FUNC(ADC12),
+ ASPEED_PINCTRL_FUNC(ADC13),
+ ASPEED_PINCTRL_FUNC(ADC14),
+ ASPEED_PINCTRL_FUNC(ADC15),
+ ASPEED_PINCTRL_FUNC(ADC2),
+ ASPEED_PINCTRL_FUNC(ADC3),
+ ASPEED_PINCTRL_FUNC(ADC4),
+ ASPEED_PINCTRL_FUNC(ADC5),
+ ASPEED_PINCTRL_FUNC(ADC6),
+ ASPEED_PINCTRL_FUNC(ADC7),
+ ASPEED_PINCTRL_FUNC(ADC8),
+ ASPEED_PINCTRL_FUNC(ADC9),
ASPEED_PINCTRL_FUNC(BMCINT),
ASPEED_PINCTRL_FUNC(DDCCLK),
ASPEED_PINCTRL_FUNC(DDCDAT),
+ ASPEED_PINCTRL_FUNC(EXTRST),
ASPEED_PINCTRL_FUNC(FLACK),
ASPEED_PINCTRL_FUNC(FLBUSY),
ASPEED_PINCTRL_FUNC(FLWP),
+ ASPEED_PINCTRL_FUNC(GPID),
ASPEED_PINCTRL_FUNC(GPID0),
+ ASPEED_PINCTRL_FUNC(GPID2),
+ ASPEED_PINCTRL_FUNC(GPID4),
+ ASPEED_PINCTRL_FUNC(GPID6),
ASPEED_PINCTRL_FUNC(GPIE0),
ASPEED_PINCTRL_FUNC(GPIE2),
ASPEED_PINCTRL_FUNC(GPIE4),
@@ -1095,6 +2121,7 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(I2C11),
ASPEED_PINCTRL_FUNC(I2C12),
ASPEED_PINCTRL_FUNC(I2C13),
+ ASPEED_PINCTRL_FUNC(I2C14),
ASPEED_PINCTRL_FUNC(I2C3),
ASPEED_PINCTRL_FUNC(I2C4),
ASPEED_PINCTRL_FUNC(I2C5),
@@ -1104,24 +2131,37 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(I2C9),
ASPEED_PINCTRL_FUNC(LPCPD),
ASPEED_PINCTRL_FUNC(LPCPME),
+ ASPEED_PINCTRL_FUNC(LPCRST),
ASPEED_PINCTRL_FUNC(LPCSMI),
+ ASPEED_PINCTRL_FUNC(MAC1LINK),
+ ASPEED_PINCTRL_FUNC(MAC2LINK),
ASPEED_PINCTRL_FUNC(MDIO1),
ASPEED_PINCTRL_FUNC(MDIO2),
ASPEED_PINCTRL_FUNC(NCTS1),
+ ASPEED_PINCTRL_FUNC(NCTS2),
ASPEED_PINCTRL_FUNC(NCTS3),
ASPEED_PINCTRL_FUNC(NCTS4),
ASPEED_PINCTRL_FUNC(NDCD1),
+ ASPEED_PINCTRL_FUNC(NDCD2),
ASPEED_PINCTRL_FUNC(NDCD3),
ASPEED_PINCTRL_FUNC(NDCD4),
ASPEED_PINCTRL_FUNC(NDSR1),
+ ASPEED_PINCTRL_FUNC(NDSR2),
ASPEED_PINCTRL_FUNC(NDSR3),
+ ASPEED_PINCTRL_FUNC(NDSR4),
ASPEED_PINCTRL_FUNC(NDTR1),
+ ASPEED_PINCTRL_FUNC(NDTR2),
ASPEED_PINCTRL_FUNC(NDTR3),
+ ASPEED_PINCTRL_FUNC(NDTR4),
+ ASPEED_PINCTRL_FUNC(NDTS4),
ASPEED_PINCTRL_FUNC(NRI1),
+ ASPEED_PINCTRL_FUNC(NRI2),
ASPEED_PINCTRL_FUNC(NRI3),
ASPEED_PINCTRL_FUNC(NRI4),
ASPEED_PINCTRL_FUNC(NRTS1),
+ ASPEED_PINCTRL_FUNC(NRTS2),
ASPEED_PINCTRL_FUNC(NRTS3),
+ ASPEED_PINCTRL_FUNC(OSCCLK),
ASPEED_PINCTRL_FUNC(PWM0),
ASPEED_PINCTRL_FUNC(PWM1),
ASPEED_PINCTRL_FUNC(PWM2),
@@ -1131,7 +2171,9 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(PWM6),
ASPEED_PINCTRL_FUNC(PWM7),
ASPEED_PINCTRL_FUNC(RGMII1),
+ ASPEED_PINCTRL_FUNC(RGMII2),
ASPEED_PINCTRL_FUNC(RMII1),
+ ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(ROM16),
ASPEED_PINCTRL_FUNC(ROM8),
ASPEED_PINCTRL_FUNC(ROMCS1),
@@ -1139,21 +2181,48 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(ROMCS3),
ASPEED_PINCTRL_FUNC(ROMCS4),
ASPEED_PINCTRL_FUNC(RXD1),
+ ASPEED_PINCTRL_FUNC(RXD2),
ASPEED_PINCTRL_FUNC(RXD3),
ASPEED_PINCTRL_FUNC(RXD4),
+ ASPEED_PINCTRL_FUNC(SALT1),
+ ASPEED_PINCTRL_FUNC(SALT2),
+ ASPEED_PINCTRL_FUNC(SALT3),
+ ASPEED_PINCTRL_FUNC(SALT4),
ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SD2),
+ ASPEED_PINCTRL_FUNC(SGPMCK),
ASPEED_PINCTRL_FUNC(SGPMI),
+ ASPEED_PINCTRL_FUNC(SGPMLD),
+ ASPEED_PINCTRL_FUNC(SGPMO),
+ ASPEED_PINCTRL_FUNC(SGPSCK),
+ ASPEED_PINCTRL_FUNC(SGPSI0),
+ ASPEED_PINCTRL_FUNC(SGPSI1),
+ ASPEED_PINCTRL_FUNC(SGPSLD),
+ ASPEED_PINCTRL_FUNC(SIOONCTRL),
ASPEED_PINCTRL_FUNC(SIOPBI),
ASPEED_PINCTRL_FUNC(SIOPBO),
+ ASPEED_PINCTRL_FUNC(SIOPWREQ),
+ ASPEED_PINCTRL_FUNC(SIOPWRGD),
+ ASPEED_PINCTRL_FUNC(SIOS3),
+ ASPEED_PINCTRL_FUNC(SIOS5),
+ ASPEED_PINCTRL_FUNC(SIOSCI),
+ ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
+ ASPEED_PINCTRL_FUNC(SPICS1),
ASPEED_PINCTRL_FUNC(TIMER3),
+ ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
ASPEED_PINCTRL_FUNC(TXD1),
+ ASPEED_PINCTRL_FUNC(TXD2),
ASPEED_PINCTRL_FUNC(TXD3),
ASPEED_PINCTRL_FUNC(TXD4),
ASPEED_PINCTRL_FUNC(UART6),
+ ASPEED_PINCTRL_FUNC(USBCKI),
+ ASPEED_PINCTRL_FUNC(VGABIOS_ROM),
ASPEED_PINCTRL_FUNC(VGAHS),
ASPEED_PINCTRL_FUNC(VGAVS),
ASPEED_PINCTRL_FUNC(VPI18),
@@ -1161,6 +2230,8 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(VPI30),
ASPEED_PINCTRL_FUNC(VPO12),
ASPEED_PINCTRL_FUNC(VPO24),
+ ASPEED_PINCTRL_FUNC(WDTRST1),
+ ASPEED_PINCTRL_FUNC(WDTRST2),
};
static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 87b46390b695..43221a3c7e23 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -24,14 +25,28 @@
#include "../pinctrl-utils.h"
#include "pinctrl-aspeed.h"
-#define ASPEED_G5_NR_PINS 228
+#define ASPEED_G5_NR_PINS 232
-#define COND1 { SCU90, BIT(6), 0, 0 }
-#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
+#define COND1 { ASPEED_IP_SCU, SCU90, BIT(6), 0, 0 }
+#define COND2 { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+
+/* LHCR0 is offset from the end of the H8S/2168-compatible registers */
+#define LHCR0 0x20
+#define GFX064 0x64
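LHCR0 and GFX064 are the first register offsets in this file that do not live in the SCU: LHCR0 sits in the LPC host controller and GFX064 in the graphics controller, which lines up with the <linux/mfd/syscon.h> include added above and with descriptors such as LPCHC_DESC later in this file using SIG_DESC_IP_SET(ASPEED_IP_LPC, LHCR0, 0). A minimal, hedged sketch of how such a register is typically reached through a syscon regmap follows; the phandle property name is illustrative, not taken from this patch.

/*
 * Hedged sketch, not code from this patch: obtain another IP block's regmap
 * via syscon and set one mux bit, mirroring what an LPCHC-style descriptor
 * asks for (register 0x20, bit 0, value 1).
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_enable_lpchc(struct device_node *np)
{
	struct regmap *lpc;

	/* property name is an assumption made for this example */
	lpc = syscon_regmap_lookup_by_phandle(np, "aspeed,external-nodes");
	if (IS_ERR(lpc))
		return PTR_ERR(lpc);

	return regmap_update_bits(lpc, 0x20 /* LHCR0 */, BIT(0), BIT(0));
}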
#define B14 0
SSSF_PIN_DECL(B14, GPIOA0, MAC1LINK, SIG_DESC_SET(SCU80, 0));
+#define D14 1
+SSSF_PIN_DECL(D14, GPIOA1, MAC2LINK, SIG_DESC_SET(SCU80, 1));
+
+#define D13 2
+SIG_EXPR_LIST_DECL_SINGLE(SPI1CS1, SPI1CS1, SIG_DESC_SET(SCU80, 15));
+SIG_EXPR_LIST_DECL_SINGLE(TIMER3, TIMER3, SIG_DESC_SET(SCU80, 2));
+MS_PIN_DECL(D13, GPIOA2, SPI1CS1, TIMER3);
+FUNC_GROUP_DECL(SPI1CS1, D13);
+FUNC_GROUP_DECL(TIMER3, D13);
+
#define E13 3
SSSF_PIN_DECL(E13, GPIOA3, TIMER4, SIG_DESC_SET(SCU80, 3));
@@ -71,6 +86,32 @@ FUNC_GROUP_DECL(TIMER8, B13);
FUNC_GROUP_DECL(MDIO2, C13, B13);
+#define K19 8
+GPIO_PIN_DECL(K19, GPIOB0);
+
+#define L19 9
+GPIO_PIN_DECL(L19, GPIOB1);
+
+#define L18 10
+GPIO_PIN_DECL(L18, GPIOB2);
+
+#define K18 11
+GPIO_PIN_DECL(K18, GPIOB3);
+
+#define J20 12
+SSSF_PIN_DECL(J20, GPIOB4, USBCKI, SIG_DESC_SET(HW_STRAP1, 23));
+
+#define H21 13
+#define H21_DESC SIG_DESC_SET(SCU80, 13)
+SIG_EXPR_LIST_DECL_SINGLE(LPCPD, LPCPD, H21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LPCSMI, LPCSMI, H21_DESC);
+MS_PIN_DECL(H21, GPIOB5, LPCPD, LPCSMI);
+FUNC_GROUP_DECL(LPCPD, H21);
+FUNC_GROUP_DECL(LPCSMI, H21);
+
+#define H22 14
+SSSF_PIN_DECL(H22, GPIOB6, LPCPME, SIG_DESC_SET(SCU80, 14));
+
#define H20 15
GPIO_PIN_DECL(H20, GPIOB7);
@@ -167,7 +208,44 @@ MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
FUNC_GROUP_DECL(GPID2, F20, D20);
-#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
+#define GPID4_DESC SIG_DESC_SET(SCU8C, 10)
+
+#define D21 28
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT2, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4IN, GPID4, GPID);
+MS_PIN_DECL(D21, GPIOD4, SD2DAT2, GPID4IN);
+
+#define E20 29
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT3, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4OUT, GPID4, GPID);
+MS_PIN_DECL(E20, GPIOD5, SD2DAT3, GPID4OUT);
+
+FUNC_GROUP_DECL(GPID4, D21, E20);
+
+#define GPID6_DESC SIG_DESC_SET(SCU8C, 11)
+
+#define G18 30
+SIG_EXPR_LIST_DECL_SINGLE(SD2CD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6IN, GPID6, GPID);
+MS_PIN_DECL(G18, GPIOD6, SD2CD, GPID6IN);
+
+#define C21 31
+SIG_EXPR_LIST_DECL_SINGLE(SD2WP, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6OUT, GPID6, GPID);
+MS_PIN_DECL(C21, GPIOD7, SD2WP, GPID6OUT);
+
+FUNC_GROUP_DECL(GPID6, G18, C21);
+FUNC_GROUP_DECL(SD2, F19, E21, F20, D20, D21, E20, G18, C21);
+
+#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 22)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
#define B20 32
@@ -176,6 +254,7 @@ SIG_EXPR_DECL(GPIE0IN, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0IN, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0IN, GPIE0, GPIE);
MS_PIN_DECL(B20, GPIOE0, NCTS3, GPIE0IN);
+FUNC_GROUP_DECL(NCTS3, B20);
#define C20 33
SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
@@ -183,12 +262,233 @@ SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
+FUNC_GROUP_DECL(NDCD3, C20);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
-#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
-#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+#define GPIE2_DESC SIG_DESC_SET(SCU8C, 13)
+
+#define F18 34
+SIG_EXPR_LIST_DECL_SINGLE(NDSR3, NDSR3, SIG_DESC_SET(SCU80, 18));
+SIG_EXPR_DECL(GPIE2IN, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2IN, GPIE2, GPIE);
+MS_PIN_DECL(F18, GPIOE2, NDSR3, GPIE2IN);
+FUNC_GROUP_DECL(NDSR3, F18);
+
+#define F17 35
+SIG_EXPR_LIST_DECL_SINGLE(NRI3, NRI3, SIG_DESC_SET(SCU80, 19));
+SIG_EXPR_DECL(GPIE2OUT, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2OUT, GPIE2, GPIE);
+MS_PIN_DECL(F17, GPIOE3, NRI3, GPIE2OUT);
+FUNC_GROUP_DECL(NRI3, F17);
+
+FUNC_GROUP_DECL(GPIE2, F18, F17);
+
+#define GPIE4_DESC SIG_DESC_SET(SCU8C, 14)
+
+#define E18 36
+SIG_EXPR_LIST_DECL_SINGLE(NDTR3, NDTR3, SIG_DESC_SET(SCU80, 20));
+SIG_EXPR_DECL(GPIE4IN, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4IN, GPIE4, GPIE);
+MS_PIN_DECL(E18, GPIOE4, NDTR3, GPIE4IN);
+FUNC_GROUP_DECL(NDTR3, E18);
+
+#define D19 37
+SIG_EXPR_LIST_DECL_SINGLE(NRTS3, NRTS3, SIG_DESC_SET(SCU80, 21));
+SIG_EXPR_DECL(GPIE4OUT, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4OUT, GPIE4, GPIE);
+MS_PIN_DECL(D19, GPIOE5, NRTS3, GPIE4OUT);
+FUNC_GROUP_DECL(NRTS3, D19);
+
+FUNC_GROUP_DECL(GPIE4, E18, D19);
+
+#define GPIE6_DESC SIG_DESC_SET(SCU8C, 15)
+
+#define A20 38
+SIG_EXPR_LIST_DECL_SINGLE(TXD3, TXD3, SIG_DESC_SET(SCU80, 22));
+SIG_EXPR_DECL(GPIE6IN, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6IN, GPIE6, GPIE);
+MS_PIN_DECL(A20, GPIOE6, TXD3, GPIE6IN);
+FUNC_GROUP_DECL(TXD3, A20);
+
+#define B19 39
+SIG_EXPR_LIST_DECL_SINGLE(RXD3, RXD3, SIG_DESC_SET(SCU80, 23));
+SIG_EXPR_DECL(GPIE6OUT, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6OUT, GPIE6, GPIE);
+MS_PIN_DECL(B19, GPIOE7, RXD3, GPIE6OUT);
+FUNC_GROUP_DECL(RXD3, B19);
+
+FUNC_GROUP_DECL(GPIE6, A20, B19);
+
+#define LPCHC_DESC SIG_DESC_IP_SET(ASPEED_IP_LPC, LHCR0, 0)
+#define LPCPLUS_DESC SIG_DESC_SET(SCU90, 30)
+
+#define J19 40
+SIG_EXPR_DECL(LHAD0, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD0, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD0, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NCTS4, NCTS4, SIG_DESC_SET(SCU80, 24));
+MS_PIN_DECL(J19, GPIOF0, LHAD0, NCTS4);
+FUNC_GROUP_DECL(NCTS4, J19);
+
+#define J18 41
+SIG_EXPR_DECL(LHAD1, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD1, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD1, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD4, NDCD4, SIG_DESC_SET(SCU80, 25));
+MS_PIN_DECL(J18, GPIOF1, LHAD1, NDCD4);
+FUNC_GROUP_DECL(NDCD4, J18);
+
+#define B22 42
+SIG_EXPR_DECL(LHAD2, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD2, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD2, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR4, NDSR4, SIG_DESC_SET(SCU80, 26));
+MS_PIN_DECL(B22, GPIOF2, LHAD2, NDSR4);
+FUNC_GROUP_DECL(NDSR4, B22);
+
+#define B21 43
+SIG_EXPR_DECL(LHAD3, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD3, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD3, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NRI4, NRI4, SIG_DESC_SET(SCU80, 27));
+MS_PIN_DECL(B21, GPIOF3, LHAD3, NRI4);
+FUNC_GROUP_DECL(NRI4, B21);
+
+#define A21 44
+SIG_EXPR_DECL(LHCLK, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHCLK, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHCLK, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR4, NDTR4, SIG_DESC_SET(SCU80, 28));
+MS_PIN_DECL(A21, GPIOF4, LHCLK, NDTR4);
+FUNC_GROUP_DECL(NDTR4, A21);
+
+#define H19 45
+SIG_EXPR_DECL(LHFRAME, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHFRAME, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHFRAME, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS4, NRTS4, SIG_DESC_SET(SCU80, 29));
+MS_PIN_DECL(H19, GPIOF5, LHFRAME, NRTS4);
+FUNC_GROUP_DECL(NRTS4, H19);
+
+#define G17 46
+SIG_EXPR_LIST_DECL_SINGLE(LHSIRQ, LPCHC, LPCHC_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TXD4, TXD4, SIG_DESC_SET(SCU80, 30));
+MS_PIN_DECL(G17, GPIOF6, LHSIRQ, TXD4);
+FUNC_GROUP_DECL(TXD4, G17);
+
+#define H18 47
+SIG_EXPR_DECL(LHRST, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHRST, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHRST, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(RXD4, RXD4, SIG_DESC_SET(SCU80, 31));
+MS_PIN_DECL(H18, GPIOF7, LHRST, RXD4);
+FUNC_GROUP_DECL(RXD4, H18);
+
+FUNC_GROUP_DECL(LPCHC, J19, J18, B22, B21, A21, H19, G17, H18);
+FUNC_GROUP_DECL(LPCPLUS, J19, J18, B22, B21, A21, H19, H18);
+
+#define A19 48
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1CK, SGPS1, COND1, SIG_DESC_SET(SCU84, 0));
+SS_PIN_DECL(A19, GPIOG0, SGPS1CK);
+
+#define E19 49
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1LD, SGPS1, COND1, SIG_DESC_SET(SCU84, 1));
+SS_PIN_DECL(E19, GPIOG1, SGPS1LD);
+
+#define C19 50
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1I0, SGPS1, COND1, SIG_DESC_SET(SCU84, 2));
+SS_PIN_DECL(C19, GPIOG2, SGPS1I0);
+
+#define E16 51
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1I1, SGPS1, COND1, SIG_DESC_SET(SCU84, 3));
+SS_PIN_DECL(E16, GPIOG3, SGPS1I1);
+
+FUNC_GROUP_DECL(SGPS1, A19, E19, C19, E16);
+
+#define SGPS2_DESC SIG_DESC_SET(SCU94, 12)
+
+#define E17 52
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2CK, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT1, SALT1, COND1, SIG_DESC_SET(SCU84, 4));
+MS_PIN_DECL(E17, GPIOG4, SGPS2CK, SALT1);
+FUNC_GROUP_DECL(SALT1, E17);
+
+#define D16 53
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2LD, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT2, SALT2, COND1, SIG_DESC_SET(SCU84, 5));
+MS_PIN_DECL(D16, GPIOG5, SGPS2LD, SALT2);
+FUNC_GROUP_DECL(SALT2, D16);
+
+#define D15 54
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2I0, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT3, SALT3, COND1, SIG_DESC_SET(SCU84, 6));
+MS_PIN_DECL(D15, GPIOG6, SGPS2I0, SALT3);
+FUNC_GROUP_DECL(SALT3, D15);
+
+#define E14 55
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2I1, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT4, SALT4, COND1, SIG_DESC_SET(SCU84, 7));
+MS_PIN_DECL(E14, GPIOG7, SGPS2I1, SALT4);
+FUNC_GROUP_DECL(SALT4, E14);
+
+FUNC_GROUP_DECL(SGPS2, E17, D16, D15, E14);
+
+#define UART6_DESC SIG_DESC_SET(SCU90, 7)
+
+#define A18 56
+SIG_EXPR_LIST_DECL_SINGLE(DASHA18, DASHA18, COND1, SIG_DESC_SET(SCU94, 5));
+SIG_EXPR_LIST_DECL_SINGLE(NCTS6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(A18, GPIOH0, DASHA18, NCTS6);
+
+#define B18 57
+SIG_EXPR_LIST_DECL_SINGLE(DASHB18, DASHB18, COND1, SIG_DESC_SET(SCU94, 5));
+SIG_EXPR_LIST_DECL_SINGLE(NDCD6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(B18, GPIOH1, DASHB18, NDCD6);
+
+#define D17 58
+SIG_EXPR_LIST_DECL_SINGLE(DASHD17, DASHD17, COND1, SIG_DESC_SET(SCU94, 6));
+SIG_EXPR_LIST_DECL_SINGLE(NDSR6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(D17, GPIOH2, DASHD17, NDSR6);
+
+#define C17 59
+SIG_EXPR_LIST_DECL_SINGLE(DASHC17, DASHC17, COND1, SIG_DESC_SET(SCU94, 6));
+SIG_EXPR_LIST_DECL_SINGLE(NRI6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(C17, GPIOH3, DASHC17, NRI6);
+
+#define A17 60
+SIG_EXPR_LIST_DECL_SINGLE(DASHA17, DASHA17, COND1, SIG_DESC_SET(SCU94, 7));
+SIG_EXPR_LIST_DECL_SINGLE(NDTR6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(A17, GPIOH4, DASHA17, NDTR6);
+
+#define B17 61
+SIG_EXPR_LIST_DECL_SINGLE(DASHB17, DASHB17, COND1, SIG_DESC_SET(SCU94, 7));
+SIG_EXPR_LIST_DECL_SINGLE(NRTS6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(B17, GPIOH5, DASHB17, NRTS6);
+
+#define A16 62
+SIG_EXPR_LIST_DECL_SINGLE(TXD6, UART6, COND1, UART6_DESC);
+SS_PIN_DECL(A16, GPIOH6, TXD6);
+
+#define D18 63
+SIG_EXPR_LIST_DECL_SINGLE(RXD6, UART6, COND1, UART6_DESC);
+SS_PIN_DECL(D18, GPIOH7, RXD6);
+
+FUNC_GROUP_DECL(UART6, A18, B18, D17, C17, A17, B17, A16, D18);
+
+#define SPI1_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 3, 0 }
#define C18 64
SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
@@ -277,6 +577,30 @@ SS_PIN_DECL(N3, GPIOJ2, SGPMO);
SIG_EXPR_LIST_DECL_SINGLE(SGPMI, SGPM, SIG_DESC_SET(SCU84, 11));
SS_PIN_DECL(N4, GPIOJ3, SGPMI);
+#define N5 76
+SIG_EXPR_LIST_DECL_SINGLE(VGAHS, VGAHS, SIG_DESC_SET(SCU84, 12));
+SIG_EXPR_LIST_DECL_SINGLE(DASHN5, DASHN5, SIG_DESC_SET(SCU94, 8));
+MS_PIN_DECL(N5, GPIOJ4, VGAHS, DASHN5);
+FUNC_GROUP_DECL(VGAHS, N5);
+
+#define R4 77
+SIG_EXPR_LIST_DECL_SINGLE(VGAVS, VGAVS, SIG_DESC_SET(SCU84, 13));
+SIG_EXPR_LIST_DECL_SINGLE(DASHR4, DASHR4, SIG_DESC_SET(SCU94, 8));
+MS_PIN_DECL(R4, GPIOJ5, VGAVS, DASHR4);
+FUNC_GROUP_DECL(VGAVS, R4);
+
+#define R3 78
+SIG_EXPR_LIST_DECL_SINGLE(DDCCLK, DDCCLK, SIG_DESC_SET(SCU84, 14));
+SIG_EXPR_LIST_DECL_SINGLE(DASHR3, DASHR3, SIG_DESC_SET(SCU94, 9));
+MS_PIN_DECL(R3, GPIOJ6, DDCCLK, DASHR3);
+FUNC_GROUP_DECL(DDCCLK, R3);
+
+#define T3 79
+SIG_EXPR_LIST_DECL_SINGLE(DDCDAT, DDCDAT, SIG_DESC_SET(SCU84, 15));
+SIG_EXPR_LIST_DECL_SINGLE(DASHT3, DASHT3, SIG_DESC_SET(SCU94, 9));
+MS_PIN_DECL(T3, GPIOJ7, DDCDAT, DASHT3);
+FUNC_GROUP_DECL(DDCDAT, T3);
+
#define I2C5_DESC SIG_DESC_SET(SCU90, 18)
#define L3 80
@@ -325,10 +649,119 @@ SS_PIN_DECL(R1, GPIOK7, SDA8);
FUNC_GROUP_DECL(I2C8, P2, R1);
-#define VPIOFF0_DESC { SCU90, GENMASK(5, 4), 0, 0 }
-#define VPIOFF1_DESC { SCU90, GENMASK(5, 4), 1, 0 }
-#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
-#define VPIRSVD_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+#define T2 88
+SSSF_PIN_DECL(T2, GPIOL0, NCTS1, SIG_DESC_SET(SCU84, 16));
+
+#define VPIOFF0_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 0, 0 }
+#define VPIOFF1_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 2, 0 }
+#define VPIRSVD_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 3, 0 }
+#define VPI_24_RSVD_DESC SIG_DESC_SET(SCU90, 5)
+
+#define T1 89
+#define T1_DESC SIG_DESC_SET(SCU84, 17)
+SIG_EXPR_LIST_DECL_SINGLE(VPIDE, VPI24, VPI_24_RSVD_DESC, T1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD1, NDCD1, T1_DESC, COND2);
+MS_PIN_DECL(T1, GPIOL1, VPIDE, NDCD1);
+FUNC_GROUP_DECL(NDCD1, T1);
+
+#define U1 90
+#define U1_DESC SIG_DESC_SET(SCU84, 18)
+SIG_EXPR_LIST_DECL_SINGLE(DASHU1, VPI24, VPI_24_RSVD_DESC, U1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR1, NDSR1, U1_DESC);
+MS_PIN_DECL(U1, GPIOL2, DASHU1, NDSR1);
+FUNC_GROUP_DECL(NDSR1, U1);
+
+#define U2 91
+#define U2_DESC SIG_DESC_SET(SCU84, 19)
+SIG_EXPR_LIST_DECL_SINGLE(VPIHS, VPI24, VPI_24_RSVD_DESC, U2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRI1, NRI1, U2_DESC, COND2);
+MS_PIN_DECL(U2, GPIOL3, VPIHS, NRI1);
+FUNC_GROUP_DECL(NRI1, U2);
+
+#define P4 92
+#define P4_DESC SIG_DESC_SET(SCU84, 20)
+SIG_EXPR_LIST_DECL_SINGLE(VPIVS, VPI24, VPI_24_RSVD_DESC, P4_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR1, NDTR1, P4_DESC, COND2);
+MS_PIN_DECL(P4, GPIOL4, VPIVS, NDTR1);
+FUNC_GROUP_DECL(NDTR1, P4);
+
+#define P3 93
+#define P3_DESC SIG_DESC_SET(SCU84, 21)
+SIG_EXPR_LIST_DECL_SINGLE(VPICLK, VPI24, VPI_24_RSVD_DESC, P3_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS1, NRTS1, P3_DESC, COND2);
+MS_PIN_DECL(P3, GPIOL5, VPICLK, NRTS1);
+FUNC_GROUP_DECL(NRTS1, P3);
+
+#define V1 94
+#define V1_DESC SIG_DESC_SET(SCU84, 22)
+SIG_EXPR_LIST_DECL_SINGLE(DASHV1, DASHV1, VPIRSVD_DESC, V1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TXD1, TXD1, V1_DESC, COND2);
+MS_PIN_DECL(V1, GPIOL6, DASHV1, TXD1);
+FUNC_GROUP_DECL(TXD1, V1);
+
+#define W1 95
+#define W1_DESC SIG_DESC_SET(SCU84, 23)
+SIG_EXPR_LIST_DECL_SINGLE(DASHW1, DASHW1, VPIRSVD_DESC, W1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RXD1, RXD1, W1_DESC, COND2);
+MS_PIN_DECL(W1, GPIOL7, DASHW1, RXD1);
+FUNC_GROUP_DECL(RXD1, W1);
+
+#define Y1 96
+#define Y1_DESC SIG_DESC_SET(SCU84, 24)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB2, VPI24, VPI_24_RSVD_DESC, Y1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NCTS2, NCTS2, Y1_DESC, COND2);
+MS_PIN_DECL(Y1, GPIOM0, VPIB2, NCTS2);
+FUNC_GROUP_DECL(NCTS2, Y1);
+
+#define AB2 97
+#define AB2_DESC SIG_DESC_SET(SCU84, 25)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB3, VPI24, VPI_24_RSVD_DESC, AB2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD2, NDCD2, AB2_DESC, COND2);
+MS_PIN_DECL(AB2, GPIOM1, VPIB3, NDCD2);
+FUNC_GROUP_DECL(NDCD2, AB2);
+
+#define AA1 98
+#define AA1_DESC SIG_DESC_SET(SCU84, 26)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB4, VPI24, VPI_24_RSVD_DESC, AA1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR2, NDSR2, AA1_DESC, COND2);
+MS_PIN_DECL(AA1, GPIOM2, VPIB4, NDSR2);
+FUNC_GROUP_DECL(NDSR2, AA1);
+
+#define Y2 99
+#define Y2_DESC SIG_DESC_SET(SCU84, 27)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB5, VPI24, VPI_24_RSVD_DESC, Y2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRI2, NRI2, Y2_DESC, COND2);
+MS_PIN_DECL(Y2, GPIOM3, VPIB5, NRI2);
+FUNC_GROUP_DECL(NRI2, Y2);
+
+#define AA2 100
+#define AA2_DESC SIG_DESC_SET(SCU84, 28)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB6, VPI24, VPI_24_RSVD_DESC, AA2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR2, NDTR2, AA2_DESC, COND2);
+MS_PIN_DECL(AA2, GPIOM4, VPIB6, NDTR2);
+FUNC_GROUP_DECL(NDTR2, AA2);
+
+#define P5 101
+#define P5_DESC SIG_DESC_SET(SCU84, 29)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB7, VPI24, VPI_24_RSVD_DESC, P5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS2, NRTS2, P5_DESC, COND2);
+MS_PIN_DECL(P5, GPIOM5, VPIB7, NRTS2);
+FUNC_GROUP_DECL(NRTS2, P5);
+
+#define R5 102
+#define R5_DESC SIG_DESC_SET(SCU84, 30)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB8, VPI24, VPI_24_RSVD_DESC, R5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(TXD2, TXD2, R5_DESC, COND2);
+MS_PIN_DECL(R5, GPIOM6, VPIB8, TXD2);
+FUNC_GROUP_DECL(TXD2, R5);
+
+#define T5 103
+#define T5_DESC SIG_DESC_SET(SCU84, 31)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB9, VPI24, VPI_24_RSVD_DESC, T5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(RXD2, RXD2, T5_DESC, COND2);
+MS_PIN_DECL(T5, GPIOM7, VPIB9, RXD2);
+FUNC_GROUP_DECL(RXD2, T5);
#define V2 104
#define V2_DESC SIG_DESC_SET(SCU88, 0)
@@ -394,9 +827,88 @@ SIG_EXPR_LIST_DECL_SINGLE(PWM7, PWM7, T4_DESC, COND2);
MS_PIN_DECL(T4, GPION7, VPIG7, PWM7);
FUNC_GROUP_DECL(PWM7, T4);
+#define U5 112
+SIG_EXPR_LIST_DECL_SINGLE(VPIG8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 8),
+ COND2);
+SS_PIN_DECL(U5, GPIOO0, VPIG8);
+
+#define U4 113
+SIG_EXPR_LIST_DECL_SINGLE(VPIG9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 9),
+ COND2);
+SS_PIN_DECL(U4, GPIOO1, VPIG9);
+
+#define V5 114
+SIG_EXPR_LIST_DECL_SINGLE(DASHV5, DASHV5, VPI_24_RSVD_DESC,
+ SIG_DESC_SET(SCU88, 10));
+SS_PIN_DECL(V5, GPIOO2, DASHV5);
+
+#define AB4 115
+SIG_EXPR_LIST_DECL_SINGLE(DASHAB4, DASHAB4, VPI_24_RSVD_DESC,
+ SIG_DESC_SET(SCU88, 11));
+SS_PIN_DECL(AB4, GPIOO3, DASHAB4);
+
+#define AB3 116
+SIG_EXPR_LIST_DECL_SINGLE(VPIR2, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 12),
+ COND2);
+SS_PIN_DECL(AB3, GPIOO4, VPIR2);
+
+#define Y4 117
+SIG_EXPR_LIST_DECL_SINGLE(VPIR3, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 13),
+ COND2);
+SS_PIN_DECL(Y4, GPIOO5, VPIR3);
+
+#define AA4 118
+SIG_EXPR_LIST_DECL_SINGLE(VPIR4, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 14),
+ COND2);
+SS_PIN_DECL(AA4, GPIOO6, VPIR4);
+
+#define W4 119
+SIG_EXPR_LIST_DECL_SINGLE(VPIR5, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 15),
+ COND2);
+SS_PIN_DECL(W4, GPIOO7, VPIR5);
+
+#define V4 120
+SIG_EXPR_LIST_DECL_SINGLE(VPIR6, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 16),
+ COND2);
+SS_PIN_DECL(V4, GPIOP0, VPIR6);
+
+#define W5 121
+SIG_EXPR_LIST_DECL_SINGLE(VPIR7, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 17),
+ COND2);
+SS_PIN_DECL(W5, GPIOP1, VPIR7);
+
+#define AA5 122
+SIG_EXPR_LIST_DECL_SINGLE(VPIR8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 18),
+ COND2);
+SS_PIN_DECL(AA5, GPIOP2, VPIR8);
+
+#define AB5 123
+SIG_EXPR_LIST_DECL_SINGLE(VPIR9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 19),
+ COND2);
+SS_PIN_DECL(AB5, GPIOP3, VPIR9);
+
+FUNC_GROUP_DECL(VPI24, T1, U2, P4, P3, Y1, AB2, AA1, Y2, AA2, P5, R5, T5, V3,
+ U3, W3, AA3, Y3, T4, U5, U4, AB3, Y4, AA4, W4, V4, W5, AA5,
+ AB5);
+
+#define Y6 124
+SIG_EXPR_LIST_DECL_SINGLE(DASHY6, DASHY6, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 20));
+SS_PIN_DECL(Y6, GPIOP4, DASHY6);
+
+#define Y5 125
+SIG_EXPR_LIST_DECL_SINGLE(DASHY5, DASHY5, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 21));
+SS_PIN_DECL(Y5, GPIOP5, DASHY5);
+
+#define W6 126
+SIG_EXPR_LIST_DECL_SINGLE(DASHW6, DASHW6, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 22));
+SS_PIN_DECL(W6, GPIOP6, DASHW6);
+
#define V6 127
SIG_EXPR_LIST_DECL_SINGLE(DASHV6, DASHV6, SIG_DESC_SET(SCU90, 28),
- SIG_DESC_SET(SCU88, 23));
+ SIG_DESC_SET(SCU88, 23));
SS_PIN_DECL(V6, GPIOP7, DASHV6);
#define I2C3_DESC SIG_DESC_SET(SCU90, 16)
@@ -441,6 +953,24 @@ SSSF_PIN_DECL(B10, GPIOQ6, OSCCLK, SIG_DESC_SET(SCU2C, 1));
#define N20 135
SSSF_PIN_DECL(N20, GPIOQ7, PEWAKE, SIG_DESC_SET(SCU2C, 29));
+#define AA19 136
+SSSF_PIN_DECL(AA19, GPIOR0, FWSPICS1, SIG_DESC_SET(SCU88, 24), COND2);
+
+#define T19 137
+SSSF_PIN_DECL(T19, GPIOR1, FWSPICS2, SIG_DESC_SET(SCU88, 25), COND2);
+
+#define T17 138
+SSSF_PIN_DECL(T17, GPIOR2, SPI2CS0, SIG_DESC_SET(SCU88, 26), COND2);
+
+#define Y19 139
+SSSF_PIN_DECL(Y19, GPIOR3, SPI2CK, SIG_DESC_SET(SCU88, 27), COND2);
+
+#define W19 140
+SSSF_PIN_DECL(W19, GPIOR4, SPI2MOSI, SIG_DESC_SET(SCU88, 28), COND2);
+
+#define V19 141
+SSSF_PIN_DECL(V19, GPIOR5, SPI2MISO, SIG_DESC_SET(SCU88, 29), COND2);
+
#define D8 142
SIG_EXPR_LIST_DECL_SINGLE(MDC1, MDIO1, SIG_DESC_SET(SCU88, 30));
SS_PIN_DECL(D8, GPIOR6, MDC1);
@@ -451,6 +981,93 @@ SS_PIN_DECL(E10, GPIOR7, MDIO1);
FUNC_GROUP_DECL(MDIO1, D8, E10);
+#define VPOOFF0_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+#define VPO_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 1, 0 }
+#define VPOOFF1_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 2, 0 }
+#define VPOOFF2_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 3, 0 }
+
+#define CRT_DVO_EN_DESC SIG_DESC_IP_SET(ASPEED_IP_GFX, GFX064, 7)
+
+#define V20 144
+#define V20_DESC SIG_DESC_SET(SCU8C, 0)
+SIG_EXPR_DECL(VPOB2, VPO, V20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF1, V20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF2, V20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB2, SIG_EXPR_PTR(VPOB2, VPO),
+ SIG_EXPR_PTR(VPOB2, VPOOFF1), SIG_EXPR_PTR(VPOB2, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SPI2CS1, SPI2CS1, V20_DESC);
+MS_PIN_DECL(V20, GPIOS0, VPOB2, SPI2CS1);
+FUNC_GROUP_DECL(SPI2CS1, V20);
+
+#define U19 145
+#define U19_DESC SIG_DESC_SET(SCU8C, 1)
+SIG_EXPR_DECL(VPOB3, VPO, U19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF1, U19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF2, U19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB3, SIG_EXPR_PTR(VPOB3, VPO),
+ SIG_EXPR_PTR(VPOB3, VPOOFF1), SIG_EXPR_PTR(VPOB3, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(BMCINT, BMCINT, U19_DESC);
+MS_PIN_DECL(U19, GPIOS1, VPOB3, BMCINT);
+FUNC_GROUP_DECL(BMCINT, U19);
+
+#define R18 146
+#define R18_DESC SIG_DESC_SET(SCU8C, 2)
+SIG_EXPR_DECL(VPOB4, VPO, R18_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF1, R18_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF2, R18_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB4, SIG_EXPR_PTR(VPOB4, VPO),
+ SIG_EXPR_PTR(VPOB4, VPOOFF1), SIG_EXPR_PTR(VPOB4, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT5, SALT5, R18_DESC);
+MS_PIN_DECL(R18, GPIOS2, VPOB4, SALT5);
+FUNC_GROUP_DECL(SALT5, R18);
+
+#define P18 147
+#define P18_DESC SIG_DESC_SET(SCU8C, 3)
+SIG_EXPR_DECL(VPOB5, VPO, P18_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF1, P18_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF2, P18_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB5, SIG_EXPR_PTR(VPOB5, VPO),
+ SIG_EXPR_PTR(VPOB5, VPOOFF1), SIG_EXPR_PTR(VPOB5, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT6, SALT6, P18_DESC);
+MS_PIN_DECL(P18, GPIOS3, VPOB5, SALT6);
+FUNC_GROUP_DECL(SALT6, P18);
+
+#define R19 148
+#define R19_DESC SIG_DESC_SET(SCU8C, 4)
+SIG_EXPR_DECL(VPOB6, VPO, R19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF1, R19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF2, R19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB6, SIG_EXPR_PTR(VPOB6, VPO),
+ SIG_EXPR_PTR(VPOB6, VPOOFF1), SIG_EXPR_PTR(VPOB6, VPOOFF2));
+SS_PIN_DECL(R19, GPIOS4, VPOB6);
+
+#define W20 149
+#define W20_DESC SIG_DESC_SET(SCU8C, 5)
+SIG_EXPR_DECL(VPOB7, VPO, W20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF1, W20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF2, W20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB7, SIG_EXPR_PTR(VPOB7, VPO),
+ SIG_EXPR_PTR(VPOB7, VPOOFF1), SIG_EXPR_PTR(VPOB7, VPOOFF2));
+SS_PIN_DECL(W20, GPIOS5, VPOB7);
+
+#define U20 150
+#define U20_DESC SIG_DESC_SET(SCU8C, 6)
+SIG_EXPR_DECL(VPOB8, VPO, U20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB8, VPOOFF1, U20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB8, VPOOFF2, U20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB8, SIG_EXPR_PTR(VPOB8, VPO),
+ SIG_EXPR_PTR(VPOB8, VPOOFF1), SIG_EXPR_PTR(VPOB8, VPOOFF2));
+SS_PIN_DECL(U20, GPIOS6, VPOB8);
+
+#define AA20 151
+#define AA20_DESC SIG_DESC_SET(SCU8C, 7)
+SIG_EXPR_DECL(VPOB9, VPO, AA20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB9, VPOOFF1, AA20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB9, VPOOFF2, AA20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB9, SIG_EXPR_PTR(VPOB9, VPO),
+ SIG_EXPR_PTR(VPOB9, VPOOFF1), SIG_EXPR_PTR(VPOB9, VPOOFF2));
+SS_PIN_DECL(AA20, GPIOS7, VPOB9);
+
/* RGMII1/RMII1 */
#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
@@ -632,6 +1249,481 @@ MS_PIN_DECL_(E6, SIG_EXPR_LIST_PTR(GPIOV7), SIG_EXPR_LIST_PTR(RMII2RXER),
FUNC_GROUP_DECL(RGMII2, B2, B1, A2, B3, D5, D4, C2, C1, C3, D1, D2, E6);
FUNC_GROUP_DECL(RMII2, B2, B1, A2, B3, C2, C3, D1, D2, E6);
+#define F4 176
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW0, GPIOW0, SIG_DESC_SET(SCUA0, 24));
+SIG_EXPR_LIST_DECL_SINGLE(ADC0, ADC0);
+MS_PIN_DECL_(F4, SIG_EXPR_LIST_PTR(GPIOW0), SIG_EXPR_LIST_PTR(ADC0));
+FUNC_GROUP_DECL(ADC0, F4);
+
+#define F5 177
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW1, GPIOW1, SIG_DESC_SET(SCUA0, 25));
+SIG_EXPR_LIST_DECL_SINGLE(ADC1, ADC1);
+MS_PIN_DECL_(F5, SIG_EXPR_LIST_PTR(GPIOW1), SIG_EXPR_LIST_PTR(ADC1));
+FUNC_GROUP_DECL(ADC1, F5);
+
+#define E2 178
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW2, GPIOW2, SIG_DESC_SET(SCUA0, 26));
+SIG_EXPR_LIST_DECL_SINGLE(ADC2, ADC2);
+MS_PIN_DECL_(E2, SIG_EXPR_LIST_PTR(GPIOW2), SIG_EXPR_LIST_PTR(ADC2));
+FUNC_GROUP_DECL(ADC2, E2);
+
+#define E1 179
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW3, GPIOW3, SIG_DESC_SET(SCUA0, 27));
+SIG_EXPR_LIST_DECL_SINGLE(ADC3, ADC3);
+MS_PIN_DECL_(E1, SIG_EXPR_LIST_PTR(GPIOW3), SIG_EXPR_LIST_PTR(ADC3));
+FUNC_GROUP_DECL(ADC3, E1);
+
+#define F3 180
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW4, GPIOW4, SIG_DESC_SET(SCUA0, 28));
+SIG_EXPR_LIST_DECL_SINGLE(ADC4, ADC4);
+MS_PIN_DECL_(F3, SIG_EXPR_LIST_PTR(GPIOW4), SIG_EXPR_LIST_PTR(ADC4));
+FUNC_GROUP_DECL(ADC4, F3);
+
+#define E3 181
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW5, GPIOW5, SIG_DESC_SET(SCUA0, 29));
+SIG_EXPR_LIST_DECL_SINGLE(ADC5, ADC5);
+MS_PIN_DECL_(E3, SIG_EXPR_LIST_PTR(GPIOW5), SIG_EXPR_LIST_PTR(ADC5));
+FUNC_GROUP_DECL(ADC5, E3);
+
+#define G5 182
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW6, GPIOW6, SIG_DESC_SET(SCUA0, 30));
+SIG_EXPR_LIST_DECL_SINGLE(ADC6, ADC6);
+MS_PIN_DECL_(G5, SIG_EXPR_LIST_PTR(GPIOW6), SIG_EXPR_LIST_PTR(ADC6));
+FUNC_GROUP_DECL(ADC6, G5);
+
+#define G4 183
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW7, GPIOW7, SIG_DESC_SET(SCUA0, 31));
+SIG_EXPR_LIST_DECL_SINGLE(ADC7, ADC7);
+MS_PIN_DECL_(G4, SIG_EXPR_LIST_PTR(GPIOW7), SIG_EXPR_LIST_PTR(ADC7));
+FUNC_GROUP_DECL(ADC7, G4);
+
+#define F2 184
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX0, GPIOX0, SIG_DESC_SET(SCUA4, 0));
+SIG_EXPR_LIST_DECL_SINGLE(ADC8, ADC8);
+MS_PIN_DECL_(F2, SIG_EXPR_LIST_PTR(GPIOX0), SIG_EXPR_LIST_PTR(ADC8));
+FUNC_GROUP_DECL(ADC8, F2);
+
+#define G3 185
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX1, GPIOX1, SIG_DESC_SET(SCUA4, 1));
+SIG_EXPR_LIST_DECL_SINGLE(ADC9, ADC9);
+MS_PIN_DECL_(G3, SIG_EXPR_LIST_PTR(GPIOX1), SIG_EXPR_LIST_PTR(ADC9));
+FUNC_GROUP_DECL(ADC9, G3);
+
+#define G2 186
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX2, GPIOX2, SIG_DESC_SET(SCUA4, 2));
+SIG_EXPR_LIST_DECL_SINGLE(ADC10, ADC10);
+MS_PIN_DECL_(G2, SIG_EXPR_LIST_PTR(GPIOX2), SIG_EXPR_LIST_PTR(ADC10));
+FUNC_GROUP_DECL(ADC10, G2);
+
+#define F1 187
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX3, GPIOX3, SIG_DESC_SET(SCUA4, 3));
+SIG_EXPR_LIST_DECL_SINGLE(ADC11, ADC11);
+MS_PIN_DECL_(F1, SIG_EXPR_LIST_PTR(GPIOX3), SIG_EXPR_LIST_PTR(ADC11));
+FUNC_GROUP_DECL(ADC11, F1);
+
+#define H5 188
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX4, GPIOX4, SIG_DESC_SET(SCUA4, 4));
+SIG_EXPR_LIST_DECL_SINGLE(ADC12, ADC12);
+MS_PIN_DECL_(H5, SIG_EXPR_LIST_PTR(GPIOX4), SIG_EXPR_LIST_PTR(ADC12));
+FUNC_GROUP_DECL(ADC12, H5);
+
+#define G1 189
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX5, GPIOX5, SIG_DESC_SET(SCUA4, 5));
+SIG_EXPR_LIST_DECL_SINGLE(ADC13, ADC13);
+MS_PIN_DECL_(G1, SIG_EXPR_LIST_PTR(GPIOX5), SIG_EXPR_LIST_PTR(ADC13));
+FUNC_GROUP_DECL(ADC13, G1);
+
+#define H3 190
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX6, GPIOX6, SIG_DESC_SET(SCUA4, 6));
+SIG_EXPR_LIST_DECL_SINGLE(ADC14, ADC14);
+MS_PIN_DECL_(H3, SIG_EXPR_LIST_PTR(GPIOX6), SIG_EXPR_LIST_PTR(ADC14));
+FUNC_GROUP_DECL(ADC14, H3);
+
+#define H4 191
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX7, GPIOX7, SIG_DESC_SET(SCUA4, 7));
+SIG_EXPR_LIST_DECL_SINGLE(ADC15, ADC15);
+MS_PIN_DECL_(H4, SIG_EXPR_LIST_PTR(GPIOX7), SIG_EXPR_LIST_PTR(ADC15));
+FUNC_GROUP_DECL(ADC15, H4);
+
+#define ACPI_DESC SIG_DESC_SET(HW_STRAP1, 19)
+
+#define R22 192
+SIG_EXPR_DECL(SIOS3, SIOS3, SIG_DESC_SET(SCUA4, 8));
+SIG_EXPR_DECL(SIOS3, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS3, SIOS3, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHR22, DASHR22, SIG_DESC_SET(SCU94, 10));
+MS_PIN_DECL(R22, GPIOY0, SIOS3, DASHR22);
+FUNC_GROUP_DECL(SIOS3, R22);
+
+#define R21 193
+SIG_EXPR_DECL(SIOS5, SIOS5, SIG_DESC_SET(SCUA4, 9));
+SIG_EXPR_DECL(SIOS5, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS5, SIOS5, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHR21, DASHR21, SIG_DESC_SET(SCU94, 10));
+MS_PIN_DECL(R21, GPIOY1, SIOS5, DASHR21);
+FUNC_GROUP_DECL(SIOS5, R21);
+
+#define P22 194
+SIG_EXPR_DECL(SIOPWREQ, SIOPWREQ, SIG_DESC_SET(SCUA4, 10));
+SIG_EXPR_DECL(SIOPWREQ, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWREQ, SIOPWREQ, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHP22, DASHP22, SIG_DESC_SET(SCU94, 11));
+MS_PIN_DECL(P22, GPIOY2, SIOPWREQ, DASHP22);
+FUNC_GROUP_DECL(SIOPWREQ, P22);
+
+#define P21 195
+SIG_EXPR_DECL(SIOONCTRL, SIOONCTRL, SIG_DESC_SET(SCUA4, 11));
+SIG_EXPR_DECL(SIOONCTRL, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOONCTRL, SIOONCTRL, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHP21, DASHP21, SIG_DESC_SET(SCU94, 11));
+MS_PIN_DECL(P21, GPIOY3, SIOONCTRL, DASHP21);
+FUNC_GROUP_DECL(SIOONCTRL, P21);
+
+#define M18 196
+SSSF_PIN_DECL(M18, GPIOY4, SCL1, SIG_DESC_SET(SCUA4, 12));
+
+#define M19 197
+SSSF_PIN_DECL(M19, GPIOY5, SDA1, SIG_DESC_SET(SCUA4, 13));
+
+#define M20 198
+SSSF_PIN_DECL(M20, GPIOY6, SCL2, SIG_DESC_SET(SCUA4, 14));
+
+#define P20 199
+SSSF_PIN_DECL(P20, GPIOY7, SDA2, SIG_DESC_SET(SCUA4, 15));
+
+#define PNOR_DESC SIG_DESC_SET(SCU90, 31)
+
+#define Y20 200
+#define Y20_DESC SIG_DESC_SET(SCUA4, 16)
+SIG_EXPR_DECL(VPOG2, VPO, Y20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF1, Y20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF2, Y20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG2, SIG_EXPR_PTR(VPOG2, VPO),
+ SIG_EXPR_PTR(VPOG2, VPOOFF1), SIG_EXPR_PTR(VPOG2, VPOOFF2));
+SIG_EXPR_DECL(SIOPBI, SIOPBI, Y20_DESC);
+SIG_EXPR_DECL(SIOPBI, ACPI, Y20_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBI, SIOPBI, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA0, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ0, GPIOZ0);
+MS_PIN_DECL_(Y20, SIG_EXPR_LIST_PTR(VPOG2), SIG_EXPR_LIST_PTR(SIOPBI),
+ SIG_EXPR_LIST_PTR(NORA0), SIG_EXPR_LIST_PTR(GPIOZ0));
+FUNC_GROUP_DECL(SIOPBI, Y20);
+
+#define AB20 201
+#define AB20_DESC SIG_DESC_SET(SCUA4, 17)
+SIG_EXPR_DECL(VPOG3, VPO, AB20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF1, AB20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF2, AB20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG3, SIG_EXPR_PTR(VPOG3, VPO),
+ SIG_EXPR_PTR(VPOG3, VPOOFF1), SIG_EXPR_PTR(VPOG3, VPOOFF2));
+SIG_EXPR_DECL(SIOPWRGD, SIOPWRGD, AB20_DESC);
+SIG_EXPR_DECL(SIOPWRGD, ACPI, AB20_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWRGD, SIOPWRGD, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA1, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ1, GPIOZ1);
+MS_PIN_DECL_(AB20, SIG_EXPR_LIST_PTR(VPOG3), SIG_EXPR_LIST_PTR(SIOPWRGD),
+ SIG_EXPR_LIST_PTR(NORA1), SIG_EXPR_LIST_PTR(GPIOZ1));
+FUNC_GROUP_DECL(SIOPWRGD, AB20);
+
+#define AB21 202
+#define AB21_DESC SIG_DESC_SET(SCUA4, 18)
+SIG_EXPR_DECL(VPOG4, VPO, AB21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF1, AB21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF2, AB21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG4, SIG_EXPR_PTR(VPOG4, VPO),
+ SIG_EXPR_PTR(VPOG4, VPOOFF1), SIG_EXPR_PTR(VPOG4, VPOOFF2));
+SIG_EXPR_DECL(SIOPBO, SIOPBO, AB21_DESC);
+SIG_EXPR_DECL(SIOPBO, ACPI, AB21_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBO, SIOPBO, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA2, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ2, GPIOZ2);
+MS_PIN_DECL_(AB21, SIG_EXPR_LIST_PTR(VPOG4), SIG_EXPR_LIST_PTR(SIOPBO),
+ SIG_EXPR_LIST_PTR(NORA2), SIG_EXPR_LIST_PTR(GPIOZ2));
+FUNC_GROUP_DECL(SIOPBO, AB21);
+
+#define AA21 203
+#define AA21_DESC SIG_DESC_SET(SCUA4, 19)
+SIG_EXPR_DECL(VPOG5, VPO, AA21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF1, AA21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF2, AA21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG5, SIG_EXPR_PTR(VPOG5, VPO),
+ SIG_EXPR_PTR(VPOG5, VPOOFF1), SIG_EXPR_PTR(VPOG5, VPOOFF2));
+SIG_EXPR_DECL(SIOSCI, SIOSCI, AA21_DESC);
+SIG_EXPR_DECL(SIOSCI, ACPI, AA21_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOSCI, SIOSCI, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA3, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ3, GPIOZ3);
+MS_PIN_DECL_(AA21, SIG_EXPR_LIST_PTR(VPOG5), SIG_EXPR_LIST_PTR(SIOSCI),
+ SIG_EXPR_LIST_PTR(NORA3), SIG_EXPR_LIST_PTR(GPIOZ3));
+FUNC_GROUP_DECL(SIOSCI, AA21);
+
+FUNC_GROUP_DECL(ACPI, R22, R21, P22, P21, Y20, AB20, AB21, AA21);
+
+/* CRT DVO disabled, configured for single-edge mode */
+#define CRT_DVO_DS_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 0, 0 }
+
+/* CRT DVO disabled, configured for dual-edge mode */
+#define CRT_DVO_DD_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 1, 1 }
+
+/* CRT DVO enabled, configured for single-edge mode */
+#define CRT_DVO_ES_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 2, 2 }
+
+/* CRT DVO enabled, configured for dual-edge mode */
+#define CRT_DVO_ED_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 3, 3 }
+
+#define U21 204
+#define U21_DESC SIG_DESC_SET(SCUA4, 20)
+SIG_EXPR_DECL(VPOG6, VPO, U21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF1, U21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF2, U21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG6, SIG_EXPR_PTR(VPOG6, VPO),
+ SIG_EXPR_PTR(VPOG6, VPOOFF1), SIG_EXPR_PTR(VPOG6, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA4, PNOR, PNOR_DESC);
+MS_PIN_DECL(U21, GPIOZ4, VPOG6, NORA4);
+
+#define W22 205
+#define W22_DESC SIG_DESC_SET(SCUA4, 21)
+SIG_EXPR_DECL(VPOG7, VPO, W22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF1, W22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF2, W22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG7, SIG_EXPR_PTR(VPOG7, VPO),
+ SIG_EXPR_PTR(VPOG7, VPOOFF1), SIG_EXPR_PTR(VPOG7, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA5, PNOR, PNOR_DESC);
+MS_PIN_DECL(W22, GPIOZ5, VPOG7, NORA5);
+
+#define V22 206
+#define V22_DESC SIG_DESC_SET(SCUA4, 22)
+SIG_EXPR_DECL(VPOG8, VPO, V22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG8, VPOOFF1, V22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG8, VPOOFF2, V22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG8, SIG_EXPR_PTR(VPOG8, VPO),
+ SIG_EXPR_PTR(VPOG8, VPOOFF1), SIG_EXPR_PTR(VPOG8, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA6, PNOR, PNOR_DESC);
+MS_PIN_DECL(V22, GPIOZ6, VPOG8, NORA6);
+
+#define W21 207
+#define W21_DESC SIG_DESC_SET(SCUA4, 23)
+SIG_EXPR_DECL(VPOG9, VPO, W21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG9, VPOOFF1, W21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG9, VPOOFF2, W21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG9, SIG_EXPR_PTR(VPOG9, VPO),
+ SIG_EXPR_PTR(VPOG9, VPOOFF1), SIG_EXPR_PTR(VPOG9, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA7, PNOR, PNOR_DESC);
+MS_PIN_DECL(W21, GPIOZ7, VPOG9, NORA7);
+
+#define Y21 208
+#define Y21_DESC SIG_DESC_SET(SCUA4, 24)
+SIG_EXPR_DECL(VPOR2, VPO, Y21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF1, Y21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF2, Y21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR2, SIG_EXPR_PTR(VPOR2, VPO),
+ SIG_EXPR_PTR(VPOR2, VPOOFF1), SIG_EXPR_PTR(VPOR2, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT7, SALT7, Y21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD0, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA0, GPIOAA0);
+MS_PIN_DECL_(Y21, SIG_EXPR_LIST_PTR(VPOR2), SIG_EXPR_LIST_PTR(SALT7),
+ SIG_EXPR_LIST_PTR(NORD0), SIG_EXPR_LIST_PTR(GPIOAA0));
+FUNC_GROUP_DECL(SALT7, Y21);
+
+#define V21 209
+#define V21_DESC SIG_DESC_SET(SCUA4, 25)
+SIG_EXPR_DECL(VPOR3, VPO, V21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF1, V21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF2, V21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR3, SIG_EXPR_PTR(VPOR3, VPO),
+ SIG_EXPR_PTR(VPOR3, VPOOFF1), SIG_EXPR_PTR(VPOR3, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT8, SALT8, V21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD1, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA1, GPIOAA1);
+MS_PIN_DECL_(V21, SIG_EXPR_LIST_PTR(VPOR3), SIG_EXPR_LIST_PTR(SALT8),
+ SIG_EXPR_LIST_PTR(NORD1), SIG_EXPR_LIST_PTR(GPIOAA1));
+FUNC_GROUP_DECL(SALT8, V21);
+
+#define Y22 210
+#define Y22_DESC SIG_DESC_SET(SCUA4, 26)
+SIG_EXPR_DECL(VPOR4, VPO, Y22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR4, VPOOFF1, Y22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR4, VPOOFF2, Y22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR4, SIG_EXPR_PTR(VPOR4, VPO),
+ SIG_EXPR_PTR(VPOR4, VPOOFF1), SIG_EXPR_PTR(VPOR4, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT9, SALT9, Y22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD2, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA2, GPIOAA2);
+MS_PIN_DECL_(Y22, SIG_EXPR_LIST_PTR(VPOR4), SIG_EXPR_LIST_PTR(SALT9),
+ SIG_EXPR_LIST_PTR(NORD2), SIG_EXPR_LIST_PTR(GPIOAA2));
+FUNC_GROUP_DECL(SALT9, Y22);
+
+#define AA22 211
+#define AA22_DESC SIG_DESC_SET(SCUA4, 27)
+SIG_EXPR_DECL(VPOR5, VPO, AA22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR5, VPOOFF1, AA22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR5, VPOOFF2, AA22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR5, SIG_EXPR_PTR(VPOR5, VPO),
+ SIG_EXPR_PTR(VPOR5, VPOOFF1), SIG_EXPR_PTR(VPOR5, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT10, SALT10, AA22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD3, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA3, GPIOAA3);
+MS_PIN_DECL_(AA22, SIG_EXPR_LIST_PTR(VPOR5), SIG_EXPR_LIST_PTR(SALT10),
+ SIG_EXPR_LIST_PTR(NORD3), SIG_EXPR_LIST_PTR(GPIOAA3));
+FUNC_GROUP_DECL(SALT10, AA22);
+
+#define U22 212
+#define U22_DESC SIG_DESC_SET(SCUA4, 28)
+SIG_EXPR_DECL(VPOR6, VPO, U22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR6, VPOOFF1, U22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR6, VPOOFF2, U22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR6, SIG_EXPR_PTR(VPOR6, VPO),
+ SIG_EXPR_PTR(VPOR6, VPOOFF1), SIG_EXPR_PTR(VPOR6, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT11, SALT11, U22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD4, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA4, GPIOAA4);
+MS_PIN_DECL_(U22, SIG_EXPR_LIST_PTR(VPOR6), SIG_EXPR_LIST_PTR(SALT11),
+ SIG_EXPR_LIST_PTR(NORD4), SIG_EXPR_LIST_PTR(GPIOAA4));
+FUNC_GROUP_DECL(SALT11, U22);
+
+#define T20 213
+#define T20_DESC SIG_DESC_SET(SCUA4, 29)
+SIG_EXPR_DECL(VPOR7, VPO, T20_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR7, VPOOFF1, T20_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR7, VPOOFF2, T20_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR7, SIG_EXPR_PTR(VPOR7, VPO),
+ SIG_EXPR_PTR(VPOR7, VPOOFF1), SIG_EXPR_PTR(VPOR7, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT12, SALT12, T20_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD5, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA5, GPIOAA5);
+MS_PIN_DECL_(T20, SIG_EXPR_LIST_PTR(VPOR7), SIG_EXPR_LIST_PTR(SALT12),
+ SIG_EXPR_LIST_PTR(NORD5), SIG_EXPR_LIST_PTR(GPIOAA5));
+FUNC_GROUP_DECL(SALT12, T20);
+
+#define N18 214
+#define N18_DESC SIG_DESC_SET(SCUA4, 30)
+SIG_EXPR_DECL(VPOR8, VPO, N18_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR8, VPOOFF1, N18_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR8, VPOOFF2, N18_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR8, SIG_EXPR_PTR(VPOR8, VPO),
+ SIG_EXPR_PTR(VPOR8, VPOOFF1), SIG_EXPR_PTR(VPOR8, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT13, SALT13, N18_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD6, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA6, GPIOAA6);
+MS_PIN_DECL_(N18, SIG_EXPR_LIST_PTR(VPOR8), SIG_EXPR_LIST_PTR(SALT13),
+ SIG_EXPR_LIST_PTR(NORD6), SIG_EXPR_LIST_PTR(GPIOAA6));
+FUNC_GROUP_DECL(SALT13, N18);
+
+#define P19 215
+#define P19_DESC SIG_DESC_SET(SCUA4, 31)
+SIG_EXPR_DECL(VPOR9, VPO, P19_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR9, VPOOFF1, P19_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR9, VPOOFF2, P19_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR9, SIG_EXPR_PTR(VPOR9, VPO),
+ SIG_EXPR_PTR(VPOR9, VPOOFF1), SIG_EXPR_PTR(VPOR9, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT14, SALT14, P19_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD7, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA7, GPIOAA7);
+MS_PIN_DECL_(P19, SIG_EXPR_LIST_PTR(VPOR9), SIG_EXPR_LIST_PTR(SALT14),
+ SIG_EXPR_LIST_PTR(NORD7), SIG_EXPR_LIST_PTR(GPIOAA7));
+FUNC_GROUP_DECL(SALT14, P19);
+
+#define N19 216
+#define N19_DESC SIG_DESC_SET(SCUA8, 0)
+SIG_EXPR_DECL(VPODE, VPO, N19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPODE, VPOOFF1, N19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPODE, VPOOFF2, N19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPODE, SIG_EXPR_PTR(VPODE, VPO),
+ SIG_EXPR_PTR(VPODE, VPOOFF1), SIG_EXPR_PTR(VPODE, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NOROE, PNOR, PNOR_DESC);
+MS_PIN_DECL(N19, GPIOAB0, VPODE, NOROE);
+
+#define T21 217
+#define T21_DESC SIG_DESC_SET(SCUA8, 1)
+SIG_EXPR_DECL(VPOHS, VPO, T21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOHS, VPOOFF1, T21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOHS, VPOOFF2, T21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOHS, SIG_EXPR_PTR(VPOHS, VPO),
+ SIG_EXPR_PTR(VPOHS, VPOOFF1), SIG_EXPR_PTR(VPOHS, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORWE, PNOR, PNOR_DESC);
+MS_PIN_DECL(T21, GPIOAB1, VPOHS, NORWE);
+
+FUNC_GROUP_DECL(PNOR, Y20, AB20, AB21, AA21, U21, W22, V22, W21, Y21, V21, Y22,
+ AA22, U22, T20, N18, P19, N19, T21);
+
+#define T22 218
+#define T22_DESC SIG_DESC_SET(SCUA8, 2)
+SIG_EXPR_DECL(VPOVS, VPO, T22_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOVS, VPOOFF1, T22_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOVS, VPOOFF2, T22_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOVS, SIG_EXPR_PTR(VPOVS, VPO),
+ SIG_EXPR_PTR(VPOVS, VPOOFF1), SIG_EXPR_PTR(VPOVS, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST1, WDTRST1, T22_DESC);
+MS_PIN_DECL(T22, GPIOAB2, VPOVS, WDTRST1);
+FUNC_GROUP_DECL(WDTRST1, T22);
+
+#define R20 219
+#define R20_DESC SIG_DESC_SET(SCUA8, 3)
+SIG_EXPR_DECL(VPOCLK, VPO, R20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOCLK, VPOOFF1, R20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOCLK, VPOOFF2, R20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOCLK, SIG_EXPR_PTR(VPOCLK, VPO),
+ SIG_EXPR_PTR(VPOCLK, VPOOFF1), SIG_EXPR_PTR(VPOCLK, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST2, WDTRST2, R20_DESC);
+MS_PIN_DECL(R20, GPIOAB3, VPOCLK, WDTRST2);
+FUNC_GROUP_DECL(WDTRST2, R20);
+
+FUNC_GROUP_DECL(VPO, V20, U19, R18, P18, R19, W20, U20, AA20, Y20, AB20,
+ AB21, AA21, U21, W22, V22, W21, Y21, V21, Y22, AA22, U22, T20,
+ N18, P19, N19, T21, T22, R20);
+
+#define ESPI_DESC SIG_DESC_SET(HW_STRAP1, 25)
+
+#define G21 224
+SIG_EXPR_LIST_DECL_SINGLE(ESPID0, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD0, LAD0, SIG_DESC_SET(SCUAC, 0));
+MS_PIN_DECL(G21, GPIOAC0, ESPID0, LAD0);
+FUNC_GROUP_DECL(LAD0, G21);
+
+#define G20 225
+SIG_EXPR_LIST_DECL_SINGLE(ESPID1, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD1, LAD1, SIG_DESC_SET(SCUAC, 1));
+MS_PIN_DECL(G20, GPIOAC1, ESPID1, LAD1);
+FUNC_GROUP_DECL(LAD1, G20);
+
+#define D22 226
+SIG_EXPR_LIST_DECL_SINGLE(ESPID2, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD2, LAD2, SIG_DESC_SET(SCUAC, 2));
+MS_PIN_DECL(D22, GPIOAC2, ESPID2, LAD2);
+FUNC_GROUP_DECL(LAD2, D22);
+
+#define E22 227
+SIG_EXPR_LIST_DECL_SINGLE(ESPID3, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD3, LAD3, SIG_DESC_SET(SCUAC, 3));
+MS_PIN_DECL(E22, GPIOAC3, ESPID3, LAD3);
+FUNC_GROUP_DECL(LAD3, E22);
+
+#define C22 228
+SIG_EXPR_LIST_DECL_SINGLE(ESPICK, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LCLK, LCLK, SIG_DESC_SET(SCUAC, 4));
+MS_PIN_DECL(C22, GPIOAC4, ESPICK, LCLK);
+FUNC_GROUP_DECL(LCLK, C22);
+
+#define F21 229
+SIG_EXPR_LIST_DECL_SINGLE(ESPICS, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LFRAME, LFRAME, SIG_DESC_SET(SCUAC, 5));
+MS_PIN_DECL(F21, GPIOAC5, ESPICS, LFRAME);
+FUNC_GROUP_DECL(LFRAME, F21);
+
+#define F22 230
+SIG_EXPR_LIST_DECL_SINGLE(ESPIALT, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LSIRQ, LSIRQ, SIG_DESC_SET(SCUAC, 6));
+MS_PIN_DECL(F22, GPIOAC6, ESPIALT, LSIRQ);
+FUNC_GROUP_DECL(LSIRQ, F22);
+
+#define G22 231
+SIG_EXPR_LIST_DECL_SINGLE(ESPIRST, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LPCRST, LPCRST, SIG_DESC_SET(SCUAC, 7));
+MS_PIN_DECL(G22, GPIOAC7, ESPIRST, LPCRST);
+FUNC_GROUP_DECL(LPCRST, G22);
+
+FUNC_GROUP_DECL(ESPI, G21, G20, D22, E22, C22, F21, F22, G22);
+
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
@@ -641,12 +1733,32 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
ASPEED_PINCTRL_PIN(A15),
+ ASPEED_PINCTRL_PIN(A16),
+ ASPEED_PINCTRL_PIN(A17),
+ ASPEED_PINCTRL_PIN(A18),
+ ASPEED_PINCTRL_PIN(A19),
ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A20),
+ ASPEED_PINCTRL_PIN(A21),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
ASPEED_PINCTRL_PIN(A5),
ASPEED_PINCTRL_PIN(A9),
+ ASPEED_PINCTRL_PIN(AA1),
+ ASPEED_PINCTRL_PIN(AA19),
+ ASPEED_PINCTRL_PIN(AA2),
+ ASPEED_PINCTRL_PIN(AA20),
+ ASPEED_PINCTRL_PIN(AA21),
+ ASPEED_PINCTRL_PIN(AA22),
ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(AA4),
+ ASPEED_PINCTRL_PIN(AA5),
+ ASPEED_PINCTRL_PIN(AB2),
+ ASPEED_PINCTRL_PIN(AB20),
+ ASPEED_PINCTRL_PIN(AB21),
+ ASPEED_PINCTRL_PIN(AB3),
+ ASPEED_PINCTRL_PIN(AB4),
+ ASPEED_PINCTRL_PIN(AB5),
ASPEED_PINCTRL_PIN(B1),
ASPEED_PINCTRL_PIN(B10),
ASPEED_PINCTRL_PIN(B11),
@@ -655,8 +1767,13 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B14),
ASPEED_PINCTRL_PIN(B15),
ASPEED_PINCTRL_PIN(B16),
+ ASPEED_PINCTRL_PIN(B17),
+ ASPEED_PINCTRL_PIN(B18),
+ ASPEED_PINCTRL_PIN(B19),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
+ ASPEED_PINCTRL_PIN(B21),
+ ASPEED_PINCTRL_PIN(B22),
ASPEED_PINCTRL_PIN(B3),
ASPEED_PINCTRL_PIN(B4),
ASPEED_PINCTRL_PIN(B5),
@@ -668,62 +1785,210 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C14),
ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
+ ASPEED_PINCTRL_PIN(C17),
ASPEED_PINCTRL_PIN(C18),
+ ASPEED_PINCTRL_PIN(C19),
ASPEED_PINCTRL_PIN(C2),
ASPEED_PINCTRL_PIN(C20),
+ ASPEED_PINCTRL_PIN(C21),
+ ASPEED_PINCTRL_PIN(C22),
ASPEED_PINCTRL_PIN(C3),
ASPEED_PINCTRL_PIN(C4),
ASPEED_PINCTRL_PIN(C5),
ASPEED_PINCTRL_PIN(D1),
ASPEED_PINCTRL_PIN(D10),
+ ASPEED_PINCTRL_PIN(D13),
+ ASPEED_PINCTRL_PIN(D14),
+ ASPEED_PINCTRL_PIN(D15),
+ ASPEED_PINCTRL_PIN(D16),
+ ASPEED_PINCTRL_PIN(D17),
+ ASPEED_PINCTRL_PIN(D18),
+ ASPEED_PINCTRL_PIN(D19),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
+ ASPEED_PINCTRL_PIN(D21),
+ ASPEED_PINCTRL_PIN(D22),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
ASPEED_PINCTRL_PIN(D7),
ASPEED_PINCTRL_PIN(D8),
ASPEED_PINCTRL_PIN(D9),
+ ASPEED_PINCTRL_PIN(E1),
ASPEED_PINCTRL_PIN(E10),
ASPEED_PINCTRL_PIN(E12),
ASPEED_PINCTRL_PIN(E13),
+ ASPEED_PINCTRL_PIN(E14),
ASPEED_PINCTRL_PIN(E15),
+ ASPEED_PINCTRL_PIN(E16),
+ ASPEED_PINCTRL_PIN(E17),
+ ASPEED_PINCTRL_PIN(E18),
+ ASPEED_PINCTRL_PIN(E19),
+ ASPEED_PINCTRL_PIN(E2),
+ ASPEED_PINCTRL_PIN(E20),
ASPEED_PINCTRL_PIN(E21),
+ ASPEED_PINCTRL_PIN(E22),
+ ASPEED_PINCTRL_PIN(E3),
ASPEED_PINCTRL_PIN(E6),
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
+ ASPEED_PINCTRL_PIN(F1),
+ ASPEED_PINCTRL_PIN(F17),
+ ASPEED_PINCTRL_PIN(F18),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F2),
ASPEED_PINCTRL_PIN(F20),
+ ASPEED_PINCTRL_PIN(F21),
+ ASPEED_PINCTRL_PIN(F22),
+ ASPEED_PINCTRL_PIN(F3),
+ ASPEED_PINCTRL_PIN(F4),
+ ASPEED_PINCTRL_PIN(F5),
ASPEED_PINCTRL_PIN(F9),
+ ASPEED_PINCTRL_PIN(G1),
+ ASPEED_PINCTRL_PIN(G17),
+ ASPEED_PINCTRL_PIN(G18),
+ ASPEED_PINCTRL_PIN(G2),
+ ASPEED_PINCTRL_PIN(G20),
+ ASPEED_PINCTRL_PIN(G21),
+ ASPEED_PINCTRL_PIN(G22),
+ ASPEED_PINCTRL_PIN(G3),
+ ASPEED_PINCTRL_PIN(G4),
+ ASPEED_PINCTRL_PIN(G5),
+ ASPEED_PINCTRL_PIN(H18),
+ ASPEED_PINCTRL_PIN(H19),
ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(H21),
+ ASPEED_PINCTRL_PIN(H22),
+ ASPEED_PINCTRL_PIN(H3),
+ ASPEED_PINCTRL_PIN(H4),
+ ASPEED_PINCTRL_PIN(H5),
+ ASPEED_PINCTRL_PIN(J18),
+ ASPEED_PINCTRL_PIN(J19),
+ ASPEED_PINCTRL_PIN(J20),
+ ASPEED_PINCTRL_PIN(K18),
+ ASPEED_PINCTRL_PIN(K19),
ASPEED_PINCTRL_PIN(L1),
+ ASPEED_PINCTRL_PIN(L18),
+ ASPEED_PINCTRL_PIN(L19),
ASPEED_PINCTRL_PIN(L2),
ASPEED_PINCTRL_PIN(L3),
ASPEED_PINCTRL_PIN(L4),
+ ASPEED_PINCTRL_PIN(M18),
+ ASPEED_PINCTRL_PIN(M19),
+ ASPEED_PINCTRL_PIN(M20),
ASPEED_PINCTRL_PIN(N1),
+ ASPEED_PINCTRL_PIN(N18),
+ ASPEED_PINCTRL_PIN(N19),
ASPEED_PINCTRL_PIN(N2),
ASPEED_PINCTRL_PIN(N20),
ASPEED_PINCTRL_PIN(N21),
ASPEED_PINCTRL_PIN(N22),
ASPEED_PINCTRL_PIN(N3),
ASPEED_PINCTRL_PIN(N4),
+ ASPEED_PINCTRL_PIN(N5),
ASPEED_PINCTRL_PIN(P1),
+ ASPEED_PINCTRL_PIN(P18),
+ ASPEED_PINCTRL_PIN(P19),
ASPEED_PINCTRL_PIN(P2),
+ ASPEED_PINCTRL_PIN(P20),
+ ASPEED_PINCTRL_PIN(P21),
+ ASPEED_PINCTRL_PIN(P22),
+ ASPEED_PINCTRL_PIN(P3),
+ ASPEED_PINCTRL_PIN(P4),
+ ASPEED_PINCTRL_PIN(P5),
ASPEED_PINCTRL_PIN(R1),
+ ASPEED_PINCTRL_PIN(R18),
+ ASPEED_PINCTRL_PIN(R19),
+ ASPEED_PINCTRL_PIN(R2),
+ ASPEED_PINCTRL_PIN(R20),
+ ASPEED_PINCTRL_PIN(R21),
+ ASPEED_PINCTRL_PIN(R22),
+ ASPEED_PINCTRL_PIN(R3),
+ ASPEED_PINCTRL_PIN(R4),
+ ASPEED_PINCTRL_PIN(R5),
+ ASPEED_PINCTRL_PIN(T1),
+ ASPEED_PINCTRL_PIN(T17),
+ ASPEED_PINCTRL_PIN(T19),
+ ASPEED_PINCTRL_PIN(T2),
+ ASPEED_PINCTRL_PIN(T20),
+ ASPEED_PINCTRL_PIN(T21),
+ ASPEED_PINCTRL_PIN(T22),
+ ASPEED_PINCTRL_PIN(T3),
ASPEED_PINCTRL_PIN(T4),
+ ASPEED_PINCTRL_PIN(T5),
+ ASPEED_PINCTRL_PIN(U1),
+ ASPEED_PINCTRL_PIN(U19),
+ ASPEED_PINCTRL_PIN(U2),
+ ASPEED_PINCTRL_PIN(U20),
+ ASPEED_PINCTRL_PIN(U21),
+ ASPEED_PINCTRL_PIN(U22),
ASPEED_PINCTRL_PIN(U3),
+ ASPEED_PINCTRL_PIN(U4),
+ ASPEED_PINCTRL_PIN(U5),
+ ASPEED_PINCTRL_PIN(V1),
+ ASPEED_PINCTRL_PIN(V19),
ASPEED_PINCTRL_PIN(V2),
+ ASPEED_PINCTRL_PIN(V20),
+ ASPEED_PINCTRL_PIN(V21),
+ ASPEED_PINCTRL_PIN(V22),
ASPEED_PINCTRL_PIN(V3),
+ ASPEED_PINCTRL_PIN(V4),
+ ASPEED_PINCTRL_PIN(V5),
ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(W1),
+ ASPEED_PINCTRL_PIN(W19),
ASPEED_PINCTRL_PIN(W2),
+ ASPEED_PINCTRL_PIN(W20),
+ ASPEED_PINCTRL_PIN(W21),
+ ASPEED_PINCTRL_PIN(W22),
ASPEED_PINCTRL_PIN(W3),
+ ASPEED_PINCTRL_PIN(W4),
+ ASPEED_PINCTRL_PIN(W5),
+ ASPEED_PINCTRL_PIN(W6),
+ ASPEED_PINCTRL_PIN(Y1),
+ ASPEED_PINCTRL_PIN(Y19),
+ ASPEED_PINCTRL_PIN(Y2),
+ ASPEED_PINCTRL_PIN(Y20),
+ ASPEED_PINCTRL_PIN(Y21),
+ ASPEED_PINCTRL_PIN(Y22),
ASPEED_PINCTRL_PIN(Y3),
+ ASPEED_PINCTRL_PIN(Y4),
+ ASPEED_PINCTRL_PIN(Y5),
+ ASPEED_PINCTRL_PIN(Y6),
};
static const struct aspeed_pin_group aspeed_g5_groups[] = {
+ ASPEED_PINCTRL_GROUP(ACPI),
+ ASPEED_PINCTRL_GROUP(ADC0),
+ ASPEED_PINCTRL_GROUP(ADC1),
+ ASPEED_PINCTRL_GROUP(ADC10),
+ ASPEED_PINCTRL_GROUP(ADC11),
+ ASPEED_PINCTRL_GROUP(ADC12),
+ ASPEED_PINCTRL_GROUP(ADC13),
+ ASPEED_PINCTRL_GROUP(ADC14),
+ ASPEED_PINCTRL_GROUP(ADC15),
+ ASPEED_PINCTRL_GROUP(ADC2),
+ ASPEED_PINCTRL_GROUP(ADC3),
+ ASPEED_PINCTRL_GROUP(ADC4),
+ ASPEED_PINCTRL_GROUP(ADC5),
+ ASPEED_PINCTRL_GROUP(ADC6),
+ ASPEED_PINCTRL_GROUP(ADC7),
+ ASPEED_PINCTRL_GROUP(ADC8),
+ ASPEED_PINCTRL_GROUP(ADC9),
+ ASPEED_PINCTRL_GROUP(BMCINT),
+ ASPEED_PINCTRL_GROUP(DDCCLK),
+ ASPEED_PINCTRL_GROUP(DDCDAT),
+ ASPEED_PINCTRL_GROUP(ESPI),
+ ASPEED_PINCTRL_GROUP(FWSPICS1),
+ ASPEED_PINCTRL_GROUP(FWSPICS2),
ASPEED_PINCTRL_GROUP(GPID0),
ASPEED_PINCTRL_GROUP(GPID2),
+ ASPEED_PINCTRL_GROUP(GPID4),
+ ASPEED_PINCTRL_GROUP(GPID6),
ASPEED_PINCTRL_GROUP(GPIE0),
+ ASPEED_PINCTRL_GROUP(GPIE2),
+ ASPEED_PINCTRL_GROUP(GPIE4),
+ ASPEED_PINCTRL_GROUP(GPIE6),
ASPEED_PINCTRL_GROUP(I2C10),
ASPEED_PINCTRL_GROUP(I2C11),
ASPEED_PINCTRL_GROUP(I2C12),
@@ -736,11 +2001,50 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(I2C7),
ASPEED_PINCTRL_GROUP(I2C8),
ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(LAD0),
+ ASPEED_PINCTRL_GROUP(LAD1),
+ ASPEED_PINCTRL_GROUP(LAD2),
+ ASPEED_PINCTRL_GROUP(LAD3),
+ ASPEED_PINCTRL_GROUP(LCLK),
+ ASPEED_PINCTRL_GROUP(LFRAME),
+ ASPEED_PINCTRL_GROUP(LPCHC),
+ ASPEED_PINCTRL_GROUP(LPCPD),
+ ASPEED_PINCTRL_GROUP(LPCPLUS),
+ ASPEED_PINCTRL_GROUP(LPCPME),
+ ASPEED_PINCTRL_GROUP(LPCRST),
+ ASPEED_PINCTRL_GROUP(LPCSMI),
+ ASPEED_PINCTRL_GROUP(LSIRQ),
ASPEED_PINCTRL_GROUP(MAC1LINK),
+ ASPEED_PINCTRL_GROUP(MAC2LINK),
ASPEED_PINCTRL_GROUP(MDIO1),
ASPEED_PINCTRL_GROUP(MDIO2),
+ ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS2),
+ ASPEED_PINCTRL_GROUP(NCTS3),
+ ASPEED_PINCTRL_GROUP(NCTS4),
+ ASPEED_PINCTRL_GROUP(NDCD1),
+ ASPEED_PINCTRL_GROUP(NDCD2),
+ ASPEED_PINCTRL_GROUP(NDCD3),
+ ASPEED_PINCTRL_GROUP(NDCD4),
+ ASPEED_PINCTRL_GROUP(NDSR1),
+ ASPEED_PINCTRL_GROUP(NDSR2),
+ ASPEED_PINCTRL_GROUP(NDSR3),
+ ASPEED_PINCTRL_GROUP(NDSR4),
+ ASPEED_PINCTRL_GROUP(NDTR1),
+ ASPEED_PINCTRL_GROUP(NDTR2),
+ ASPEED_PINCTRL_GROUP(NDTR3),
+ ASPEED_PINCTRL_GROUP(NDTR4),
+ ASPEED_PINCTRL_GROUP(NRI1),
+ ASPEED_PINCTRL_GROUP(NRI2),
+ ASPEED_PINCTRL_GROUP(NRI3),
+ ASPEED_PINCTRL_GROUP(NRI4),
+ ASPEED_PINCTRL_GROUP(NRTS1),
+ ASPEED_PINCTRL_GROUP(NRTS2),
+ ASPEED_PINCTRL_GROUP(NRTS3),
+ ASPEED_PINCTRL_GROUP(NRTS4),
ASPEED_PINCTRL_GROUP(OSCCLK),
ASPEED_PINCTRL_GROUP(PEWAKE),
+ ASPEED_PINCTRL_GROUP(PNOR),
ASPEED_PINCTRL_GROUP(PWM0),
ASPEED_PINCTRL_GROUP(PWM1),
ASPEED_PINCTRL_GROUP(PWM2),
@@ -753,22 +2057,102 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RGMII2),
ASPEED_PINCTRL_GROUP(RMII1),
ASPEED_PINCTRL_GROUP(RMII2),
+ ASPEED_PINCTRL_GROUP(RXD1),
+ ASPEED_PINCTRL_GROUP(RXD2),
+ ASPEED_PINCTRL_GROUP(RXD3),
+ ASPEED_PINCTRL_GROUP(RXD4),
+ ASPEED_PINCTRL_GROUP(SALT1),
+ ASPEED_PINCTRL_GROUP(SALT10),
+ ASPEED_PINCTRL_GROUP(SALT11),
+ ASPEED_PINCTRL_GROUP(SALT12),
+ ASPEED_PINCTRL_GROUP(SALT13),
+ ASPEED_PINCTRL_GROUP(SALT14),
+ ASPEED_PINCTRL_GROUP(SALT2),
+ ASPEED_PINCTRL_GROUP(SALT3),
+ ASPEED_PINCTRL_GROUP(SALT4),
+ ASPEED_PINCTRL_GROUP(SALT5),
+ ASPEED_PINCTRL_GROUP(SALT6),
+ ASPEED_PINCTRL_GROUP(SALT7),
+ ASPEED_PINCTRL_GROUP(SALT8),
+ ASPEED_PINCTRL_GROUP(SALT9),
+ ASPEED_PINCTRL_GROUP(SCL1),
+ ASPEED_PINCTRL_GROUP(SCL2),
ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SD2),
+ ASPEED_PINCTRL_GROUP(SDA1),
+ ASPEED_PINCTRL_GROUP(SDA2),
+ ASPEED_PINCTRL_GROUP(SGPS1),
+ ASPEED_PINCTRL_GROUP(SGPS2),
+ ASPEED_PINCTRL_GROUP(SIOONCTRL),
+ ASPEED_PINCTRL_GROUP(SIOPBI),
+ ASPEED_PINCTRL_GROUP(SIOPBO),
+ ASPEED_PINCTRL_GROUP(SIOPWREQ),
+ ASPEED_PINCTRL_GROUP(SIOPWRGD),
+ ASPEED_PINCTRL_GROUP(SIOS3),
+ ASPEED_PINCTRL_GROUP(SIOS5),
+ ASPEED_PINCTRL_GROUP(SIOSCI),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1CS1),
ASPEED_PINCTRL_GROUP(SPI1DEBUG),
ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
+ ASPEED_PINCTRL_GROUP(SPI2CK),
+ ASPEED_PINCTRL_GROUP(SPI2CS0),
+ ASPEED_PINCTRL_GROUP(SPI2CS1),
+ ASPEED_PINCTRL_GROUP(SPI2MISO),
+ ASPEED_PINCTRL_GROUP(SPI2MOSI),
+ ASPEED_PINCTRL_GROUP(TIMER3),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(TXD1),
+ ASPEED_PINCTRL_GROUP(TXD2),
+ ASPEED_PINCTRL_GROUP(TXD3),
+ ASPEED_PINCTRL_GROUP(TXD4),
+ ASPEED_PINCTRL_GROUP(UART6),
+ ASPEED_PINCTRL_GROUP(USBCKI),
ASPEED_PINCTRL_GROUP(VGABIOSROM),
+ ASPEED_PINCTRL_GROUP(VGAHS),
+ ASPEED_PINCTRL_GROUP(VGAVS),
+ ASPEED_PINCTRL_GROUP(VPI24),
+ ASPEED_PINCTRL_GROUP(VPO),
+ ASPEED_PINCTRL_GROUP(WDTRST1),
+ ASPEED_PINCTRL_GROUP(WDTRST2),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
+ ASPEED_PINCTRL_FUNC(ACPI),
+ ASPEED_PINCTRL_FUNC(ADC0),
+ ASPEED_PINCTRL_FUNC(ADC1),
+ ASPEED_PINCTRL_FUNC(ADC10),
+ ASPEED_PINCTRL_FUNC(ADC11),
+ ASPEED_PINCTRL_FUNC(ADC12),
+ ASPEED_PINCTRL_FUNC(ADC13),
+ ASPEED_PINCTRL_FUNC(ADC14),
+ ASPEED_PINCTRL_FUNC(ADC15),
+ ASPEED_PINCTRL_FUNC(ADC2),
+ ASPEED_PINCTRL_FUNC(ADC3),
+ ASPEED_PINCTRL_FUNC(ADC4),
+ ASPEED_PINCTRL_FUNC(ADC5),
+ ASPEED_PINCTRL_FUNC(ADC6),
+ ASPEED_PINCTRL_FUNC(ADC7),
+ ASPEED_PINCTRL_FUNC(ADC8),
+ ASPEED_PINCTRL_FUNC(ADC9),
+ ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(DDCCLK),
+ ASPEED_PINCTRL_FUNC(DDCDAT),
+ ASPEED_PINCTRL_FUNC(ESPI),
+ ASPEED_PINCTRL_FUNC(FWSPICS1),
+ ASPEED_PINCTRL_FUNC(FWSPICS2),
ASPEED_PINCTRL_FUNC(GPID0),
ASPEED_PINCTRL_FUNC(GPID2),
+ ASPEED_PINCTRL_FUNC(GPID4),
+ ASPEED_PINCTRL_FUNC(GPID6),
ASPEED_PINCTRL_FUNC(GPIE0),
+ ASPEED_PINCTRL_FUNC(GPIE2),
+ ASPEED_PINCTRL_FUNC(GPIE4),
+ ASPEED_PINCTRL_FUNC(GPIE6),
ASPEED_PINCTRL_FUNC(I2C10),
ASPEED_PINCTRL_FUNC(I2C11),
ASPEED_PINCTRL_FUNC(I2C12),
@@ -781,11 +2165,50 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(I2C7),
ASPEED_PINCTRL_FUNC(I2C8),
ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(LAD0),
+ ASPEED_PINCTRL_FUNC(LAD1),
+ ASPEED_PINCTRL_FUNC(LAD2),
+ ASPEED_PINCTRL_FUNC(LAD3),
+ ASPEED_PINCTRL_FUNC(LCLK),
+ ASPEED_PINCTRL_FUNC(LFRAME),
+ ASPEED_PINCTRL_FUNC(LPCHC),
+ ASPEED_PINCTRL_FUNC(LPCPD),
+ ASPEED_PINCTRL_FUNC(LPCPLUS),
+ ASPEED_PINCTRL_FUNC(LPCPME),
+ ASPEED_PINCTRL_FUNC(LPCRST),
+ ASPEED_PINCTRL_FUNC(LPCSMI),
+ ASPEED_PINCTRL_FUNC(LSIRQ),
ASPEED_PINCTRL_FUNC(MAC1LINK),
+ ASPEED_PINCTRL_FUNC(MAC2LINK),
ASPEED_PINCTRL_FUNC(MDIO1),
ASPEED_PINCTRL_FUNC(MDIO2),
+ ASPEED_PINCTRL_FUNC(NCTS1),
+ ASPEED_PINCTRL_FUNC(NCTS2),
+ ASPEED_PINCTRL_FUNC(NCTS3),
+ ASPEED_PINCTRL_FUNC(NCTS4),
+ ASPEED_PINCTRL_FUNC(NDCD1),
+ ASPEED_PINCTRL_FUNC(NDCD2),
+ ASPEED_PINCTRL_FUNC(NDCD3),
+ ASPEED_PINCTRL_FUNC(NDCD4),
+ ASPEED_PINCTRL_FUNC(NDSR1),
+ ASPEED_PINCTRL_FUNC(NDSR2),
+ ASPEED_PINCTRL_FUNC(NDSR3),
+ ASPEED_PINCTRL_FUNC(NDSR4),
+ ASPEED_PINCTRL_FUNC(NDTR1),
+ ASPEED_PINCTRL_FUNC(NDTR2),
+ ASPEED_PINCTRL_FUNC(NDTR3),
+ ASPEED_PINCTRL_FUNC(NDTR4),
+ ASPEED_PINCTRL_FUNC(NRI1),
+ ASPEED_PINCTRL_FUNC(NRI2),
+ ASPEED_PINCTRL_FUNC(NRI3),
+ ASPEED_PINCTRL_FUNC(NRI4),
+ ASPEED_PINCTRL_FUNC(NRTS1),
+ ASPEED_PINCTRL_FUNC(NRTS2),
+ ASPEED_PINCTRL_FUNC(NRTS3),
+ ASPEED_PINCTRL_FUNC(NRTS4),
ASPEED_PINCTRL_FUNC(OSCCLK),
ASPEED_PINCTRL_FUNC(PEWAKE),
+ ASPEED_PINCTRL_FUNC(PNOR),
ASPEED_PINCTRL_FUNC(PWM0),
ASPEED_PINCTRL_FUNC(PWM1),
ASPEED_PINCTRL_FUNC(PWM2),
@@ -798,16 +2221,68 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RGMII2),
ASPEED_PINCTRL_FUNC(RMII1),
ASPEED_PINCTRL_FUNC(RMII2),
+ ASPEED_PINCTRL_FUNC(RXD1),
+ ASPEED_PINCTRL_FUNC(RXD2),
+ ASPEED_PINCTRL_FUNC(RXD3),
+ ASPEED_PINCTRL_FUNC(RXD4),
+ ASPEED_PINCTRL_FUNC(SALT1),
+ ASPEED_PINCTRL_FUNC(SALT10),
+ ASPEED_PINCTRL_FUNC(SALT11),
+ ASPEED_PINCTRL_FUNC(SALT12),
+ ASPEED_PINCTRL_FUNC(SALT13),
+ ASPEED_PINCTRL_FUNC(SALT14),
+ ASPEED_PINCTRL_FUNC(SALT2),
+ ASPEED_PINCTRL_FUNC(SALT3),
+ ASPEED_PINCTRL_FUNC(SALT4),
+ ASPEED_PINCTRL_FUNC(SALT5),
+ ASPEED_PINCTRL_FUNC(SALT6),
+ ASPEED_PINCTRL_FUNC(SALT7),
+ ASPEED_PINCTRL_FUNC(SALT8),
+ ASPEED_PINCTRL_FUNC(SALT9),
+ ASPEED_PINCTRL_FUNC(SCL1),
+ ASPEED_PINCTRL_FUNC(SCL2),
ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SD2),
+ ASPEED_PINCTRL_FUNC(SDA1),
+ ASPEED_PINCTRL_FUNC(SDA2),
+ ASPEED_PINCTRL_FUNC(SGPS1),
+ ASPEED_PINCTRL_FUNC(SGPS2),
+ ASPEED_PINCTRL_FUNC(SIOONCTRL),
+ ASPEED_PINCTRL_FUNC(SIOPBI),
+ ASPEED_PINCTRL_FUNC(SIOPBO),
+ ASPEED_PINCTRL_FUNC(SIOPWREQ),
+ ASPEED_PINCTRL_FUNC(SIOPWRGD),
+ ASPEED_PINCTRL_FUNC(SIOS3),
+ ASPEED_PINCTRL_FUNC(SIOS5),
+ ASPEED_PINCTRL_FUNC(SIOSCI),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1CS1),
ASPEED_PINCTRL_FUNC(SPI1DEBUG),
ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
+ ASPEED_PINCTRL_FUNC(SPI2CK),
+ ASPEED_PINCTRL_FUNC(SPI2CS0),
+ ASPEED_PINCTRL_FUNC(SPI2CS1),
+ ASPEED_PINCTRL_FUNC(SPI2MISO),
+ ASPEED_PINCTRL_FUNC(SPI2MOSI),
+ ASPEED_PINCTRL_FUNC(TIMER3),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(TXD1),
+ ASPEED_PINCTRL_FUNC(TXD2),
+ ASPEED_PINCTRL_FUNC(TXD3),
+ ASPEED_PINCTRL_FUNC(TXD4),
+ ASPEED_PINCTRL_FUNC(UART6),
+ ASPEED_PINCTRL_FUNC(USBCKI),
ASPEED_PINCTRL_FUNC(VGABIOSROM),
+ ASPEED_PINCTRL_FUNC(VGAHS),
+ ASPEED_PINCTRL_FUNC(VGAVS),
+ ASPEED_PINCTRL_FUNC(VPI24),
+ ASPEED_PINCTRL_FUNC(VPO),
+ ASPEED_PINCTRL_FUNC(WDTRST1),
+ ASPEED_PINCTRL_FUNC(WDTRST2),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
@@ -848,10 +2323,35 @@ static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
static int aspeed_g5_pinctrl_probe(struct platform_device *pdev)
{
int i;
+ struct regmap *map;
+ struct device_node *node;
for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++)
aspeed_g5_pins[i].number = i;
+ node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 0);
+ map = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(map)) {
+ dev_warn(&pdev->dev, "No GFX phandle found, some mux configurations may fail\n");
+ map = NULL;
+ }
+ aspeed_g5_pinctrl_data.maps[ASPEED_IP_GFX] = map;
+
+ node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 1);
+ if (node) {
+ map = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(map)) {
+ dev_warn(&pdev->dev, "LHC parent is not a syscon, some mux configurations may fail\n");
+ map = NULL;
+ }
+ } else {
+ dev_warn(&pdev->dev, "No LHC phandle found, some mux configurations may fail\n");
+ map = NULL;
+ }
+ of_node_put(node);
+ aspeed_g5_pinctrl_data.maps[ASPEED_IP_LPC] = map;
+
return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc,
&aspeed_g5_pinctrl_data);
}
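
Viewed in isolation, the lookup pattern used in the probe above (optional syscon regmaps resolved from the "aspeed,external-nodes" phandles, degrading to a NULL map so that only the affected mux requests fail) amounts to something like the sketch below; the helper name and factoring are illustrative, not part of the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Index 0 references the GFX node itself; index 1 references the LHC node, whose parent is the LPC syscon. */
static struct regmap *aspeed_g5_lookup_regmap(struct device *dev, int index, bool use_parent)
{
	struct device_node *node;
	struct regmap *map;

	node = of_parse_phandle(dev->of_node, "aspeed,external-nodes", index);
	if (!node) {
		dev_warn(dev, "Missing external-nodes phandle %d, some mux configurations may fail\n", index);
		return NULL;
	}

	map = syscon_node_to_regmap(use_parent ? node->parent : node);
	of_node_put(node);

	return IS_ERR(map) ? NULL : map;
}

The probe would then fill in both entries with aspeed_g5_lookup_regmap(&pdev->dev, 0, false) for ASPEED_IP_GFX and aspeed_g5_lookup_regmap(&pdev->dev, 1, true) for ASPEED_IP_LPC.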
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 49aeba912531..76f62bd45f02 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -14,6 +14,12 @@
#include "../core.h"
#include "pinctrl-aspeed.h"
+static const char *const aspeed_pinmux_ips[] = {
+ [ASPEED_IP_SCU] = "SCU",
+ [ASPEED_IP_GFX] = "GFX",
+ [ASPEED_IP_LPC] = "LPC",
+};
+
int aspeed_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -78,7 +84,8 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
static inline void aspeed_sig_desc_print_val(
const struct aspeed_sig_desc *desc, bool enable, u32 rv)
{
- pr_debug("SCU%x[0x%08x]=0x%x, got 0x%x from 0x%08x\n", desc->reg,
+ pr_debug("Want %s%X[0x%08X]=0x%X, got 0x%X from 0x%08X\n",
+ aspeed_pinmux_ips[desc->ip], desc->reg,
desc->mask, enable ? desc->enable : desc->disable,
(rv & desc->mask) >> __ffs(desc->mask), rv);
}
@@ -88,10 +95,11 @@ static inline void aspeed_sig_desc_print_val(
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The SCU regmap instance
+ * @regmap: The IP block's regmap instance
*
- * @return True if the descriptor's bitfield is configured to the state
- * selected by @enabled, false otherwise
+ * Return: 1 if the descriptor's bitfield is configured to the state
+ * selected by @enabled, 0 if not, and less than zero if an unrecoverable
+ * failure occurred
*
* Evaluation of descriptor state is non-trivial in that it is not a binary
* outcome: The bitfields can be greater than one bit in size and thus can take
@@ -99,14 +107,19 @@ static inline void aspeed_sig_desc_print_val(
* descriptor (typically this means a different function to the one of interest
* is enabled). Thus we must explicitly test for either condition as required.
*/
-static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
+static int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
bool enabled, struct regmap *map)
{
+ int ret;
unsigned int raw;
u32 want;
- if (regmap_read(map, desc->reg, &raw) < 0)
- return false;
+ if (!map)
+ return -ENODEV;
+
+ ret = regmap_read(map, desc->reg, &raw);
+ if (ret)
+ return ret;
aspeed_sig_desc_print_val(desc, enabled, raw);
want = enabled ? desc->enable : desc->disable;
@@ -119,10 +132,10 @@ static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
*
* @expr: An expression controlling the signal for a mux function on a pin
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The SCU regmap instance
+ * @maps: The list of regmap instances
*
- * @return True if the expression composed by @enabled evaluates true, false
- * otherwise
+ * Return: 1 if the expression composed by @enabled evaluates true, 0 if not,
+ * and less than zero if an unrecoverable failure occurred.
*
* A mux function is enabled or disabled if the function's signal expression
* for each pin in the function's pin group evaluates true for the desired
@@ -135,19 +148,21 @@ static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
* neither the enabled nor disabled state. Thus we must explicitly test for
* either condition as required.
*/
-static bool aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
- bool enabled, struct regmap *map)
+static int aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
+ bool enabled, struct regmap * const *maps)
{
int i;
+ int ret;
for (i = 0; i < expr->ndescs; i++) {
const struct aspeed_sig_desc *desc = &expr->descs[i];
- if (!aspeed_sig_desc_eval(desc, enabled, map))
- return false;
+ ret = aspeed_sig_desc_eval(desc, enabled, maps[desc->ip]);
+ if (ret <= 0)
+ return ret;
}
- return true;
+ return 1;
}
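
A toy, host-side rendering of the tri-state contract documented above (negative value = unrecoverable failure, 0 = not in the requested state, 1 = in the requested state); the types and helpers are stand-ins rather than the driver's:

#include <stdio.h>

struct desc { int have_map; unsigned int field; unsigned int enable; unsigned int disable; };

/* Mirrors aspeed_sig_desc_eval(): < 0 when the IP block has no regmap, else a 0/1 comparison. */
static int desc_eval(const struct desc *d, int enabled)
{
	if (!d->have_map)
		return -1;
	return d->field == (enabled ? d->enable : d->disable);
}

/* Mirrors aspeed_sig_expr_eval(): a failure or mismatch short-circuits the walk. */
static int expr_eval(const struct desc *descs, int n, int enabled)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = desc_eval(&descs[i], enabled);
		if (ret <= 0)
			return ret;
	}

	return 1;
}

int main(void)
{
	const struct desc e[] = { { 1, 1, 1, 0 }, { 0, 0, 1, 0 } };

	printf("%d\n", expr_eval(e, 2, 1));	/* prints -1: the second descriptor's block has no regmap */
	return 0;
}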
/**
@@ -158,19 +173,24 @@ static bool aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
* configured
 * @enable: true to enable a function's signal through a pin's signal
* expression, false to disable the function's signal
- * @map: The SCU's regmap instance for pinmux register access.
+ * @maps: The list of regmap instances for pinmux register access.
*
- * @return true if the expression is configured as requested, false otherwise
+ * Return: 0 if the expression is configured as requested and a negative error
+ * code otherwise
*/
-static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
- bool enable, struct regmap *map)
+static int aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
+ bool enable, struct regmap * const *maps)
{
+ int ret;
int i;
for (i = 0; i < expr->ndescs; i++) {
- bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
+ u32 val = (pattern << __ffs(desc->mask));
+
+ if (!maps[desc->ip])
+ return -ENODEV;
/*
* Strap registers are configured in hardware or by early-boot
@@ -179,64 +199,79 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
* deconfigured and is the reason we re-evaluate after writing
* all descriptor bits.
*/
- if (desc->reg == HW_STRAP1 || desc->reg == HW_STRAP2)
+ if ((desc->reg == HW_STRAP1 || desc->reg == HW_STRAP2) &&
+ desc->ip == ASPEED_IP_SCU)
continue;
- ret = regmap_update_bits(map, desc->reg, desc->mask,
- pattern << __ffs(desc->mask)) == 0;
+ ret = regmap_update_bits(maps[desc->ip], desc->reg,
+ desc->mask, val);
- if (!ret)
+ if (ret)
return ret;
}
- return aspeed_sig_expr_eval(expr, enable, map);
+ ret = aspeed_sig_expr_eval(expr, enable, maps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EPERM;
+
+ return 0;
}
-static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
- struct regmap *map)
+static int aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
+ struct regmap * const *maps)
{
- if (aspeed_sig_expr_eval(expr, true, map))
- return true;
+ int ret;
+
+ ret = aspeed_sig_expr_eval(expr, true, maps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return aspeed_sig_expr_set(expr, true, maps);
- return aspeed_sig_expr_set(expr, true, map);
+ return 0;
}
-static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
- struct regmap *map)
+static int aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
+ struct regmap * const *maps)
{
- if (!aspeed_sig_expr_eval(expr, true, map))
- return true;
+ int ret;
+
+ ret = aspeed_sig_expr_eval(expr, true, maps);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(expr, false, maps);
- return aspeed_sig_expr_set(expr, false, map);
+ return 0;
}
/**
* Disable a signal on a pin by disabling all provided signal expressions.
*
* @exprs: The list of signal expressions (from a priority level on a pin)
- * @map: The SCU's regmap instance for pinmux register access.
+ * @maps: The list of regmap instances for pinmux register access.
*
- * @return true if all expressions in the list are successfully disabled, false
- * otherwise
+ * Return: 0 if all expressions are disabled, otherwise a negative error code
*/
-static bool aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
- struct regmap *map)
+static int aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
+ struct regmap * const *maps)
{
- bool disabled = true;
+ int ret = 0;
if (!exprs)
-		return true;
+		return 0;
- while (*exprs) {
- bool ret;
-
- ret = aspeed_sig_expr_disable(*exprs, map);
- disabled = disabled && ret;
-
+ while (*exprs && !ret) {
+ ret = aspeed_sig_expr_disable(*exprs, maps);
exprs++;
}
- return disabled;
+ return ret;
}
/**
@@ -246,8 +281,8 @@ static bool aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
* @exprs: List of signal expressions (haystack)
* @name: The name of the requested function (needle)
*
- * @return A pointer to the signal expression whose function tag matches the
- * provided name, otherwise NULL.
+ * Return: A pointer to the signal expression whose function tag matches the
+ * provided name, otherwise NULL.
*
*/
static const struct aspeed_sig_expr *aspeed_find_expr_by_name(
@@ -330,6 +365,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
unsigned int group)
{
int i;
+ int ret;
const struct aspeed_pinctrl_data *pdata =
pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_group *pgroup = &pdata->groups[group];
@@ -343,6 +379,8 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
+ pr_debug("Muxing pin %d for %s\n", pin, pfunc->name);
+
if (!pdesc)
return -EINVAL;
@@ -358,8 +396,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
if (expr)
break;
- if (!aspeed_disable_sig(funcs, pdata->map))
- return -EPERM;
+ ret = aspeed_disable_sig(funcs, pdata->maps);
+ if (ret)
+ return ret;
prios++;
}
@@ -377,8 +416,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
return -ENXIO;
}
- if (!aspeed_sig_expr_enable(expr, pdata->map))
- return -EPERM;
+ ret = aspeed_sig_expr_enable(expr, pdata->maps);
+ if (ret)
+ return ret;
}
return 0;
@@ -414,6 +454,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
+ int ret;
const struct aspeed_pinctrl_data *pdata =
pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
@@ -432,8 +473,9 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
if (aspeed_gpio_in_exprs(funcs))
break;
- if (!aspeed_disable_sig(funcs, pdata->map))
- return -EPERM;
+ ret = aspeed_disable_sig(funcs, pdata->maps);
+ if (ret)
+ return ret;
prios++;
}
@@ -462,10 +504,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
* If GPIO is not the lowest priority signal type, assume there is only
* one expression defined to enable the GPIO function
*/
- if (!aspeed_sig_expr_enable(expr, pdata->map))
- return -EPERM;
-
- return 0;
+ return aspeed_sig_expr_enable(expr, pdata->maps);
}
int aspeed_pinctrl_probe(struct platform_device *pdev,
@@ -481,10 +520,10 @@ int aspeed_pinctrl_probe(struct platform_device *pdev,
return -ENODEV;
}
- pdata->map = syscon_node_to_regmap(parent->of_node);
- if (IS_ERR(pdata->map)) {
+ pdata->maps[ASPEED_IP_SCU] = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(pdata->maps[ASPEED_IP_SCU])) {
dev_err(&pdev->dev, "No regmap for syscon pincontroller parent\n");
- return PTR_ERR(pdata->map);
+ return PTR_ERR(pdata->maps[ASPEED_IP_SCU]);
}
pctl = pinctrl_register(pdesc, &pdev->dev, pdata);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index 3e72ef8c54bf..08a10d4db229 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -232,6 +232,11 @@
* group.
*/
+#define ASPEED_IP_SCU 0
+#define ASPEED_IP_GFX 1
+#define ASPEED_IP_LPC 2
+#define ASPEED_NR_PINMUX_IPS 3
+
/*
* The "Multi-function Pins Mapping and Control" table in the SoC datasheet
* references registers by the device/offset mnemonic. The register macros
@@ -255,13 +260,16 @@
#define SCUA0 0xA0 /* Multi-function Pin Control #7 */
#define SCUA4 0xA4 /* Multi-function Pin Control #8 */
#define SCUA8 0xA8 /* Multi-function Pin Control #9 */
+#define SCUAC 0xAC /* Multi-function Pin Control #10 */
#define HW_STRAP2 0xD0 /* Strapping */
/**
* A signal descriptor, which describes the register, bits and the
* enable/disable values that should be compared or written.
*
- * @reg: The register offset from base in bytes
+ * @ip: The IP block identifier, used as an index into the regmap array in
+ * struct aspeed_pinctrl_data
+ * @reg: The register offset with respect to the base address of the IP block
* @mask: The mask to apply to the register. The lowest set bit of the mask is
* used to derive the shift value.
* @enable: The value that enables the function. Value should be in the LSBs,
@@ -270,6 +278,7 @@
* LSBs, not at the position of the mask.
*/
struct aspeed_sig_desc {
+ unsigned int ip;
unsigned int reg;
u32 mask;
u32 enable;
@@ -313,24 +322,30 @@ struct aspeed_pin_desc {
/* Macro hell */
+#define SIG_DESC_IP_BIT(ip, reg, idx, val) \
+ { ip, reg, BIT_MASK(idx), val, (((val) + 1) & 1) }
+
/**
- * Short-hand macro for describing a configuration enabled by the state of one
- * bit. The disable value is derived.
+ * Short-hand macro for describing an SCU descriptor enabled by the state of
+ * one bit. The disable value is derived.
*
* @reg: The signal's associated register, offset from base
* @idx: The signal's bit index in the register
* @val: The value (0 or 1) that enables the function
*/
#define SIG_DESC_BIT(reg, idx, val) \
- { reg, BIT_MASK(idx), val, (((val) + 1) & 1) }
+ SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, val)
+
+#define SIG_DESC_IP_SET(ip, reg, idx) SIG_DESC_IP_BIT(ip, reg, idx, 1)
/**
- * A further short-hand macro describing a configuration enabled with a set bit.
+ * A further short-hand macro expanding to an SCU descriptor enabled by a set
+ * bit.
*
- * @reg: The configuration's associated register, offset from base
- * @idx: The configuration's bit index in the register
+ * @reg: The register, offset from base
+ * @idx: The bit index in the register
*/
-#define SIG_DESC_SET(reg, idx) SIG_DESC_BIT(reg, idx, 1)
+#define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
#define SIG_DESC_LIST_SYM(sig, func) sig_descs_ ## sig ## _ ## func
#define SIG_DESC_LIST_DECL(sig, func, ...) \
@@ -500,7 +515,7 @@ struct aspeed_pin_desc {
MS_PIN_DECL_(pin, SIG_EXPR_LIST_PTR(gpio))
struct aspeed_pinctrl_data {
- struct regmap *map;
+ struct regmap *maps[ASPEED_NR_PINMUX_IPS];
const struct pinctrl_pin_desc *pins;
const unsigned int npins;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index a5331fdfc795..810a81786f62 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1106,7 +1106,7 @@ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
@@ -1222,7 +1222,7 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i, j;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
@@ -1292,7 +1292,7 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 5d1e505c3c63..3ca925dfefd1 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -619,7 +619,7 @@ static int iproc_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct iproc_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
unsigned i, gpio = iproc_pin_to_gpio(pin);
int ret = -ENOTSUPP;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 13a4c2774157..4b5cf0e0f16e 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -703,7 +703,7 @@ static int ns2_pin_get_enable(struct pinctrl_dev *pctrldev, unsigned int pin)
}
static int ns2_pin_set_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
- u16 slew)
+ u32 slew)
{
struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
@@ -793,7 +793,7 @@ static void ns2_pin_get_pull(struct pinctrl_dev *pctrldev,
}
static int ns2_pin_set_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
- u16 strength)
+ u32 strength)
{
struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
@@ -904,7 +904,7 @@ static int ns2_pin_config_set(struct pinctrl_dev *pctrldev, unsigned int pin,
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
enum pin_config_param param;
unsigned int i;
- u16 arg;
+ u32 arg;
int ret = -ENOTSUPP;
if (pin_data->pin_conf.base == -1)
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index c8deb8be1da7..91ea32dc1e7f 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -366,7 +366,7 @@ static const struct pinctrl_ops nsp_pctrl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
-static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u16 slew)
+static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u32 slew)
{
if (slew)
nsp_set_bit(chip, IO_CTRL, NSP_GPIO_SLEW_RATE_EN, gpio, true);
@@ -403,7 +403,7 @@ static void nsp_gpio_get_pull(struct nsp_gpio *chip, unsigned gpio,
}
static int nsp_gpio_set_strength(struct nsp_gpio *chip, unsigned gpio,
- u16 strength)
+ u32 strength)
{
u32 offset, shift, i;
u32 val;
@@ -522,7 +522,7 @@ static int nsp_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct nsp_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
unsigned int i, gpio;
int ret = -ENOTSUPP;
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index fabe728ae268..bf2e17d0d6e4 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -227,7 +227,6 @@ static const struct of_device_id berlin2_pinctrl_match[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, berlin2_pinctrl_match);
static int berlin2_pinctrl_probe(struct platform_device *pdev)
{
@@ -244,8 +243,4 @@ static struct platform_driver berlin2_pinctrl_driver = {
.of_match_table = berlin2_pinctrl_match,
},
};
-module_platform_driver(berlin2_pinctrl_driver);
-
-MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Berlin BG2 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin2_pinctrl_driver);
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index ad8c75861373..9bee7bd1650f 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -172,7 +172,6 @@ static const struct of_device_id berlin2cd_pinctrl_match[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, berlin2cd_pinctrl_match);
static int berlin2cd_pinctrl_probe(struct platform_device *pdev)
{
@@ -189,8 +188,4 @@ static struct platform_driver berlin2cd_pinctrl_driver = {
.of_match_table = berlin2cd_pinctrl_match,
},
};
-module_platform_driver(berlin2cd_pinctrl_driver);
-
-MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Berlin BG2CD pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin2cd_pinctrl_driver);
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index cd171aea8ca8..eee6763f114c 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -10,7 +10,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -389,7 +389,6 @@ static const struct of_device_id berlin2q_pinctrl_match[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, berlin2q_pinctrl_match);
static int berlin2q_pinctrl_probe(struct platform_device *pdev)
{
@@ -406,8 +405,4 @@ static struct platform_driver berlin2q_pinctrl_driver = {
.of_match_table = berlin2q_pinctrl_match,
},
};
-module_platform_driver(berlin2q_pinctrl_driver);
-
-MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Berlin BG2Q pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin2q_pinctrl_driver);
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 09172043d589..e6740656ee7c 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -18,7 +18,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
@@ -457,7 +457,6 @@ static const struct of_device_id berlin4ct_pinctrl_match[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, berlin4ct_pinctrl_match);
static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
{
@@ -496,8 +495,4 @@ static struct platform_driver berlin4ct_pinctrl_driver = {
.of_match_table = berlin4ct_pinctrl_match,
},
};
-module_platform_driver(berlin4ct_pinctrl_driver);
-
-MODULE_AUTHOR("Jisheng Zhang <jszhang@marvell.com>");
-MODULE_DESCRIPTION("Marvell berlin4ct pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin4ct_pinctrl_driver);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb38e208f32d..d69046537b75 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -237,10 +237,8 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
}
pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
- if (pindesc == NULL) {
- dev_err(pctldev->dev, "failed to alloc struct pin_desc\n");
+ if (!pindesc)
return -ENOMEM;
- }
/* Set owner */
pindesc->pctldev = pctldev;
@@ -540,6 +538,182 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_remove_gpio_range);
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+
+/**
+ * pinctrl_generic_get_group_count() - returns the number of pin groups
+ * @pctldev: pin controller device
+ */
+int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return pctldev->num_groups;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_count);
+
+/**
+ * pinctrl_generic_get_group_name() - returns the name of a pin group
+ * @pctldev: pin controller device
+ * @selector: group number
+ */
+const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return NULL;
+
+ return group->name;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_name);
+
+/**
+ * pinctrl_generic_get_group_pins() - gets the pin group pins
+ * @pctldev: pin controller device
+ * @selector: group number
+ * @pins: pins in the group
+ * @num_pins: number of pins in the group
+ */
+int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group) {
+ dev_err(pctldev->dev, "%s could not find pingroup%i\n",
+ __func__, selector);
+ return -EINVAL;
+ }
+
+ *pins = group->pins;
+ *num_pins = group->num_pins;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_pins);
+
+/**
+ * pinctrl_generic_get_group() - returns a pin group based on the number
+ * @pctldev: pin controller device
+ * @gselector: group number
+ */
+struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return NULL;
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group);
+
+/**
+ * pinctrl_generic_add_group() - adds a new pin group
+ * @pctldev: pin controller device
+ * @name: name of the pin group
+ * @pins: pins in the pin group
+ * @num_pins: number of pins in the pin group
+ * @data: pin controller driver specific data
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
+ int *pins, int num_pins, void *data)
+{
+ struct group_desc *group;
+
+ group = devm_kzalloc(pctldev->dev, sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ group->name = name;
+ group->pins = pins;
+ group->num_pins = num_pins;
+ group->data = data;
+
+ radix_tree_insert(&pctldev->pin_group_tree, pctldev->num_groups,
+ group);
+
+ pctldev->num_groups++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_add_group);
+
+/**
+ * pinctrl_generic_remove_group() - removes a numbered pin group
+ * @pctldev: pin controller device
+ * @selector: group number
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return -ENOENT;
+
+ radix_tree_delete(&pctldev->pin_group_tree, selector);
+ devm_kfree(pctldev->dev, group);
+
+ pctldev->num_groups--;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
+
+/**
+ * pinctrl_generic_free_groups() - removes all pin groups
+ * @pctldev: pin controller device
+ *
+ * Note that the caller must take care of locking.
+ */
+static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
+{
+ struct radix_tree_iter iter;
+ struct group_desc *group;
+ unsigned long *indices;
+ void **slot;
+ int i = 0;
+
+ indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
+ pctldev->num_groups, GFP_KERNEL);
+ if (!indices)
+ return;
+
+ radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
+ indices[i++] = iter.index;
+
+ for (i = 0; i < pctldev->num_groups; i++) {
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ indices[i]);
+ radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
+ devm_kfree(pctldev->dev, group);
+ }
+
+ pctldev->num_groups = 0;
+}
+
+#else
+static inline void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
+{
+}
+#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
+
/**
* pinctrl_get_group_selector() - returns the group selector for a group
* @pctldev: the pin controller handling the group
@@ -688,6 +862,35 @@ int pinctrl_gpio_direction_output(unsigned gpio)
}
EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_output);
+/**
+ * pinctrl_gpio_set_config() - Apply config to given GPIO pin
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @config: the configuration to apply to the GPIO
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers, if
+ * they need to call the underlying pin controller to change GPIO config
+ * (for example set debounce time).
+ */
+int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+{
+ unsigned long configs[] = { config };
+ struct pinctrl_gpio_range *range;
+ struct pinctrl_dev *pctldev;
+ int ret, pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return ret;
+
+ mutex_lock(&pctldev->mutex);
+ pin = gpio_to_pin(range, gpio);
+ ret = pinconf_set_config(pctldev, pin, configs, ARRAY_SIZE(configs));
+ mutex_unlock(&pctldev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinctrl_gpio_set_config);
+
static struct pinctrl_state *find_state(struct pinctrl *p,
const char *name)
{
@@ -706,11 +909,8 @@ static struct pinctrl_state *create_state(struct pinctrl *p,
struct pinctrl_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state == NULL) {
- dev_err(p->dev,
- "failed to alloc struct pinctrl_state\n");
+ if (!state)
return ERR_PTR(-ENOMEM);
- }
state->name = name;
INIT_LIST_HEAD(&state->settings);
@@ -720,7 +920,8 @@ static struct pinctrl_state *create_state(struct pinctrl *p,
return state;
}
-static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
+static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ struct pinctrl_map const *map)
{
struct pinctrl_state *state;
struct pinctrl_setting *setting;
@@ -736,15 +937,16 @@ static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
return 0;
setting = kzalloc(sizeof(*setting), GFP_KERNEL);
- if (setting == NULL) {
- dev_err(p->dev,
- "failed to alloc struct pinctrl_setting\n");
+ if (!setting)
return -ENOMEM;
- }
setting->type = map->type;
- setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+ if (pctldev)
+ setting->pctldev = pctldev;
+ else
+ setting->pctldev =
+ get_pinctrl_dev_from_devname(map->ctrl_dev_name);
if (setting->pctldev == NULL) {
kfree(setting);
/* Do not defer probing of hogs (circular loop) */
@@ -800,7 +1002,8 @@ static struct pinctrl *find_pinctrl(struct device *dev)
static void pinctrl_free(struct pinctrl *p, bool inlist);
-static struct pinctrl *create_pinctrl(struct device *dev)
+static struct pinctrl *create_pinctrl(struct device *dev,
+ struct pinctrl_dev *pctldev)
{
struct pinctrl *p;
const char *devname;
@@ -815,15 +1018,13 @@ static struct pinctrl *create_pinctrl(struct device *dev)
* a pin control handle with pinctrl_get()
*/
p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
- dev_err(dev, "failed to alloc struct pinctrl\n");
+ if (!p)
return ERR_PTR(-ENOMEM);
- }
p->dev = dev;
INIT_LIST_HEAD(&p->states);
INIT_LIST_HEAD(&p->dt_maps);
- ret = pinctrl_dt_to_map(p);
+ ret = pinctrl_dt_to_map(p, pctldev);
if (ret < 0) {
kfree(p);
return ERR_PTR(ret);
@@ -838,7 +1039,7 @@ static struct pinctrl *create_pinctrl(struct device *dev)
if (strcmp(map->dev_name, devname))
continue;
- ret = add_setting(p, map);
+ ret = add_setting(p, pctldev, map);
/*
* At this point the adding of a setting may:
*
@@ -899,7 +1100,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
return p;
}
- return create_pinctrl(dev);
+ return create_pinctrl(dev, NULL);
}
EXPORT_SYMBOL_GPL(pinctrl_get);
@@ -1175,10 +1376,8 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
}
maps_node = kzalloc(sizeof(*maps_node), GFP_KERNEL);
- if (!maps_node) {
- pr_err("failed to alloc struct pinctrl_maps\n");
+ if (!maps_node)
return -ENOMEM;
- }
maps_node->num_maps = num_maps;
if (dup) {
@@ -1731,20 +1930,18 @@ static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
!ops->get_group_name)
return -EINVAL;
- if (ops->dt_node_to_map && !ops->dt_free_map)
- return -EINVAL;
-
return 0;
}
/**
- * pinctrl_register() - register a pin controller device
+ * pinctrl_init_controller() - init a pin controller device
* @pctldesc: descriptor for this pin controller
* @dev: parent device for this pin controller
* @driver_data: private pin controller data for this pin controller
*/
-struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
- struct device *dev, void *driver_data)
+struct pinctrl_dev *pinctrl_init_controller(struct pinctrl_desc *pctldesc,
+ struct device *dev,
+ void *driver_data)
{
struct pinctrl_dev *pctldev;
int ret;
@@ -1755,17 +1952,22 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
return ERR_PTR(-EINVAL);
pctldev = kzalloc(sizeof(*pctldev), GFP_KERNEL);
- if (pctldev == NULL) {
- dev_err(dev, "failed to alloc struct pinctrl_dev\n");
+ if (!pctldev)
return ERR_PTR(-ENOMEM);
- }
/* Initialize pin control device struct */
pctldev->owner = pctldesc->owner;
pctldev->desc = pctldesc;
pctldev->driver_data = driver_data;
INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+ INIT_RADIX_TREE(&pctldev->pin_group_tree, GFP_KERNEL);
+#endif
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+ INIT_RADIX_TREE(&pctldev->pin_function_tree, GFP_KERNEL);
+#endif
INIT_LIST_HEAD(&pctldev->gpio_ranges);
+ INIT_LIST_HEAD(&pctldev->node);
pctldev->dev = dev;
mutex_init(&pctldev->mutex);
@@ -1800,21 +2002,28 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
goto out_err;
}
- mutex_lock(&pinctrldev_list_mutex);
- list_add_tail(&pctldev->node, &pinctrldev_list);
- mutex_unlock(&pinctrldev_list_mutex);
+ return pctldev;
- pctldev->p = pinctrl_get(pctldev->dev);
+out_err:
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+ return ERR_PTR(ret);
+}
+static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
+{
+ pctldev->p = create_pinctrl(pctldev->dev, pctldev);
if (!IS_ERR(pctldev->p)) {
+ kref_get(&pctldev->p->users);
pctldev->hog_default =
pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
if (IS_ERR(pctldev->hog_default)) {
- dev_dbg(dev, "failed to lookup the default state\n");
+ dev_dbg(pctldev->dev,
+ "failed to lookup the default state\n");
} else {
if (pinctrl_select_state(pctldev->p,
pctldev->hog_default))
- dev_err(dev,
+ dev_err(pctldev->dev,
"failed to select default state\n");
}
@@ -1822,20 +2031,85 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pinctrl_lookup_state(pctldev->p,
PINCTRL_STATE_SLEEP);
if (IS_ERR(pctldev->hog_sleep))
- dev_dbg(dev, "failed to lookup the sleep state\n");
+ dev_dbg(pctldev->dev,
+ "failed to lookup the sleep state\n");
}
+ mutex_lock(&pinctrldev_list_mutex);
+ list_add_tail(&pctldev->node, &pinctrldev_list);
+ mutex_unlock(&pinctrldev_list_mutex);
+
pinctrl_init_device_debugfs(pctldev);
+ return 0;
+}
+
+/**
+ * pinctrl_register() - register a pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ *
+ * Note that pinctrl_register() is known to have problems as the pin
+ * controller driver functions are called before the driver has a
+ * struct pinctrl_dev handle. To avoid issues later on, please use the
+ * new pinctrl_register_and_init() below instead.
+ */
+struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data)
+{
+ struct pinctrl_dev *pctldev;
+ int error;
+
+ pctldev = pinctrl_init_controller(pctldesc, dev, driver_data);
+ if (IS_ERR(pctldev))
+ return pctldev;
+
+ error = pinctrl_create_and_start(pctldev);
+ if (error) {
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+
+ return ERR_PTR(error);
+ }
+
return pctldev;
-out_err:
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pinctrl_register);
+int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data,
+ struct pinctrl_dev **pctldev)
+{
+ struct pinctrl_dev *p;
+ int error;
+
+ p = pinctrl_init_controller(pctldesc, dev, driver_data);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ /*
+ * We have pinctrl_start() call functions in the pin controller
+ * driver with create_pinctrl() for at least dt_node_to_map(). So
+ * let's make sure pctldev is properly initialized for the
+ * pin controller driver before we do anything.
+ */
+ *pctldev = p;
+
+ error = pinctrl_create_and_start(p);
+ if (error) {
+ mutex_destroy(&p->mutex);
+ kfree(p);
+ *pctldev = NULL;
+
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
+
/**
* pinctrl_unregister() - unregister pinmux
* @pctldev: pin controller to unregister
@@ -1845,6 +2119,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register);
void pinctrl_unregister(struct pinctrl_dev *pctldev)
{
struct pinctrl_gpio_range *range, *n;
+
if (pctldev == NULL)
return;
@@ -1852,13 +2127,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
pinctrl_remove_device_debugfs(pctldev);
mutex_unlock(&pctldev->mutex);
- if (!IS_ERR(pctldev->p))
+ if (!IS_ERR_OR_NULL(pctldev->p))
pinctrl_put(pctldev->p);
mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
+ pinmux_generic_free_functions(pctldev);
+ pinctrl_generic_free_groups(pctldev);
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
@@ -1925,6 +2202,42 @@ struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
EXPORT_SYMBOL_GPL(devm_pinctrl_register);
/**
+ * devm_pinctrl_register_and_init() - Resource managed pinctrl register and init
+ * @dev: parent device for this pin controller
+ * @pctldesc: descriptor for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ *
+ * Returns an error pointer if pincontrol register failed. Otherwise
+ * it returns valid pinctrl handle.
+ *
+ * The pinctrl device will be automatically released when the device is unbound.
+ */
+int devm_pinctrl_register_and_init(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data,
+ struct pinctrl_dev **pctldev)
+{
+ struct pinctrl_dev **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pinctrl_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pinctrl_register_and_init(pctldesc, dev, driver_data, pctldev);
+ if (error) {
+ devres_free(ptr);
+ return error;
+ }
+
+ *ptr = *pctldev;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
+
+/**
* devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
* @dev: device for which which resource was allocated
* @pctldev: the pinctrl device to unregister.
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 747c423c11f3..1c35de59a658 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -24,6 +24,10 @@ struct pinctrl_gpio_range;
* controller
* @pin_desc_tree: each pin descriptor for this pin controller is stored in
* this radix tree
+ * @pin_group_tree: optionally each pin group can be stored in this radix tree
+ * @num_groups: optionally number of groups can be kept here
+ * @pin_function_tree: optionally each function can be stored in this radix tree
+ * @num_functions: optionally number of functions can be kept here
* @gpio_ranges: a list of GPIO ranges that is handled by this pin controller,
* ranges are added to this list at runtime
* @dev: the device entry for this pin controller
@@ -40,6 +44,14 @@ struct pinctrl_dev {
struct list_head node;
struct pinctrl_desc *desc;
struct radix_tree_root pin_desc_tree;
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+ struct radix_tree_root pin_group_tree;
+ unsigned int num_groups;
+#endif
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+ struct radix_tree_root pin_function_tree;
+ unsigned int num_functions;
+#endif
struct list_head gpio_ranges;
struct device *dev;
struct module *owner;
@@ -171,6 +183,49 @@ struct pinctrl_maps {
unsigned num_maps;
};
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+
+/**
+ * struct group_desc - generic pin group descriptor
+ * @name: name of the pin group
+ * @pins: array of pins that belong to the group
+ * @num_pins: number of pins in the group
+ * @data: pin controller driver specific data
+ */
+struct group_desc {
+ const char *name;
+ int *pins;
+ int num_pins;
+ void *data;
+};
+
+int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev);
+
+const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group_selector,
+ const unsigned int **pins,
+ unsigned int *npins);
+
+struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
+ int *gpins, int ngpins, void *data);
+
+int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+static inline int
+pinctrl_generic_remove_last_group(struct pinctrl_dev *pctldev)
+{
+ return pinctrl_generic_remove_group(pctldev, pctldev->num_groups - 1);
+}
+
+#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
+
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np);
int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 260908480075..0e5c9f14a706 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -42,7 +42,8 @@ static void dt_free_map(struct pinctrl_dev *pctldev,
{
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- ops->dt_free_map(pctldev, map, num_maps);
+ if (ops->dt_free_map)
+ ops->dt_free_map(pctldev, map, num_maps);
} else {
/* There is no pctldev for PIN_MAP_TYPE_DUMMY_STATE */
kfree(map);
@@ -100,11 +101,12 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
return get_pinctrl_dev_from_of_node(np);
}
-static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
+static int dt_to_map_one_config(struct pinctrl *p,
+ struct pinctrl_dev *pctldev,
+ const char *statename,
struct device_node *np_config)
{
struct device_node *np_pctldev;
- struct pinctrl_dev *pctldev;
const struct pinctrl_ops *ops;
int ret;
struct pinctrl_map *map;
@@ -121,7 +123,8 @@ static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
/* OK let's just assume this will appear later then */
return -EPROBE_DEFER;
}
- pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
+ if (!pctldev)
+ pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
/* Do not defer probing of hogs (circular loop) */
@@ -166,7 +169,22 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-int pinctrl_dt_to_map(struct pinctrl *p)
+bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
+{
+ struct device_node *np;
+ struct property *prop;
+ int size;
+
+ np = pctldev->dev->of_node;
+ if (!np)
+ return false;
+
+ prop = of_find_property(np, "pinctrl-0", &size);
+
+ return prop ? true : false;
+}
+
+int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
int state, ret;
@@ -233,7 +251,8 @@ int pinctrl_dt_to_map(struct pinctrl *p)
}
/* Parse the node */
- ret = dt_to_map_one_config(p, statename, np_config);
+ ret = dt_to_map_one_config(p, pctldev, statename,
+ np_config);
of_node_put(np_config);
if (ret < 0)
goto err;
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index c2d1a5505850..43d8d19aa5ee 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -20,8 +20,10 @@ struct of_phandle_args;
#ifdef CONFIG_OF
+bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
+
void pinctrl_dt_free_maps(struct pinctrl *p);
-int pinctrl_dt_to_map(struct pinctrl *p);
+int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
int pinctrl_count_index_with_args(const struct device_node *np,
const char *list_name);
@@ -32,7 +34,13 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline int pinctrl_dt_to_map(struct pinctrl *p)
+static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
+{
+ return false;
+}
+
+static inline int pinctrl_dt_to_map(struct pinctrl *p,
+ struct pinctrl_dev *pctldev)
{
return 0;
}
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index fc8cbf611723..cae05e76c111 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -1,6 +1,7 @@
config PINCTRL_IMX
bool
- select PINMUX
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select PINCONF
select REGMAP
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 5ef7e875b50e..a7ace9e1ad81 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -27,6 +27,7 @@
#include <linux/regmap.h>
#include "../core.h"
+#include "../pinmux.h"
#include "pinctrl-imx.h"
/* The bits in CONFIG cell defined in binding doc*/
@@ -42,59 +43,25 @@ struct imx_pinctrl {
struct pinctrl_dev *pctl;
void __iomem *base;
void __iomem *input_sel_base;
- const struct imx_pinctrl_soc_info *info;
+ struct imx_pinctrl_soc_info *info;
};
-static inline const struct imx_pin_group *imx_pinctrl_find_group_by_name(
- const struct imx_pinctrl_soc_info *info,
+static inline const struct group_desc *imx_pinctrl_find_group_by_name(
+ struct pinctrl_dev *pctldev,
const char *name)
{
- const struct imx_pin_group *grp = NULL;
+ const struct group_desc *grp = NULL;
int i;
- for (i = 0; i < info->ngroups; i++) {
- if (!strcmp(info->groups[i].name, name)) {
- grp = &info->groups[i];
+ for (i = 0; i < pctldev->num_groups; i++) {
+ grp = pinctrl_generic_get_group(pctldev, i);
+ if (grp && !strcmp(grp->name, name))
break;
- }
}
return grp;
}
-static int imx_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->ngroups;
-}
-
-static const char *imx_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->groups[selector].name;
-}
-
-static int imx_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *npins)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- if (selector >= info->ngroups)
- return -EINVAL;
-
- *pins = info->groups[selector].pin_ids;
- *npins = info->groups[selector].npins;
-
- return 0;
-}
-
static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned offset)
{
@@ -106,8 +73,8 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map, unsigned *num_maps)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
- const struct imx_pin_group *grp;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
+ const struct group_desc *grp;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num = 1;
@@ -117,15 +84,17 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
* first find the group of this node and check if we need create
* config maps for pins
*/
- grp = imx_pinctrl_find_group_by_name(info, np->name);
+ grp = imx_pinctrl_find_group_by_name(pctldev, np->name);
if (!grp) {
dev_err(info->dev, "unable to find group for node %s\n",
np->name);
return -EINVAL;
}
- for (i = 0; i < grp->npins; i++) {
- if (!(grp->pins[i].config & IMX_NO_PAD_CTL))
+ for (i = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
+ if (!(pin->config & IMX_NO_PAD_CTL))
map_num++;
}
@@ -149,12 +118,14 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create config map */
new_map++;
- for (i = j = 0; i < grp->npins; i++) {
- if (!(grp->pins[i].config & IMX_NO_PAD_CTL)) {
+ for (i = j = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
+ if (!(pin->config & IMX_NO_PAD_CTL)) {
new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
new_map[j].data.configs.group_or_pin =
- pin_get_name(pctldev, grp->pins[i].pin);
- new_map[j].data.configs.configs = &grp->pins[i].config;
+ pin_get_name(pctldev, pin->pin);
+ new_map[j].data.configs.configs = &pin->config;
new_map[j].data.configs.num_configs = 1;
j++;
}
@@ -173,9 +144,9 @@ static void imx_dt_free_map(struct pinctrl_dev *pctldev,
}
static const struct pinctrl_ops imx_pctrl_ops = {
- .get_groups_count = imx_get_groups_count,
- .get_group_name = imx_get_group_name,
- .get_group_pins = imx_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.pin_dbg_show = imx_pin_dbg_show,
.dt_node_to_map = imx_dt_node_to_map,
.dt_free_map = imx_dt_free_map,
@@ -186,24 +157,33 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
unsigned int npins, pin_id;
int i;
- struct imx_pin_group *grp;
+ struct group_desc *grp = NULL;
+ struct function_desc *func = NULL;
/*
* Configure the mux mode for each pin in the group for a specific
* function.
*/
- grp = &info->groups[group];
- npins = grp->npins;
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ npins = grp->num_pins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- info->functions[selector].name, grp->name);
+ func->name, grp->name);
for (i = 0; i < npins; i++) {
- struct imx_pin *pin = &grp->pins[i];
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
pin_id = pin->pin;
pin_reg = &info->pin_regs[pin_id];
@@ -272,43 +252,13 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static int imx_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->nfunctions;
-}
-
-static const char *imx_pmx_get_func_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->functions[selector].name;
-}
-
-static int imx_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- *groups = info->functions[selector].groups;
- *num_groups = info->functions[selector].num_groups;
-
- return 0;
-}
-
static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
- struct imx_pin_group *grp;
+ struct group_desc *grp;
struct imx_pin *imx_pin;
unsigned int pin, group;
u32 reg;
@@ -322,10 +272,12 @@ static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
/* Find the pinctrl config with GPIO mux mode for the requested pin */
- for (group = 0; group < info->ngroups; group++) {
- grp = &info->groups[group];
- for (pin = 0; pin < grp->npins; pin++) {
- imx_pin = &grp->pins[pin];
+ for (group = 0; group < pctldev->num_groups; group++) {
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ continue;
+ for (pin = 0; pin < grp->num_pins; pin++) {
+ imx_pin = &((struct imx_pin *)(grp->data))[pin];
if (imx_pin->pin == offset && !imx_pin->mux_mode)
goto mux_pin;
}
@@ -346,7 +298,7 @@ static void imx_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
u32 reg;
@@ -371,7 +323,7 @@ static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset, bool input)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
u32 reg;
@@ -398,9 +350,9 @@ static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
}
static const struct pinmux_ops imx_pmx_ops = {
- .get_functions_count = imx_pmx_get_funcs_count,
- .get_function_name = imx_pmx_get_func_name,
- .get_function_groups = imx_pmx_get_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = imx_pmx_set,
.gpio_request_enable = imx_pmx_gpio_request_enable,
.gpio_disable_free = imx_pmx_gpio_disable_free,
@@ -411,7 +363,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin_id, unsigned long *config)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
if (pin_reg->conf_reg == -1) {
@@ -433,7 +385,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
int i;
@@ -467,7 +419,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
unsigned long config;
@@ -483,20 +435,22 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned group)
{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
- struct imx_pin_group *grp;
+ struct group_desc *grp;
unsigned long config;
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group > pctldev->num_groups)
return;
seq_printf(s, "\n");
- grp = &info->groups[group];
- for (i = 0; i < grp->npins; i++) {
- struct imx_pin *pin = &grp->pins[i];
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return;
+
+ for (i = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
name = pin_get_name(pctldev, pin->pin);
ret = imx_pinconf_get(pctldev, pin->pin, &config);
if (ret)
@@ -520,7 +474,7 @@ static const struct pinconf_ops imx_pinconf_ops = {
#define SHARE_FSL_PIN_SIZE 20
static int imx_pinctrl_parse_groups(struct device_node *np,
- struct imx_pin_group *grp,
+ struct group_desc *grp,
struct imx_pinctrl_soc_info *info,
u32 index)
{
@@ -554,20 +508,20 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
return -EINVAL;
}
- grp->npins = size / pin_size;
- grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(struct imx_pin),
- GFP_KERNEL);
- grp->pin_ids = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
- GFP_KERNEL);
- if (!grp->pins || ! grp->pin_ids)
+ grp->num_pins = size / pin_size;
+ grp->data = devm_kzalloc(info->dev, grp->num_pins *
+ sizeof(struct imx_pin), GFP_KERNEL);
+ grp->pins = devm_kzalloc(info->dev, grp->num_pins *
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!grp->pins || !grp->data)
return -ENOMEM;
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->num_pins; i++) {
u32 mux_reg = be32_to_cpu(*list++);
u32 conf_reg;
unsigned int pin_id;
struct imx_pin_reg *pin_reg;
- struct imx_pin *pin = &grp->pins[i];
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
if (!(info->flags & ZERO_OFFSET_VALID) && !mux_reg)
mux_reg = -1;
@@ -583,7 +537,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin_id = (mux_reg != -1) ? mux_reg / 4 : conf_reg / 4;
pin_reg = &info->pin_regs[pin_id];
pin->pin = pin_id;
- grp->pin_ids[i] = pin_id;
+ grp->pins[i] = pin_id;
pin_reg->mux_reg = mux_reg;
pin_reg->conf_reg = conf_reg;
pin->input_reg = be32_to_cpu(*list++);
@@ -604,31 +558,46 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
}
static int imx_pinctrl_parse_functions(struct device_node *np,
- struct imx_pinctrl_soc_info *info,
+ struct imx_pinctrl *ipctl,
u32 index)
{
+ struct pinctrl_dev *pctl = ipctl->pctl;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
struct device_node *child;
- struct imx_pmx_func *func;
- struct imx_pin_group *grp;
+ struct function_desc *func;
+ struct group_desc *grp;
u32 i = 0;
dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
- func = &info->functions[index];
+ func = pinmux_generic_get_function(pctl, index);
+ if (!func)
+ return -EINVAL;
/* Initialise function */
func->name = np->name;
- func->num_groups = of_get_child_count(np);
- if (func->num_groups == 0) {
+ func->num_group_names = of_get_child_count(np);
+ if (func->num_group_names == 0) {
dev_err(info->dev, "no groups defined in %s\n", np->full_name);
return -EINVAL;
}
- func->groups = devm_kzalloc(info->dev,
- func->num_groups * sizeof(char *), GFP_KERNEL);
+ func->group_names = devm_kzalloc(info->dev,
+ func->num_group_names *
+ sizeof(char *), GFP_KERNEL);
for_each_child_of_node(np, child) {
- func->groups[i] = child->name;
- grp = &info->groups[info->group_index++];
+ func->group_names[i] = child->name;
+
+ grp = devm_kzalloc(info->dev, sizeof(struct group_desc),
+ GFP_KERNEL);
+ if (!grp)
+ return -ENOMEM;
+
+ mutex_lock(&info->mutex);
+ radix_tree_insert(&pctl->pin_group_tree,
+ info->group_index++, grp);
+ mutex_unlock(&info->mutex);
+
imx_pinctrl_parse_groups(child, grp, info, i++);
}
@@ -659,10 +628,12 @@ static bool imx_pinctrl_dt_is_flat_functions(struct device_node *np)
}
static int imx_pinctrl_probe_dt(struct platform_device *pdev,
- struct imx_pinctrl_soc_info *info)
+ struct imx_pinctrl *ipctl)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
+ struct pinctrl_dev *pctl = ipctl->pctl;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
u32 nfuncs = 0;
u32 i = 0;
bool flat_funcs;
@@ -681,35 +652,50 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
}
}
- info->nfunctions = nfuncs;
- info->functions = devm_kzalloc(&pdev->dev, nfuncs * sizeof(struct imx_pmx_func),
+ for (i = 0; i < nfuncs; i++) {
+ struct function_desc *function;
+
+ function = devm_kzalloc(&pdev->dev, sizeof(*function),
GFP_KERNEL);
- if (!info->functions)
- return -ENOMEM;
+ if (!function)
+ return -ENOMEM;
+
+ mutex_lock(&info->mutex);
+ radix_tree_insert(&pctl->pin_function_tree, i, function);
+ mutex_unlock(&info->mutex);
+ }
+ pctl->num_functions = nfuncs;
info->group_index = 0;
if (flat_funcs) {
- info->ngroups = of_get_child_count(np);
+ pctl->num_groups = of_get_child_count(np);
} else {
- info->ngroups = 0;
+ pctl->num_groups = 0;
for_each_child_of_node(np, child)
- info->ngroups += of_get_child_count(child);
+ pctl->num_groups += of_get_child_count(child);
}
- info->groups = devm_kzalloc(&pdev->dev, info->ngroups * sizeof(struct imx_pin_group),
- GFP_KERNEL);
- if (!info->groups)
- return -ENOMEM;
if (flat_funcs) {
- imx_pinctrl_parse_functions(np, info, 0);
+ imx_pinctrl_parse_functions(np, ipctl, 0);
} else {
+ i = 0;
for_each_child_of_node(np, child)
- imx_pinctrl_parse_functions(child, info, i++);
+ imx_pinctrl_parse_functions(child, ipctl, i++);
}
return 0;
}
+/*
+ * imx_free_resources() - free memory used by this driver
+ * @info: info driver instance
+ */
+static void imx_free_resources(struct imx_pinctrl *ipctl)
+{
+ if (ipctl->pctl)
+ pinctrl_unregister(ipctl->pctl);
+}
+
int imx_pinctrl_probe(struct platform_device *pdev,
struct imx_pinctrl_soc_info *info)
{
@@ -783,23 +769,31 @@ int imx_pinctrl_probe(struct platform_device *pdev,
imx_pinctrl_desc->confops = &imx_pinconf_ops;
imx_pinctrl_desc->owner = THIS_MODULE;
- ret = imx_pinctrl_probe_dt(pdev, info);
- if (ret) {
- dev_err(&pdev->dev, "fail to probe dt properties\n");
- return ret;
- }
+ mutex_init(&info->mutex);
ipctl->info = info;
ipctl->dev = info->dev;
platform_set_drvdata(pdev, ipctl);
- ipctl->pctl = devm_pinctrl_register(&pdev->dev,
- imx_pinctrl_desc, ipctl);
- if (IS_ERR(ipctl->pctl)) {
+ ret = devm_pinctrl_register_and_init(&pdev->dev,
+ imx_pinctrl_desc, ipctl,
+ &ipctl->pctl);
+ if (ret) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
- return PTR_ERR(ipctl->pctl);
+ goto free;
+ }
+
+ ret = imx_pinctrl_probe_dt(pdev, ipctl);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to probe dt properties\n");
+ goto free;
}
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return 0;
+
+free:
+ imx_free_resources(ipctl);
+
+ return ret;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 8af8aa2897ab..ff2d3e56b7c5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -18,7 +18,7 @@
struct platform_device;
/**
- * struct imx_pin_group - describes a single i.MX pin
+ * struct imx_pin - describes a single i.MX pin
* @pin: the pin_id of this pin
* @mux_mode: the mux mode for this pin.
* @input_reg: the select input register offset for this pin if any
@@ -35,33 +35,6 @@ struct imx_pin {
};
/**
- * struct imx_pin_group - describes an IMX pin group
- * @name: the name of this specific pin group
- * @npins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
- * @pin_ids: array of pin_ids. pinctrl forces us to maintain such an array
- * @pins: array of pins
- */
-struct imx_pin_group {
- const char *name;
- unsigned npins;
- unsigned int *pin_ids;
- struct imx_pin *pins;
-};
-
-/**
- * struct imx_pmx_func - describes IMX pinmux functions
- * @name: the name of this specific function
- * @groups: corresponding pin groups
- * @num_groups: the number of groups
- */
-struct imx_pmx_func {
- const char *name;
- const char **groups;
- unsigned num_groups;
-};
-
-/**
* struct imx_pin_reg - describe a pin reg map
* @mux_reg: mux register offset
* @conf_reg: config register offset
@@ -76,13 +49,10 @@ struct imx_pinctrl_soc_info {
const struct pinctrl_pin_desc *pins;
unsigned int npins;
struct imx_pin_reg *pin_regs;
- struct imx_pin_group *groups;
- unsigned int ngroups;
unsigned int group_index;
- struct imx_pmx_func *functions;
- unsigned int nfunctions;
unsigned int flags;
const char *gpr_compatible;
+ struct mutex mutex;
};
#define SHARE_MUX_CONF_REG 0x1
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 00fb055a4897..396830a41127 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -56,6 +56,14 @@ config PINCTRL_BROXTON
Broxton pinctrl driver provides an interface that allows
configuring of SoC pins and using them as GPIOs.
+config PINCTRL_GEMINILAKE
+ tristate "Intel Gemini Lake SoC pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Gemini Lake SoC pins and using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 30803078f09e..12f3af5b2ca5 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
+obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 37300634b7d2..fa3c5758ac67 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
int reg)
{
struct byt_community *comm = byt_get_community(vg, offset);
- u32 reg_offset = 0;
+ u32 reg_offset;
if (!comm)
return NULL;
offset -= comm->pin_base;
- if (reg == BYT_INT_STAT_REG)
+ switch (reg) {
+ case BYT_INT_STAT_REG:
reg_offset = (offset / 32) * 4;
- else
+ break;
+ case BYT_DEBOUNCE_REG:
+ reg_offset = 0;
+ break;
+ default:
reg_offset = comm->pad_map[offset] * 16;
+ break;
+ }
return comm->reg_base + reg_offset + reg;
}
@@ -1092,6 +1099,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, pull, val, debounce;
u16 arg = 0;
@@ -1128,7 +1136,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
- debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+ debounce = readl(db_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1184,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, val, debounce;
int i, ret = 0;
@@ -1238,36 +1247,44 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- debounce = readl(byt_gpio_reg(vg, offset,
- BYT_DEBOUNCE_REG));
- conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+ debounce = readl(db_reg);
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+
+ if (arg)
+ conf |= BYT_DEBOUNCE_EN;
+ else
+ conf &= ~BYT_DEBOUNCE_EN;
switch (arg) {
case 375:
- conf |= BYT_DEBOUNCE_PULSE_375US;
+ debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
- conf |= BYT_DEBOUNCE_PULSE_750US;
+ debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
- conf |= BYT_DEBOUNCE_PULSE_1500US;
+ debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
- conf |= BYT_DEBOUNCE_PULSE_3MS;
+ debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
- conf |= BYT_DEBOUNCE_PULSE_6MS;
+ debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
- conf |= BYT_DEBOUNCE_PULSE_12MS;
+ debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
- conf |= BYT_DEBOUNCE_PULSE_24MS;
+ debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
- ret = -EINVAL;
+ if (arg)
+ ret = -EINVAL;
+ break;
}
+ if (!ret)
+ writel(debounce, db_reg);
break;
default:
ret = -ENOTSUPP;
@@ -1449,7 +1466,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
- comm->pad_map[i], comm->pad_map[i] * 32,
+ comm->pad_map[i], comm->pad_map[i] * 16,
conf0 & 0x7,
conf0 & BYT_TRIG_NEG ? " fall" : " ",
conf0 & BYT_TRIG_POS ? " rise" : " ",
@@ -1606,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
+ raw_spin_lock(&vg->lock);
pending = readl(reg);
+ raw_spin_unlock(&vg->lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
@@ -1617,6 +1636,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
{
+ struct gpio_chip *gc = &vg->chip;
+ struct device *dev = &vg->pdev->dev;
void __iomem *reg;
u32 base, value;
int i;
@@ -1638,10 +1659,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
value = readl(reg);
- if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
- !(value & BYT_DIRECT_IRQ_EN)) {
+ if (value & BYT_DIRECT_IRQ_EN) {
+ clear_bit(i, gc->irq_valid_mask);
+ dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+ dev_dbg(dev, "disabling GPIO %d\n", i);
}
}
@@ -1680,12 +1703,13 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
+ gc->irq_need_valid_mask = true;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
sizeof(*vg->saved_context), GFP_KERNEL);
#endif
- ret = gpiochip_add_data(gc, vg);
+ ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
if (ret) {
dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
return ret;
@@ -1695,7 +1719,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
0, 0, vg->soc_data->npins);
if (ret) {
dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
- goto fail;
+ return ret;
}
/* set up interrupts */
@@ -1706,7 +1730,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&vg->pdev->dev, "failed to add irqchip\n");
- goto fail;
+ return ret;
}
gpiochip_set_chained_irqchip(gc, &byt_irqchip,
@@ -1715,11 +1739,6 @@ static int byt_gpio_probe(struct byt_gpio *vg)
}
return ret;
-
-fail:
- gpiochip_remove(&vg->chip);
-
- return ret;
}
static int byt_set_soc_data(struct byt_gpio *vg,
@@ -1802,7 +1821,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
vg->pctl_desc.pins = vg->soc_data->pins;
vg->pctl_desc.npins = vg->soc_data->npins;
- vg->pctl_dev = pinctrl_register(&vg->pctl_desc, &pdev->dev, vg);
+ vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
if (IS_ERR(vg->pctl_dev)) {
dev_err(&pdev->dev, "failed to register pinctrl driver\n");
return PTR_ERR(vg->pctl_dev);
@@ -1811,10 +1830,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
raw_spin_lock_init(&vg->lock);
ret = byt_gpio_probe(vg);
- if (ret) {
- pinctrl_unregister(vg->pctl_dev);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, vg);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6fc5be..e6e6fd112585 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
#define BXT_PAD_OWN 0x020
#define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
#define BXT_GPI_IE 0x110
#define BXT_COMMUNITY(s, e) \
@@ -1004,8 +1004,8 @@ static const struct acpi_device_id bxt_pinctrl_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match);
static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
- { "apl-pinctrl", (kernel_ulong_t)&apl_pinctrl_soc_data },
- { "broxton-pinctrl", (kernel_ulong_t)&bxt_pinctrl_soc_data },
+ { "apollolake-pinctrl", (kernel_ulong_t)apl_pinctrl_soc_data },
+ { "broxton-pinctrl", (kernel_ulong_t)bxt_pinctrl_soc_data },
{ },
};
@@ -1058,7 +1058,6 @@ static const struct dev_pm_ops bxt_pinctrl_pm_ops = {
static struct platform_driver bxt_pinctrl_driver = {
.probe = bxt_pinctrl_probe,
- .remove = intel_pinctrl_remove,
.driver = {
.name = "broxton-pinctrl",
.acpi_match_table = bxt_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 5e66860a5e67..f80134e3e0b6 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1059,7 +1059,7 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
}
static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
- enum pin_config_param param, u16 arg)
+ enum pin_config_param param, u32 arg)
{
void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
unsigned long flags;
@@ -1151,7 +1151,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
int i, ret;
- u16 arg;
+ u32 arg;
if (chv_pad_locked(pctrl, pin))
return -EBUSY;
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
new file mode 100644
index 000000000000..a6b94c930007
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -0,0 +1,512 @@
+/*
+ * Intel Gemini Lake SoC pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017 Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define GLK_PAD_OWN 0x020
+#define GLK_HOSTSW_OWN 0x0b0
+#define GLK_PADCFGLOCK 0x080
+#define GLK_GPI_IE 0x110
+
+#define GLK_COMMUNITY(s, e) \
+ { \
+ .padown_offset = GLK_PAD_OWN, \
+ .padcfglock_offset = GLK_PADCFGLOCK, \
+ .hostown_offset = GLK_HOSTSW_OWN, \
+ .ie_offset = GLK_GPI_IE, \
+ .gpp_size = 32, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ }
+
+/* GLK */
+static const struct pinctrl_pin_desc glk_northwest_pins[] = {
+ PINCTRL_PIN(0, "TCK"),
+ PINCTRL_PIN(1, "TRST_B"),
+ PINCTRL_PIN(2, "TMS"),
+ PINCTRL_PIN(3, "TDI"),
+ PINCTRL_PIN(4, "TDO"),
+ PINCTRL_PIN(5, "JTAGX"),
+ PINCTRL_PIN(6, "CX_PREQ_B"),
+ PINCTRL_PIN(7, "CX_PRDY_B"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GP_INTD_DSI_TE1"),
+ PINCTRL_PIN(43, "GP_INTD_DSI_TE2"),
+ PINCTRL_PIN(44, "USB_OC0_B"),
+ PINCTRL_PIN(45, "USB_OC1_B"),
+ PINCTRL_PIN(46, "DSI_I2C_SDA"),
+ PINCTRL_PIN(47, "DSI_I2C_SCL"),
+ PINCTRL_PIN(48, "PMC_I2C_SDA"),
+ PINCTRL_PIN(49, "PMC_I2C_SCL"),
+ PINCTRL_PIN(50, "LPSS_I2C0_SDA"),
+ PINCTRL_PIN(51, "LPSS_I2C0_SCL"),
+ PINCTRL_PIN(52, "LPSS_I2C1_SDA"),
+ PINCTRL_PIN(53, "LPSS_I2C1_SCL"),
+ PINCTRL_PIN(54, "LPSS_I2C2_SDA"),
+ PINCTRL_PIN(55, "LPSS_I2C2_SCL"),
+ PINCTRL_PIN(56, "LPSS_I2C3_SDA"),
+ PINCTRL_PIN(57, "LPSS_I2C3_SCL"),
+ PINCTRL_PIN(58, "LPSS_I2C4_SDA"),
+ PINCTRL_PIN(59, "LPSS_I2C4_SCL"),
+ PINCTRL_PIN(60, "LPSS_UART0_RXD"),
+ PINCTRL_PIN(61, "LPSS_UART0_TXD"),
+ PINCTRL_PIN(62, "LPSS_UART0_RTS_B"),
+ PINCTRL_PIN(63, "LPSS_UART0_CTS_B"),
+ PINCTRL_PIN(64, "LPSS_UART2_RXD"),
+ PINCTRL_PIN(65, "LPSS_UART2_TXD"),
+ PINCTRL_PIN(66, "LPSS_UART2_RTS_B"),
+ PINCTRL_PIN(67, "LPSS_UART2_CTS_B"),
+ PINCTRL_PIN(68, "PMC_SPI_FS0"),
+ PINCTRL_PIN(69, "PMC_SPI_FS1"),
+ PINCTRL_PIN(70, "PMC_SPI_FS2"),
+ PINCTRL_PIN(71, "PMC_SPI_RXD"),
+ PINCTRL_PIN(72, "PMC_SPI_TXD"),
+ PINCTRL_PIN(73, "PMC_SPI_CLK"),
+ PINCTRL_PIN(74, "THERMTRIP_B"),
+ PINCTRL_PIN(75, "PROCHOT_B"),
+ PINCTRL_PIN(76, "EMMC_RST_B"),
+ PINCTRL_PIN(77, "GPIO_212"),
+ PINCTRL_PIN(78, "GPIO_213"),
+ PINCTRL_PIN(79, "GPIO_214"),
+};
+
+static const unsigned int glk_northwest_uart1_pins[] = { 26, 27, 28, 29 };
+static const unsigned int glk_northwest_pwm0_pins[] = { 42 };
+static const unsigned int glk_northwest_pwm1_pins[] = { 43 };
+static const unsigned int glk_northwest_pwm2_pins[] = { 44 };
+static const unsigned int glk_northwest_pwm3_pins[] = { 45 };
+static const unsigned int glk_northwest_i2c0_pins[] = { 50, 51 };
+static const unsigned int glk_northwest_i2c1_pins[] = { 52, 53 };
+static const unsigned int glk_northwest_i2c2_pins[] = { 54, 55 };
+static const unsigned int glk_northwest_i2c3_pins[] = { 56, 57 };
+static const unsigned int glk_northwest_i2c4_pins[] = { 58, 59 };
+static const unsigned int glk_northwest_uart0_pins[] = { 60, 61, 62, 63 };
+static const unsigned int glk_northwest_uart2_pins[] = { 64, 65, 66, 67 };
+
+static const struct intel_pingroup glk_northwest_groups[] = {
+ PIN_GROUP("uart1_grp", glk_northwest_uart1_pins, 2),
+ PIN_GROUP("pwm0_grp", glk_northwest_pwm0_pins, 2),
+ PIN_GROUP("pwm1_grp", glk_northwest_pwm1_pins, 2),
+ PIN_GROUP("pwm2_grp", glk_northwest_pwm2_pins, 2),
+ PIN_GROUP("pwm3_grp", glk_northwest_pwm3_pins, 2),
+ PIN_GROUP("i2c0_grp", glk_northwest_i2c0_pins, 1),
+ PIN_GROUP("i2c1_grp", glk_northwest_i2c1_pins, 1),
+ PIN_GROUP("i2c2_grp", glk_northwest_i2c2_pins, 1),
+ PIN_GROUP("i2c3_grp", glk_northwest_i2c3_pins, 1),
+ PIN_GROUP("i2c4_grp", glk_northwest_i2c4_pins, 1),
+ PIN_GROUP("uart0_grp", glk_northwest_uart0_pins, 1),
+ PIN_GROUP("uart2_grp", glk_northwest_uart2_pins, 1),
+};
+
+static const char * const glk_northwest_uart1_groups[] = { "uart1_grp" };
+static const char * const glk_northwest_pwm0_groups[] = { "pwm0_grp" };
+static const char * const glk_northwest_pwm1_groups[] = { "pwm1_grp" };
+static const char * const glk_northwest_pwm2_groups[] = { "pwm2_grp" };
+static const char * const glk_northwest_pwm3_groups[] = { "pwm3_grp" };
+static const char * const glk_northwest_i2c0_groups[] = { "i2c0_grp" };
+static const char * const glk_northwest_i2c1_groups[] = { "i2c1_grp" };
+static const char * const glk_northwest_i2c2_groups[] = { "i2c2_grp" };
+static const char * const glk_northwest_i2c3_groups[] = { "i2c3_grp" };
+static const char * const glk_northwest_i2c4_groups[] = { "i2c4_grp" };
+static const char * const glk_northwest_uart0_groups[] = { "uart0_grp" };
+static const char * const glk_northwest_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function glk_northwest_functions[] = {
+ FUNCTION("uart1", glk_northwest_uart1_groups),
+ FUNCTION("pmw0", glk_northwest_pwm0_groups),
+ FUNCTION("pmw1", glk_northwest_pwm1_groups),
+ FUNCTION("pmw2", glk_northwest_pwm2_groups),
+ FUNCTION("pmw3", glk_northwest_pwm3_groups),
+ FUNCTION("i2c0", glk_northwest_i2c0_groups),
+ FUNCTION("i2c1", glk_northwest_i2c1_groups),
+ FUNCTION("i2c2", glk_northwest_i2c2_groups),
+ FUNCTION("i2c3", glk_northwest_i2c3_groups),
+ FUNCTION("i2c4", glk_northwest_i2c4_groups),
+ FUNCTION("uart0", glk_northwest_uart0_groups),
+ FUNCTION("uart2", glk_northwest_uart2_groups),
+};
+
+static const struct intel_community glk_northwest_communities[] = {
+ GLK_COMMUNITY(0, 79),
+};
+
+static const struct intel_pinctrl_soc_data glk_northwest_soc_data = {
+ .uid = "1",
+ .pins = glk_northwest_pins,
+ .npins = ARRAY_SIZE(glk_northwest_pins),
+ .groups = glk_northwest_groups,
+ .ngroups = ARRAY_SIZE(glk_northwest_groups),
+ .functions = glk_northwest_functions,
+ .nfunctions = ARRAY_SIZE(glk_northwest_functions),
+ .communities = glk_northwest_communities,
+ .ncommunities = ARRAY_SIZE(glk_northwest_communities),
+};
+
+static const struct pinctrl_pin_desc glk_north_pins[] = {
+ PINCTRL_PIN(0, "SVID0_ALERT_B"),
+ PINCTRL_PIN(1, "SVID0_DATA"),
+ PINCTRL_PIN(2, "SVID0_CLK"),
+ PINCTRL_PIN(3, "LPSS_SPI_0_CLK"),
+ PINCTRL_PIN(4, "LPSS_SPI_0_FS0"),
+ PINCTRL_PIN(5, "LPSS_SPI_0_FS1"),
+ PINCTRL_PIN(6, "LPSS_SPI_0_RXD"),
+ PINCTRL_PIN(7, "LPSS_SPI_0_TXD"),
+ PINCTRL_PIN(8, "LPSS_SPI_1_CLK"),
+ PINCTRL_PIN(9, "LPSS_SPI_1_FS0"),
+ PINCTRL_PIN(10, "LPSS_SPI_1_FS1"),
+ PINCTRL_PIN(11, "LPSS_SPI_1_FS2"),
+ PINCTRL_PIN(12, "LPSS_SPI_1_RXD"),
+ PINCTRL_PIN(13, "LPSS_SPI_1_TXD"),
+ PINCTRL_PIN(14, "FST_SPI_CS0_B"),
+ PINCTRL_PIN(15, "FST_SPI_CS1_B"),
+ PINCTRL_PIN(16, "FST_SPI_MOSI_IO0"),
+ PINCTRL_PIN(17, "FST_SPI_MISO_IO1"),
+ PINCTRL_PIN(18, "FST_SPI_IO2"),
+ PINCTRL_PIN(19, "FST_SPI_IO3"),
+ PINCTRL_PIN(20, "FST_SPI_CLK"),
+ PINCTRL_PIN(21, "FST_SPI_CLK_FB"),
+ PINCTRL_PIN(22, "PMU_PLTRST_B"),
+ PINCTRL_PIN(23, "PMU_PWRBTN_B"),
+ PINCTRL_PIN(24, "PMU_SLP_S0_B"),
+ PINCTRL_PIN(25, "PMU_SLP_S3_B"),
+ PINCTRL_PIN(26, "PMU_SLP_S4_B"),
+ PINCTRL_PIN(27, "SUSPWRDNACK"),
+ PINCTRL_PIN(28, "EMMC_PWR_EN_B"),
+ PINCTRL_PIN(29, "PMU_AC_PRESENT"),
+ PINCTRL_PIN(30, "PMU_BATLOW_B"),
+ PINCTRL_PIN(31, "PMU_RESETBUTTON_B"),
+ PINCTRL_PIN(32, "PMU_SUSCLK"),
+ PINCTRL_PIN(33, "SUS_STAT_B"),
+ PINCTRL_PIN(34, "LPSS_I2C5_SDA"),
+ PINCTRL_PIN(35, "LPSS_I2C5_SCL"),
+ PINCTRL_PIN(36, "LPSS_I2C6_SDA"),
+ PINCTRL_PIN(37, "LPSS_I2C6_SCL"),
+ PINCTRL_PIN(38, "LPSS_I2C7_SDA"),
+ PINCTRL_PIN(39, "LPSS_I2C7_SCL"),
+ PINCTRL_PIN(40, "PCIE_WAKE0_B"),
+ PINCTRL_PIN(41, "PCIE_WAKE1_B"),
+ PINCTRL_PIN(42, "PCIE_WAKE2_B"),
+ PINCTRL_PIN(43, "PCIE_WAKE3_B"),
+ PINCTRL_PIN(44, "PCIE_CLKREQ0_B"),
+ PINCTRL_PIN(45, "PCIE_CLKREQ1_B"),
+ PINCTRL_PIN(46, "PCIE_CLKREQ2_B"),
+ PINCTRL_PIN(47, "PCIE_CLKREQ3_B"),
+ PINCTRL_PIN(48, "HV_DDI0_DDC_SDA"),
+ PINCTRL_PIN(49, "HV_DDI0_DDC_SCL"),
+ PINCTRL_PIN(50, "HV_DDI1_DDC_SDA"),
+ PINCTRL_PIN(51, "HV_DDI1_DDC_SCL"),
+ PINCTRL_PIN(52, "PANEL0_VDDEN"),
+ PINCTRL_PIN(53, "PANEL0_BKLTEN"),
+ PINCTRL_PIN(54, "PANEL0_BKLTCTL"),
+ PINCTRL_PIN(55, "HV_DDI0_HPD"),
+ PINCTRL_PIN(56, "HV_DDI1_HPD"),
+ PINCTRL_PIN(57, "HV_EDP_HPD"),
+ PINCTRL_PIN(58, "GPIO_134"),
+ PINCTRL_PIN(59, "GPIO_135"),
+ PINCTRL_PIN(60, "GPIO_136"),
+ PINCTRL_PIN(61, "GPIO_137"),
+ PINCTRL_PIN(62, "GPIO_138"),
+ PINCTRL_PIN(63, "GPIO_139"),
+ PINCTRL_PIN(64, "GPIO_140"),
+ PINCTRL_PIN(65, "GPIO_141"),
+ PINCTRL_PIN(66, "GPIO_142"),
+ PINCTRL_PIN(67, "GPIO_143"),
+ PINCTRL_PIN(68, "GPIO_144"),
+ PINCTRL_PIN(69, "GPIO_145"),
+ PINCTRL_PIN(70, "GPIO_146"),
+ PINCTRL_PIN(71, "LPC_ILB_SERIRQ"),
+ PINCTRL_PIN(72, "LPC_CLKOUT0"),
+ PINCTRL_PIN(73, "LPC_CLKOUT1"),
+ PINCTRL_PIN(74, "LPC_AD0"),
+ PINCTRL_PIN(75, "LPC_AD1"),
+ PINCTRL_PIN(76, "LPC_AD2"),
+ PINCTRL_PIN(77, "LPC_AD3"),
+ PINCTRL_PIN(78, "LPC_CLKRUNB"),
+ PINCTRL_PIN(79, "LPC_FRAMEB"),
+};
+
+static const unsigned int glk_north_spi0_pins[] = { 3, 4, 5, 6, 7 };
+static const unsigned int glk_north_spi1_pins[] = { 8, 9, 10, 11, 12, 13 };
+static const unsigned int glk_north_i2c5_pins[] = { 34, 35 };
+static const unsigned int glk_north_i2c6_pins[] = { 36, 37 };
+static const unsigned int glk_north_i2c7_pins[] = { 38, 39 };
+static const unsigned int glk_north_uart0_pins[] = { 62, 63, 64, 65 };
+static const unsigned int glk_north_spi0b_pins[] = { 66, 67, 68, 69, 70 };
+
+static const struct intel_pingroup glk_north_groups[] = {
+ PIN_GROUP("spi0_grp", glk_north_spi0_pins, 1),
+ PIN_GROUP("spi1_grp", glk_north_spi1_pins, 1),
+ PIN_GROUP("i2c5_grp", glk_north_i2c5_pins, 1),
+ PIN_GROUP("i2c6_grp", glk_north_i2c6_pins, 1),
+ PIN_GROUP("i2c7_grp", glk_north_i2c7_pins, 1),
+ PIN_GROUP("uart0_grp", glk_north_uart0_pins, 2),
+ PIN_GROUP("spi0b_grp", glk_north_spi0b_pins, 2),
+};
+
+static const char * const glk_north_spi0_groups[] = { "spi0_grp", "spi0b_grp" };
+static const char * const glk_north_spi1_groups[] = { "spi1_grp" };
+static const char * const glk_north_i2c5_groups[] = { "i2c5_grp" };
+static const char * const glk_north_i2c6_groups[] = { "i2c6_grp" };
+static const char * const glk_north_i2c7_groups[] = { "i2c7_grp" };
+static const char * const glk_north_uart0_groups[] = { "uart0_grp" };
+
+static const struct intel_function glk_north_functions[] = {
+ FUNCTION("spi0", glk_north_spi0_groups),
+ FUNCTION("spi1", glk_north_spi1_groups),
+ FUNCTION("i2c5", glk_north_i2c5_groups),
+ FUNCTION("i2c6", glk_north_i2c6_groups),
+ FUNCTION("i2c7", glk_north_i2c7_groups),
+ FUNCTION("uart0", glk_north_uart0_groups),
+};
+
+static const struct intel_community glk_north_communities[] = {
+ GLK_COMMUNITY(0, 79),
+};
+
+static const struct intel_pinctrl_soc_data glk_north_soc_data = {
+ .uid = "2",
+ .pins = glk_north_pins,
+ .npins = ARRAY_SIZE(glk_north_pins),
+ .groups = glk_north_groups,
+ .ngroups = ARRAY_SIZE(glk_north_groups),
+ .functions = glk_north_functions,
+ .nfunctions = ARRAY_SIZE(glk_north_functions),
+ .communities = glk_north_communities,
+ .ncommunities = ARRAY_SIZE(glk_north_communities),
+};
+
+static const struct pinctrl_pin_desc glk_audio_pins[] = {
+ PINCTRL_PIN(0, "AVS_I2S0_MCLK"),
+ PINCTRL_PIN(1, "AVS_I2S0_BCLK"),
+ PINCTRL_PIN(2, "AVS_I2S0_WS_SYNC"),
+ PINCTRL_PIN(3, "AVS_I2S0_SDI"),
+ PINCTRL_PIN(4, "AVS_I2S0_SDO"),
+ PINCTRL_PIN(5, "AVS_I2S1_MCLK"),
+ PINCTRL_PIN(6, "AVS_I2S1_BCLK"),
+ PINCTRL_PIN(7, "AVS_I2S1_WS_SYNC"),
+ PINCTRL_PIN(8, "AVS_I2S1_SDI"),
+ PINCTRL_PIN(9, "AVS_I2S1_SDO"),
+ PINCTRL_PIN(10, "AVS_HDA_BCLK"),
+ PINCTRL_PIN(11, "AVS_HDA_WS_SYNC"),
+ PINCTRL_PIN(12, "AVS_HDA_SDI"),
+ PINCTRL_PIN(13, "AVS_HDA_SDO"),
+ PINCTRL_PIN(14, "AVS_HDA_RSTB"),
+ PINCTRL_PIN(15, "AVS_M_CLK_A1"),
+ PINCTRL_PIN(16, "AVS_M_CLK_B1"),
+ PINCTRL_PIN(17, "AVS_M_DATA_1"),
+ PINCTRL_PIN(18, "AVS_M_CLK_AB2"),
+ PINCTRL_PIN(19, "AVS_M_DATA_2"),
+};
+
+static const struct intel_community glk_audio_communities[] = {
+ GLK_COMMUNITY(0, 19),
+};
+
+static const struct intel_pinctrl_soc_data glk_audio_soc_data = {
+ .uid = "3",
+ .pins = glk_audio_pins,
+ .npins = ARRAY_SIZE(glk_audio_pins),
+ .communities = glk_audio_communities,
+ .ncommunities = ARRAY_SIZE(glk_audio_communities),
+};
+
+static const struct pinctrl_pin_desc glk_scc_pins[] = {
+ PINCTRL_PIN(0, "SMB_ALERTB"),
+ PINCTRL_PIN(1, "SMB_CLK"),
+ PINCTRL_PIN(2, "SMB_DATA"),
+ PINCTRL_PIN(3, "SDCARD_LVL_WP"),
+ PINCTRL_PIN(4, "SDCARD_CLK"),
+ PINCTRL_PIN(5, "SDCARD_CLK_FB"),
+ PINCTRL_PIN(6, "SDCARD_D0"),
+ PINCTRL_PIN(7, "SDCARD_D1"),
+ PINCTRL_PIN(8, "SDCARD_D2"),
+ PINCTRL_PIN(9, "SDCARD_D3"),
+ PINCTRL_PIN(10, "SDCARD_CMD"),
+ PINCTRL_PIN(11, "SDCARD_CD_B"),
+ PINCTRL_PIN(12, "SDCARD_PWR_DOWN_B"),
+ PINCTRL_PIN(13, "GPIO_210"),
+ PINCTRL_PIN(14, "OSC_CLK_OUT_0"),
+ PINCTRL_PIN(15, "OSC_CLK_OUT_1"),
+ PINCTRL_PIN(16, "CNV_BRI_DT"),
+ PINCTRL_PIN(17, "CNV_BRI_RSP"),
+ PINCTRL_PIN(18, "CNV_RGI_DT"),
+ PINCTRL_PIN(19, "CNV_RGI_RSP"),
+ PINCTRL_PIN(20, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(21, "XTAL_CLKREQ"),
+ PINCTRL_PIN(22, "SDIO_CLK_FB"),
+ PINCTRL_PIN(23, "EMMC0_CLK"),
+ PINCTRL_PIN(24, "EMMC0_CLK_FB"),
+ PINCTRL_PIN(25, "EMMC0_D0"),
+ PINCTRL_PIN(26, "EMMC0_D1"),
+ PINCTRL_PIN(27, "EMMC0_D2"),
+ PINCTRL_PIN(28, "EMMC0_D3"),
+ PINCTRL_PIN(29, "EMMC0_D4"),
+ PINCTRL_PIN(30, "EMMC0_D5"),
+ PINCTRL_PIN(31, "EMMC0_D6"),
+ PINCTRL_PIN(32, "EMMC0_D7"),
+ PINCTRL_PIN(33, "EMMC0_CMD"),
+ PINCTRL_PIN(34, "EMMC0_STROBE"),
+};
+
+static const unsigned int glk_scc_i2c7_pins[] = { 1, 2 };
+static const unsigned int glk_scc_sdcard_pins[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+static const unsigned int glk_scc_sdio_pins[] = { 16, 17, 18, 19, 20, 21, 22 };
+static const unsigned int glk_scc_uart1_pins[] = { 16, 17, 18, 19 };
+static const unsigned int glk_scc_emmc_pins[] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+};
+
+static const struct intel_pingroup glk_scc_groups[] = {
+ PIN_GROUP("i2c7_grp", glk_scc_i2c7_pins, 2),
+ PIN_GROUP("sdcard_grp", glk_scc_sdcard_pins, 1),
+ PIN_GROUP("sdio_grp", glk_scc_sdio_pins, 2),
+ PIN_GROUP("uart1_grp", glk_scc_uart1_pins, 3),
+ PIN_GROUP("emmc_grp", glk_scc_emmc_pins, 1),
+};
+
+static const char * const glk_scc_i2c7_groups[] = { "i2c7_grp" };
+static const char * const glk_scc_sdcard_groups[] = { "sdcard_grp" };
+static const char * const glk_scc_sdio_groups[] = { "sdio_grp" };
+static const char * const glk_scc_uart1_groups[] = { "uart1_grp" };
+static const char * const glk_scc_emmc_groups[] = { "emmc_grp" };
+
+static const struct intel_function glk_scc_functions[] = {
+ FUNCTION("i2c7", glk_scc_i2c7_groups),
+ FUNCTION("sdcard", glk_scc_sdcard_groups),
+ FUNCTION("sdio", glk_scc_sdio_groups),
+ FUNCTION("uart1", glk_scc_uart1_groups),
+ FUNCTION("emmc", glk_scc_emmc_groups),
+};
+
+static const struct intel_community glk_scc_communities[] = {
+ GLK_COMMUNITY(0, 34),
+};
+
+static const struct intel_pinctrl_soc_data glk_scc_soc_data = {
+ .uid = "4",
+ .pins = glk_scc_pins,
+ .npins = ARRAY_SIZE(glk_scc_pins),
+ .groups = glk_scc_groups,
+ .ngroups = ARRAY_SIZE(glk_scc_groups),
+ .functions = glk_scc_functions,
+ .nfunctions = ARRAY_SIZE(glk_scc_functions),
+ .communities = glk_scc_communities,
+ .ncommunities = ARRAY_SIZE(glk_scc_communities),
+};
+
+static const struct intel_pinctrl_soc_data *glk_pinctrl_soc_data[] = {
+ &glk_northwest_soc_data,
+ &glk_north_soc_data,
+ &glk_audio_soc_data,
+ &glk_scc_soc_data,
+ NULL,
+};
+
+static const struct acpi_device_id glk_pinctrl_acpi_match[] = {
+ { "INT3453" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, glk_pinctrl_acpi_match);
+
+static int glk_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc_data = NULL;
+ struct acpi_device *adev;
+ int i;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return -ENODEV;
+
+ for (i = 0; glk_pinctrl_soc_data[i]; i++) {
+ if (!strcmp(adev->pnp.unique_id,
+ glk_pinctrl_soc_data[i]->uid)) {
+ soc_data = glk_pinctrl_soc_data[i];
+ break;
+ }
+ }
+
+ if (!soc_data)
+ return -ENODEV;
+
+ return intel_pinctrl_probe(pdev, soc_data);
+}
+
+static const struct dev_pm_ops glk_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static struct platform_driver glk_pinctrl_driver = {
+ .probe = glk_pinctrl_probe,
+ .driver = {
+ .name = "geminilake-pinctrl",
+ .acpi_match_table = glk_pinctrl_acpi_match,
+ .pm = &glk_pinctrl_pm_ops,
+ },
+};
+
+static int __init glk_pinctrl_init(void)
+{
+ return platform_driver_register(&glk_pinctrl_driver);
+}
+subsys_initcall(glk_pinctrl_init);
+
+static void __exit glk_pinctrl_exit(void)
+{
+ platform_driver_unregister(&glk_pinctrl_driver);
+}
+module_exit(glk_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Gemini Lake SoC pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e139672f1af..592b465e981e 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -23,6 +24,10 @@
#include "pinctrl-intel.h"
/* Offset from regs */
+#define REVID 0x000
+#define REVID_SHIFT 16
+#define REVID_MASK GENMASK(31, 16)
+
#define PADBAR 0x00c
#define GPI_IS 0x100
#define GPI_GPE_STS 0x140
@@ -41,6 +46,7 @@
#define PADCFG0_RXEVCFG_EDGE 1
#define PADCFG0_RXEVCFG_DISABLED 2
#define PADCFG0_RXEVCFG_EDGE_BOTH 3
+#define PADCFG0_PREGFRXSEL BIT(24)
#define PADCFG0_RXINV BIT(23)
#define PADCFG0_GPIROUTIOXAPIC BIT(20)
#define PADCFG0_GPIROUTSCI BIT(19)
@@ -62,9 +68,17 @@
#define PADCFG1_TERM_5K 2
#define PADCFG1_TERM_1K 1
+#define PADCFG2 0x008
+#define PADCFG2_DEBEN BIT(0)
+#define PADCFG2_DEBOUNCE_SHIFT 1
+#define PADCFG2_DEBOUNCE_MASK GENMASK(4, 1)
+
+#define DEBOUNCE_PERIOD 31250 /* ns */
+
struct intel_pad_context {
u32 padcfg0;
u32 padcfg1;
+ u32 padcfg2;
};
struct intel_community_context {
@@ -126,13 +140,19 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
{
const struct intel_community *community;
unsigned padno;
+ size_t nregs;
community = intel_get_community(pctrl, pin);
if (!community)
return NULL;
padno = pin_to_padno(community, pin);
- return community->pad_regs + reg + padno * 8;
+ nregs = (community->features & PINCTRL_FEATURE_DEBOUNCE) ? 4 : 2;
+
+ if (reg == PADCFG2 && !(community->features & PINCTRL_FEATURE_DEBOUNCE))
+ return NULL;
+
+ return community->pad_regs + reg + padno * nregs * 4;
}
static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
@@ -244,6 +264,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned pin)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *padcfg;
u32 cfg0, cfg1, mode;
bool locked, acpi;
@@ -263,6 +284,11 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);
+ /* Dump the additional PADCFG registers if available */
+ padcfg = intel_get_padcfg(pctrl, pin, PADCFG2);
+ if (padcfg)
+ seq_printf(s, " 0x%08x", readl(padcfg));
+
locked = intel_pad_locked(pctrl, pin);
acpi = intel_pad_acpi_mode(pctrl, pin);
@@ -353,6 +379,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
return 0;
}
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+ u32 value;
+
+ value = readl(padcfg0);
+ if (input) {
+ value &= ~PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
+ } else {
+ value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ }
+ writel(value, padcfg0);
+}
+
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -375,11 +416,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- /* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
+ /* Disable TX buffer and enable RX (this will be input) */
+ __intel_gpio_set_direction(padcfg0, true);
+
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -392,18 +433,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
- u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
- value = readl(padcfg0);
- if (input)
- value |= PADCFG0_GPIOTXDIS;
- else
- value &= ~PADCFG0_GPIOTXDIS;
- writel(value, padcfg0);
+ __intel_gpio_set_direction(padcfg0, input);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -424,12 +458,14 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct intel_community *community;
u32 value, term;
- u16 arg = 0;
+ u32 arg = 0;
if (!intel_pad_owned_by_host(pctrl, pin))
return -ENOTSUPP;
+ community = intel_get_community(pctrl, pin);
value = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;
@@ -465,6 +501,11 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
return -EINVAL;
switch (term) {
+ case PADCFG1_TERM_1K:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+ arg = 1000;
+ break;
case PADCFG1_TERM_5K:
arg = 5000;
break;
@@ -475,6 +516,24 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
break;
+ case PIN_CONFIG_INPUT_DEBOUNCE: {
+ void __iomem *padcfg2;
+ u32 v;
+
+ padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
+ if (!padcfg2)
+ return -ENOTSUPP;
+
+ v = readl(padcfg2);
+ if (!(v & PADCFG2_DEBEN))
+ return -EINVAL;
+
+ v = (v & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT;
+ arg = BIT(v) * DEBOUNCE_PERIOD / 1000;
+
+ break;
+ }
+
default:
return -ENOTSUPP;
}
@@ -488,6 +547,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
{
unsigned param = pinconf_to_config_param(config);
unsigned arg = pinconf_to_config_argument(config);
+ const struct intel_community *community;
void __iomem *padcfg1;
unsigned long flags;
int ret = 0;
@@ -495,6 +555,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
raw_spin_lock_irqsave(&pctrl->lock, flags);
+ community = intel_get_community(pctrl, pin);
padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
value = readl(padcfg1);
@@ -537,6 +598,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
+ case 1000:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
+ ret = -EINVAL;
+ break;
+ }
+ value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ break;
default:
ret = -EINVAL;
}
@@ -552,6 +620,53 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
return ret;
}
+static int intel_config_set_debounce(struct intel_pinctrl *pctrl, unsigned pin,
+ unsigned debounce)
+{
+ void __iomem *padcfg0, *padcfg2;
+ unsigned long flags;
+ u32 value0, value2;
+ int ret = 0;
+
+ padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
+ if (!padcfg2)
+ return -ENOTSUPP;
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ value0 = readl(padcfg0);
+ value2 = readl(padcfg2);
+
+ /* Disable glitch filter and debouncer */
+ value0 &= ~PADCFG0_PREGFRXSEL;
+ value2 &= ~(PADCFG2_DEBEN | PADCFG2_DEBOUNCE_MASK);
+
+ if (debounce) {
+ unsigned long v;
+
+ v = order_base_2(debounce * 1000 / DEBOUNCE_PERIOD);
+ if (v < 3 || v > 15) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ } else {
+ /* Enable glitch filter and debouncer */
+ value0 |= PADCFG0_PREGFRXSEL;
+ value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
+ value2 |= PADCFG2_DEBEN;
+ }
+ }
+
+ writel(value0, padcfg0);
+ writel(value2, padcfg2);
+
+exit_unlock:
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return ret;
+}
+
static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *configs, unsigned nconfigs)
{
@@ -571,6 +686,13 @@ static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
return ret;
break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ ret = intel_config_set_debounce(pctrl, pin,
+ pinconf_to_config_argument(configs[i]));
+ if (ret)
+ return ret;
+ break;
+
default:
return -ENOTSUPP;
}
@@ -645,6 +767,7 @@ static const struct gpio_chip intel_gpio_chip = {
.direction_output = intel_gpio_direction_output,
.get = intel_gpio_get,
.set = intel_gpio_set,
+ .set_config = gpiochip_generic_config,
};
static void intel_gpio_irq_ack(struct irq_data *d)
@@ -884,7 +1007,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.base = -1;
pctrl->irq = irq;
- ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
@@ -894,7 +1017,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
0, 0, pctrl->soc->npins);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- goto fail;
+ return ret;
}
/*
@@ -907,24 +1030,19 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
dev_name(pctrl->dev), pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to request interrupt\n");
- goto fail;
+ return ret;
}
ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
- goto fail;
+ return ret;
}
gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
NULL);
return 0;
-
-fail:
- gpiochip_remove(&pctrl->chip);
-
- return ret;
}
static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
@@ -1005,6 +1123,20 @@ int intel_pinctrl_probe(struct platform_device *pdev,
if (IS_ERR(regs))
return PTR_ERR(regs);
+ /*
+ * Determine community features based on the revision if
+ * not specified already.
+ */
+ if (!community->features) {
+ u32 rev;
+
+ rev = (readl(regs + REVID) & REVID_MASK) >> REVID_SHIFT;
+ if (rev >= 0x94) {
+ community->features |= PINCTRL_FEATURE_DEBOUNCE;
+ community->features |= PINCTRL_FEATURE_1K_PD;
+ }
+ }
+
/* Read offset of the pad configuration registers */
padbar = readl(regs + PADBAR);
@@ -1046,16 +1178,6 @@ int intel_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe);
-int intel_pinctrl_remove(struct platform_device *pdev)
-{
- struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
-
#ifdef CONFIG_PM_SLEEP
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
{
@@ -1088,6 +1210,7 @@ int intel_pinctrl_suspend(struct device *dev)
pads = pctrl->context.pads;
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
+ void __iomem *padcfg;
u32 val;
if (!intel_pinctrl_should_save(pctrl, desc->number))
@@ -1097,6 +1220,10 @@ int intel_pinctrl_suspend(struct device *dev)
pads[i].padcfg0 = val & ~PADCFG0_GPIORXSTATE;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG1));
pads[i].padcfg1 = val;
+
+ padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
+ if (padcfg)
+ pads[i].padcfg2 = readl(padcfg);
}
communities = pctrl->context.communities;
@@ -1169,6 +1296,16 @@ int intel_pinctrl_resume(struct device *dev)
dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
desc->number, readl(padcfg));
}
+
+ padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
+ if (padcfg) {
+ val = readl(padcfg);
+ if (val != pads[i].padcfg2) {
+ writel(pads[i].padcfg2, padcfg);
+ dev_dbg(dev, "restored pin %u padcfg2 %#08x\n",
+ desc->number, readl(padcfg));
+ }
+ }
}
communities = pctrl->context.communities;
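
A minimal standalone sketch of the PADCFG2 debounce arithmetic used by intel_config_set_debounce() and intel_config_get() in the hunks above (this is not kernel code; the local order_base_2() helper is a simplified stand-in for the kernel's round-up-to-power-of-two helper of the same name, and DEBOUNCE_PERIOD_NS mirrors the 31250 ns DEBOUNCE_PERIOD granularity defined in the diff):

#include <stdio.h>

#define DEBOUNCE_PERIOD_NS	31250	/* granularity of the PADCFG2[4:1] field */

/* Round up to the next power-of-two exponent (order_base_2(1) == 0). */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long debounce_us = 3000;	/* example request from pinconf */
	unsigned int v;

	/* Set path: microseconds -> field value, rejected unless 3..15 */
	v = order_base_2(debounce_us * 1000 / DEBOUNCE_PERIOD_NS);
	printf("request %lu us -> PADCFG2 field %u%s\n", debounce_us, v,
	       (v < 3 || v > 15) ? " (rejected, -EINVAL)" : "");

	/* Get path: field value -> reported microseconds (BIT(v) * period / 1000) */
	for (v = 3; v <= 15; v++)
		printf("field %2u -> %lu us\n", v,
		       (1UL << v) * DEBOUNCE_PERIOD_NS / 1000);

	return 0;
}
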
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index b60215793017..fe9521f345b5 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -58,6 +58,7 @@ struct intel_function {
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
* HOSTSW_OWN, GPI_IS, GPI_IE, etc.
* @npins: Number of pins in this community
+ * @features: Additional features supported by the hardware
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
* @ngpps: Number of groups (hw groups) in this community (reserved for
@@ -72,11 +73,16 @@ struct intel_community {
unsigned pin_base;
unsigned gpp_size;
size_t npins;
+ unsigned features;
void __iomem *regs;
void __iomem *pad_regs;
size_t ngpps;
};
+/* Additional features supported by the hardware */
+#define PINCTRL_FEATURE_DEBOUNCE BIT(0)
+#define PINCTRL_FEATURE_1K_PD BIT(1)
+
#define PIN_GROUP(n, p, m) \
{ \
.name = (n), \
@@ -121,8 +127,6 @@ struct intel_pinctrl_soc_data {
int intel_pinctrl_probe(struct platform_device *pdev,
const struct intel_pinctrl_soc_data *soc_data);
-int intel_pinctrl_remove(struct platform_device *pdev);
-
#ifdef CONFIG_PM_SLEEP
int intel_pinctrl_suspend(struct device *dev);
int intel_pinctrl_resume(struct device *dev);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index b21896126f76..4d4ef42a39b5 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned int i;
int ret;
+ if (!mrfld_buf_available(mp, pin))
+ return -ENOTSUPP;
+
for (i = 0; i < nconfigs; i++) {
switch (pinconf_to_config_param(configs[i])) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index c725a5313b4e..9877526c0807 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -574,7 +574,6 @@ static const struct dev_pm_ops spt_pinctrl_pm_ops = {
static struct platform_driver spt_pinctrl_driver = {
.probe = spt_pinctrl_probe,
- .remove = intel_pinctrl_remove,
.driver = {
.name = "sunrisepoint-pinctrl",
.acpi_match_table = spt_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 4f0bc8a103f4..80fe3b48796c 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -10,25 +10,29 @@ config PINCTRL_MTK
# For ARMv7 SoCs
config PINCTRL_MT2701
- bool "Mediatek MT2701 pin control" if COMPILE_TEST && !MACH_MT2701
+ bool "Mediatek MT2701 pin control"
+ depends on MACH_MT2701 || COMPILE_TEST
depends on OF
default MACH_MT2701
select PINCTRL_MTK
config PINCTRL_MT7623
- bool "Mediatek MT7623 pin control" if COMPILE_TEST && !MACH_MT7623
+ bool "Mediatek MT7623 pin control"
+ depends on MACH_MT7623 || COMPILE_TEST
depends on OF
default MACH_MT7623
select PINCTRL_MTK_COMMON
config PINCTRL_MT8135
- bool "Mediatek MT8135 pin control" if COMPILE_TEST && !MACH_MT8135
+ bool "Mediatek MT8135 pin control"
+ depends on MACH_MT8135 || COMPILE_TEST
depends on OF
default MACH_MT8135
select PINCTRL_MTK
config PINCTRL_MT8127
- bool "Mediatek MT8127 pin control" if COMPILE_TEST && !MACH_MT8127
+ bool "Mediatek MT8127 pin control"
+ depends on MACH_MT8127 || COMPILE_TEST
depends on OF
default MACH_MT8127
select PINCTRL_MTK
@@ -43,7 +47,8 @@ config PINCTRL_MT8173
# For PMIC
config PINCTRL_MT6397
- bool "Mediatek MT6397 pin control" if COMPILE_TEST && !MFD_MT6397
+ bool "Mediatek MT6397 pin control"
+ depends on MFD_MT6397 || COMPILE_TEST
depends on OF
default MFD_MT6397
select PINCTRL_MTK
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index 67895f8234e3..fa28dd6b871b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index f9aef2ac03a1..3cf384f8b122 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1054,6 +1054,18 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return 0;
}
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return mtk_gpio_set_debounce(chip, offset, debounce);
+}
+
static const struct gpio_chip mtk_gpio_chip = {
.owner = THIS_MODULE,
.request = gpiochip_generic_request,
@@ -1064,7 +1076,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.get = mtk_gpio_get,
.set = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
- .set_debounce = mtk_gpio_set_debounce,
+ .set_config = mtk_gpio_set_config,
.of_gpio_n_cells = 2,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
index 3472a76ad422..e06cfc40da0f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa3fefa..7671424d46cb 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -232,6 +232,10 @@ static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -253,9 +257,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
@@ -440,6 +443,11 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(eth_txd2, 6, 3),
GROUP(eth_txd3, 6, 2),
+ /* Bank H */
+ GROUP(hdmi_hpd, 1, 26),
+ GROUP(hdmi_sda, 1, 25),
+ GROUP(hdmi_scl, 1, 24),
+
/* Bank DV */
GROUP(uart_tx_b, 2, 29),
GROUP(uart_rx_b, 2, 28),
@@ -498,7 +506,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_13, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
@@ -636,6 +644,14 @@ static const char * const pwm_f_y_groups[] = {
"pwm_f_y",
};
+static const char * const hdmi_hpd_groups[] = {
+ "hdmi_hpd",
+};
+
+static const char * const hdmi_i2c_groups[] = {
+ "hdmi_sda", "hdmi_scl",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -699,6 +715,8 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(pwm_e),
FUNCTION(pwm_f_x),
FUNCTION(pwm_f_y),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_i2c),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7094c7..4ab94a85e306 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -197,6 +197,10 @@ static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -214,14 +218,15 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
+static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
+
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
GPIO_GROUP(GPIOZ_1, EE_OFF),
@@ -363,6 +368,11 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(eth_txd2, 4, 11),
GROUP(eth_txd3, 4, 10),
+ /* Bank H */
+ GROUP(hdmi_hpd, 6, 31),
+ GROUP(hdmi_sda, 6, 30),
+ GROUP(hdmi_scl, 6, 29),
+
/* Bank DV */
GROUP(uart_tx_b, 2, 16),
GROUP(uart_rx_b, 2, 15),
@@ -409,7 +419,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GPIO_GROUP(GPIOAO_9, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
@@ -418,6 +428,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(uart_cts_ao_b, 0, 8),
GROUP(uart_rts_ao_b, 0, 7),
GROUP(remote_input_ao, 0, 0),
+ GROUP(pwm_ao_b, 0, 3),
};
static const char * const gpio_periphs_groups[] = {
@@ -506,6 +517,14 @@ static const char * const pwm_e_groups[] = {
"pwm_e",
};
+static const char * const hdmi_hpd_groups[] = {
+ "hdmi_hpd",
+};
+
+static const char * const hdmi_i2c_groups[] = {
+ "hdmi_sda", "hdmi_scl",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -523,6 +542,10 @@ static const char * const remote_input_ao_groups[] = {
"remote_input_ao",
};
+static const char * const pwm_ao_b_groups[] = {
+ "pwm_ao_b",
+};
+
static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -537,6 +560,8 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(i2c_c),
FUNCTION(eth),
FUNCTION(pwm_e),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_i2c),
};
static struct meson_pmx_func meson_gxl_aobus_functions[] = {
@@ -544,6 +569,7 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
FUNCTION(remote_input_ao),
+ FUNCTION(pwm_ao_b),
};
static struct meson_bank meson_gxl_periphs_banks[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 620c231a2889..cf1686e04378 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -260,7 +260,6 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
enum pin_config_param param;
unsigned int reg, bit;
int i, ret;
- u16 arg;
ret = meson_get_bank(pc, pin, &bank);
if (ret)
@@ -268,7 +267,6 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 9cc1cc3f5c34..9feba9a5ccb7 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -23,18 +22,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -384,8 +371,8 @@ static const struct of_device_id armada_370_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 65, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
@@ -397,12 +384,6 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 370 */
soc->controls = mv88f6710_mpp_controls;
@@ -414,7 +395,7 @@ static int armada_370_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_370_pinctrl_driver = {
@@ -424,9 +405,4 @@ static struct platform_driver armada_370_pinctrl_driver = {
},
.probe = armada_370_pinctrl_probe,
};
-
-module_platform_driver(armada_370_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Armada 370 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada_370_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index 070651431ca4..b7de8abccd48 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -23,18 +22,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_375_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_375_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -402,8 +389,8 @@ static const struct of_device_id armada_375_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 69, NULL, armada_375_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 69, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
@@ -415,12 +402,6 @@ static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
static int armada_375_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 375 */
soc->controls = mv88f6720_mpp_controls;
@@ -432,7 +413,7 @@ static int armada_375_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_375_pinctrl_driver = {
@@ -442,9 +423,4 @@ static struct platform_driver armada_375_pinctrl_driver = {
},
.probe = armada_375_pinctrl_probe,
};
-
-module_platform_driver(armada_375_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Armada 375 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada_375_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 4e84c8e4938c..de2e1538a26f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -22,18 +21,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_38x_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_38x_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum {
V_88F6810 = BIT(0),
V_88F6820 = BIT(1),
@@ -409,8 +396,8 @@ static const struct of_device_id armada_38x_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 59, NULL, armada_38x_mpp_ctrl),
+static const struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
@@ -423,16 +410,10 @@ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
- struct resource *res;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
soc->controls = armada_38x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
@@ -443,7 +424,7 @@ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_38x_pinctrl_driver = {
@@ -453,9 +434,4 @@ static struct platform_driver armada_38x_pinctrl_driver = {
},
.probe = armada_38x_pinctrl_probe,
};
-
-module_platform_driver(armada_38x_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Armada 38x pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada_38x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
index e288f8ba0bf1..627f57c88372 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -22,18 +21,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_39x_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_39x_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum {
V_88F6920 = BIT(0),
V_88F6925 = BIT(1),
@@ -391,8 +378,8 @@ static const struct of_device_id armada_39x_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 59, NULL, armada_39x_mpp_ctrl),
+static const struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
@@ -405,16 +392,10 @@ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
- struct resource *res;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
soc->controls = armada_39x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls);
@@ -425,7 +406,7 @@ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_39x_pinctrl_driver = {
@@ -435,9 +416,4 @@ static struct platform_driver armada_39x_pinctrl_driver = {
},
.probe = armada_39x_pinctrl_probe,
};
-
-module_platform_driver(armada_39x_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Armada 39x pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada_39x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index e4ea71a9d985..b854f1ee5de5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -30,25 +29,18 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
static u32 *mpp_saved_regs;
-static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_xp_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum armada_xp_variant {
V_MV78230 = BIT(0),
V_MV78260 = BIT(1),
V_MV78460 = BIT(2),
V_MV78230_PLUS = (V_MV78230 | V_MV78260 | V_MV78460),
V_MV78260_PLUS = (V_MV78260 | V_MV78460),
+ V_98DX3236 = BIT(3),
+ V_98DX3336 = BIT(4),
+ V_98DX4251 = BIT(5),
+ V_98DX3236_PLUS = (V_98DX3236 | V_98DX3336 | V_98DX4251),
};
static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
@@ -360,6 +352,131 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
MPP_VAR_FUNCTION(0x1, "dev", "ad31", V_MV78260_PLUS)),
};
+static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "mosi", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad8", V_98DX3236_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "miso", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad9", V_98DX3236_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "sck", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad10", V_98DX3236_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "cs0", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad11", V_98DX3236_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "cs1", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "cs0", V_98DX3236_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "pex", "rsto", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "cmd", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "bootcs", V_98DX3236_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "clk", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "a2", V_98DX3236_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d0", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ale0", V_98DX3236_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d1", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ale1", V_98DX3236_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d2", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ready0", V_98DX3236_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d3", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad12", V_98DX3236_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "rxd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart0", "cts", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad13", V_98DX3236_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "txd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart0", "rts", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad14", V_98DX3236_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "intr", "out", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad15", V_98DX3236_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "clkout", V_98DX3236_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V_98DX3236_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "rb", V_98DX3236_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "we0", V_98DX3236_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad0", V_98DX3236_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad1", V_98DX3236_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad2", V_98DX3236_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad3", V_98DX3236_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad4", V_98DX3236_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad5", V_98DX3236_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad6", V_98DX3236_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad7", V_98DX3236_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "a0", V_98DX3236_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "a1", V_98DX3236_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "slv_smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "we1", V_98DX3236_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "slv_smi", "mdio", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdio", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "cs1", V_98DX3236_PLUS)),
+};
+
static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info;
static const struct of_device_id armada_xp_pinctrl_of_match[] = {
@@ -375,11 +492,19 @@ static const struct of_device_id armada_xp_pinctrl_of_match[] = {
.compatible = "marvell,mv78460-pinctrl",
.data = (void *) V_MV78460,
},
+ {
+ .compatible = "marvell,98dx3236-pinctrl",
+ .data = (void *) V_98DX3236,
+ },
+ {
+ .compatible = "marvell,98dx4251-pinctrl",
+ .data = (void *) V_98DX4251,
+ },
{ },
};
-static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 48, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 48, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
@@ -387,8 +512,8 @@ static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 32, 32, 17),
};
-static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 66, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
@@ -397,8 +522,8 @@ static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
-static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 66, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
@@ -407,6 +532,14 @@ static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
+static struct mvebu_mpp_ctrl mv98dx3236_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 32, NULL, mvebu_mmio_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range mv98dx3236_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+};
+
static int armada_xp_pinctrl_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -417,7 +550,7 @@ static int armada_xp_pinctrl_suspend(struct platform_device *pdev,
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
for (i = 0; i < nregs; i++)
- mpp_saved_regs[i] = readl(mpp_base + i * 4);
+ mpp_saved_regs[i] = readl(soc->control_data[0].base + i * 4);
return 0;
}
@@ -431,7 +564,7 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev)
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
for (i = 0; i < nregs; i++)
- writel(mpp_saved_regs[i], mpp_base + i * 4);
+ writel(mpp_saved_regs[i], soc->control_data[0].base + i * 4);
return 0;
}
@@ -441,17 +574,11 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
- struct resource *res;
int nregs;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
switch (soc->variant) {
@@ -488,6 +615,17 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
soc->gpioranges = mv78460_mpp_gpio_ranges;
soc->ngpioranges = ARRAY_SIZE(mv78460_mpp_gpio_ranges);
break;
+ case V_98DX3236:
+ case V_98DX3336:
+ case V_98DX4251:
+ /* all three 98DX variants share the same control setup */
+ soc->controls = mv98dx3236_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv98dx3236_mpp_controls);
+ soc->modes = mv98dx3236_mpp_modes;
+ soc->nmodes = mv98dx3236_mpp_controls[0].npins;
+ soc->gpioranges = mv98dx3236_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv98dx3236_mpp_gpio_ranges);
+ break;
}
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
@@ -499,7 +637,7 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_xp_pinctrl_driver = {
@@ -511,9 +649,4 @@ static struct platform_driver armada_xp_pinctrl_driver = {
.suspend = armada_xp_pinctrl_suspend,
.resume = armada_xp_pinctrl_resume,
};
-
-module_platform_driver(armada_xp_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Armada XP pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada_xp_pinctrl_driver);
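
The suspend/resume loops above and the mvebu_mmio_mpp_ctrl_get()/_set() helpers added later in this patch all rely on the same register layout: each MPP pin owns a 4-bit mux field (MVEBU_MPP_BITS) and eight pins share one 32-bit register (MVEBU_MPPS_PER_REG, assumed to be 8 as in pinctrl-mvebu.h), so the byte offset and bit shift follow directly from the pin id. With the 98DX3236 table above (pins 0..32, i.e. 33 modes if MPP_FUNC_CTRL counts inclusively), the suspend path saves DIV_ROUND_UP(33, 8) = 5 words. A minimal worked sketch of that arithmetic, with example_* names used purely for illustration:

/* Worked example of the MPP register layout used by the save/restore loop
 * and by mvebu_mmio_mpp_ctrl_get()/_set(): 4-bit fields, eight pins per
 * 32-bit register, registers 4 bytes apart.
 */
static unsigned example_mpp_reg_offset(unsigned pid)
{
	/* pid 19 -> (19 / 8) * 4 = 8: third register in the block */
	return (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
}

static unsigned example_mpp_shift(unsigned pid)
{
	/* pid 19 -> (19 % 8) * 4 = 12: fourth field within that register */
	return (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
}
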
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index f93ae0dcef9c..8472f61f2bbe 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -61,30 +60,20 @@
#define CONFIG_PMU BIT(4)
-static void __iomem *mpp_base;
static void __iomem *mpp4_base;
static void __iomem *pmu_base;
static struct regmap *gconfmap;
-static int dove_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int dove_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
-static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long *config)
{
unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
if ((pmu & BIT(pid)) == 0)
- return default_mpp_ctrl_get(mpp_base, pid, config);
+ return mvebu_mmio_mpp_ctrl_get(data, pid, config);
func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
*config = (func >> shift) & MVEBU_MPP_MASK;
@@ -93,19 +82,20 @@ static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
+static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long config)
{
unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
if ((config & CONFIG_PMU) == 0) {
- writel(pmu & ~BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
- return default_mpp_ctrl_set(mpp_base, pid, config);
+ writel(pmu & ~BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
+ return mvebu_mmio_mpp_ctrl_set(data, pid, config);
}
- writel(pmu | BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ writel(pmu | BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
func &= ~(MVEBU_MPP_MASK << shift);
func |= (config & MVEBU_MPP_MASK) << shift;
@@ -114,7 +104,8 @@ static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
@@ -144,7 +135,8 @@ static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
+static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
@@ -178,7 +170,8 @@ static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int gmpp;
@@ -188,7 +181,8 @@ static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
+static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
NAND_GPIO_EN,
@@ -196,28 +190,31 @@ static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_audio0_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
*config = ((pmu & AU0_AC97_SEL) != 0);
return 0;
}
-static int dove_audio0_ctrl_set(unsigned pid, unsigned long config)
+static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu &= ~AU0_AC97_SEL;
if (config)
pmu |= AU0_AC97_SEL;
- writel(pmu, mpp_base + PMU_MPP_GENERAL_CTRL);
+ writel(pmu, data->base + PMU_MPP_GENERAL_CTRL);
return 0;
}
-static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int mpp4 = readl(mpp4_base);
unsigned int sspc1;
@@ -247,7 +244,8 @@ static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
+static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned int mpp4 = readl(mpp4_base);
@@ -274,11 +272,12 @@ static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
* break other functions. If you require all mpps as gpio
* enforce gpio setting by pinctrl mapping.
*/
-static int dove_audio1_ctrl_gpio_req(unsigned pid)
+static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid)
{
unsigned long config;
- dove_audio1_ctrl_get(pid, &config);
+ dove_audio1_ctrl_get(data, pid, &config);
switch (config) {
case 0x02: /* i2s1 : gpio[56:57] */
@@ -301,14 +300,16 @@ static int dove_audio1_ctrl_gpio_req(unsigned pid)
}
/* mpp[52:57] has gpio pins capable of in and out */
-static int dove_audio1_ctrl_gpio_dir(unsigned pid, bool input)
+static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, bool input)
{
if (pid < 52 || pid > 57)
return -ENOTSUPP;
return 0;
}
-static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int gcfg1;
unsigned int gcfg2;
@@ -327,7 +328,8 @@ static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
+static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned int gcfg1 = 0;
unsigned int gcfg2 = 0;
@@ -354,9 +356,9 @@ static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
+static const struct mvebu_mpp_ctrl dove_mpp_controls[] = {
MPP_FUNC_CTRL(0, 15, NULL, dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(16, 23, NULL, dove_mpp_ctrl),
+ MPP_FUNC_CTRL(16, 23, NULL, mvebu_mmio_mpp_ctrl),
MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
@@ -769,6 +771,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
struct resource fb_res;
const struct of_device_id *match =
of_match_device(dove_pinctrl_of_match, &pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ void __iomem *base;
+ int i;
+
pdev->dev.platform_data = (void *)match->data;
/*
@@ -783,9 +789,18 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, mpp_res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
+ base = devm_ioremap_resource(&pdev->dev, mpp_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
+ sizeof(*mpp_data), GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ dove_pinctrl_info.control_data = mpp_data;
+ for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
+ mpp_data[i].base = base;
/* prepare fallback resource */
memcpy(&fb_res, mpp_res, sizeof(struct resource));
@@ -838,24 +853,12 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int dove_pinctrl_remove(struct platform_device *pdev)
-{
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
- return 0;
-}
-
static struct platform_driver dove_pinctrl_driver = {
.driver = {
.name = "dove-pinctrl",
+ .suppress_bind_attrs = true,
.of_match_table = dove_pinctrl_of_match,
},
.probe = dove_pinctrl_probe,
- .remove = dove_pinctrl_remove,
};
-
-module_platform_driver(dove_pinctrl_driver);
-
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Dove pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dove_pinctrl_driver);
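
Dove keeps its own probe because its pins are spread over several register blocks (the MPP base handed to control_data, plus the separate MPP4, PMU and general-config regions still reached through file-scope pointers), so it allocates the mvebu_mpp_ctrl_data array itself and points every control at the same base. The per-control data would also allow each control to carry its own MMIO window; the sketch below is hypothetical (the multi-"reg" split is not what dove does) and only illustrates what the indirection makes possible:

/* Hypothetical: give each mpp control its own MMIO base through the new
 * per-control data.  Dove itself maps a single base and assigns it to
 * every control; example_* names are placeholders.
 */
static int example_pinctrl_probe(struct platform_device *pdev)
{
	struct mvebu_pinctrl_soc_info *soc = &example_pinctrl_info;
	struct mvebu_mpp_ctrl_data *mpp_data;
	int i;

	mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols,
				sizeof(*mpp_data), GFP_KERNEL);
	if (!mpp_data)
		return -ENOMEM;

	for (i = 0; i < soc->ncontrols; i++) {
		struct resource *res;

		/* assumes one "reg" entry per control in the DT node */
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		mpp_data[i].base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(mpp_data[i].base))
			return PTR_ERR(mpp_data[i].base);
	}

	soc->control_data = mpp_data;
	pdev->dev.platform_data = soc;

	return mvebu_pinctrl_probe(pdev);
}
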
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 5f89c26f3292..5995a19abde5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -21,18 +20,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int kirkwood_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int kirkwood_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
#define V(f6180, f6190, f6192, f6281, f6282, dx4122) \
((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
(f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
@@ -370,8 +357,8 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1, 0))),
};
-static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 44, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 44, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
@@ -379,8 +366,8 @@ static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 35, 35, 10),
};
-static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 35, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 35, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
@@ -388,8 +375,8 @@ static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 32, 32, 4),
};
-static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 49, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 49, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
@@ -469,17 +456,12 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
const struct of_device_id *match =
of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
- pdev->dev.platform_data = (void *)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
+ pdev->dev.platform_data = (void *)match->data;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver kirkwood_pinctrl_driver = {
@@ -489,9 +471,4 @@ static struct platform_driver kirkwood_pinctrl_driver = {
},
.probe = kirkwood_pinctrl_probe,
};
-
-module_platform_driver(kirkwood_pinctrl_driver);
-
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Kirkwood pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(kirkwood_pinctrl_driver);
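
The kirkwood conversion above is the template the simpler mvebu SoCs all follow: drop the private mpp_base wrapper, mark the control table const with mvebu_mmio_mpp_ctrl as the accessor, and let mvebu_pinctrl_simple_mmio_probe() map the single "reg" window. A condensed, hypothetical skeleton of such a converted driver; the foo_* names are placeholders, the mvebu_* helpers and macros are the ones used in this patch:

/* Hypothetical mvebu-style pinctrl driver after the conversion. */
static const struct mvebu_mpp_ctrl foo_mpp_controls[] = {
	MPP_FUNC_CTRL(0, 31, NULL, mvebu_mmio_mpp_ctrl),
};

static struct pinctrl_gpio_range foo_mpp_gpio_ranges[] = {
	MPP_GPIO_RANGE(0, 0, 0, 32),
};

static struct mvebu_pinctrl_soc_info foo_pinctrl_info;

static int foo_pinctrl_probe(struct platform_device *pdev)
{
	struct mvebu_pinctrl_soc_info *soc = &foo_pinctrl_info;

	soc->controls = foo_mpp_controls;
	soc->ncontrols = ARRAY_SIZE(foo_mpp_controls);
	soc->gpioranges = foo_mpp_gpio_ranges;
	soc->ngpioranges = ARRAY_SIZE(foo_mpp_gpio_ranges);
	/* soc->variant, soc->modes and soc->nmodes filled as in the real drivers */

	pdev->dev.platform_data = soc;

	return mvebu_pinctrl_simple_mmio_probe(pdev);
}

static struct platform_driver foo_pinctrl_driver = {
	.driver = {
		.name = "foo-pinctrl",
		.of_match_table = foo_pinctrl_of_match,	/* not shown */
	},
	.probe = foo_pinctrl_probe,
};
builtin_platform_driver(foo_pinctrl_driver);
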
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index b6ec6db78351..e4dda12d371a 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -11,7 +11,6 @@
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -23,6 +22,8 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pinctrl-mvebu.h"
@@ -38,7 +39,8 @@ struct mvebu_pinctrl_function {
struct mvebu_pinctrl_group {
const char *name;
- struct mvebu_mpp_ctrl *ctrl;
+ const struct mvebu_mpp_ctrl *ctrl;
+ struct mvebu_mpp_ctrl_data *data;
struct mvebu_mpp_ctrl_setting *settings;
unsigned num_settings;
unsigned gid;
@@ -57,6 +59,30 @@ struct mvebu_pinctrl {
u8 variant;
};
+int mvebu_mmio_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ *config = (readl(data->base + off) >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+int mvebu_mmio_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long reg;
+
+ reg = readl(data->base + off) & ~(MVEBU_MPP_MASK << shift);
+ writel(reg | (config << shift), data->base + off);
+
+ return 0;
+}
+
static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_pid(
struct mvebu_pinctrl *pctl, unsigned pid)
{
@@ -146,7 +172,7 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!grp->ctrl)
return -EINVAL;
- return grp->ctrl->mpp_get(grp->pins[0], config);
+ return grp->ctrl->mpp_get(grp->data, grp->pins[0], config);
}
static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -161,7 +187,7 @@ static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
for (i = 0; i < num_configs; i++) {
- ret = grp->ctrl->mpp_set(grp->pins[0], configs[i]);
+ ret = grp->ctrl->mpp_set(grp->data, grp->pins[0], configs[i]);
if (ret)
return ret;
} /* for each config */
@@ -188,18 +214,19 @@ static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
if (curr->subname)
seq_printf(s, "(%s)", curr->subname);
if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
- seq_printf(s, "(");
+ seq_putc(s, '(');
if (curr->flags & MVEBU_SETTING_GPI)
- seq_printf(s, "i");
+ seq_putc(s, 'i');
if (curr->flags & MVEBU_SETTING_GPO)
- seq_printf(s, "o");
- seq_printf(s, ")");
+ seq_putc(s, 'o');
+ seq_putc(s, ')');
}
- } else
- seq_printf(s, "current: UNKNOWN");
+ } else {
+ seq_puts(s, "current: UNKNOWN");
+ }
if (grp->num_settings > 1) {
- seq_printf(s, ", available = [");
+ seq_puts(s, ", available = [");
for (n = 0; n < grp->num_settings; n++) {
if (curr == &grp->settings[n])
continue;
@@ -214,17 +241,16 @@ static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "(%s)", grp->settings[n].subname);
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
- seq_printf(s, "(");
+ seq_putc(s, '(');
if (grp->settings[n].flags & MVEBU_SETTING_GPI)
- seq_printf(s, "i");
+ seq_putc(s, 'i');
if (grp->settings[n].flags & MVEBU_SETTING_GPO)
- seq_printf(s, "o");
- seq_printf(s, ")");
+ seq_putc(s, 'o');
+ seq_putc(s, ')');
}
}
- seq_printf(s, " ]");
+ seq_puts(s, " ]");
}
- return;
}
static const struct pinconf_ops mvebu_pinconf_ops = {
@@ -302,7 +328,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_req)
- return grp->ctrl->mpp_gpio_req(offset);
+ return grp->ctrl->mpp_gpio_req(grp->data, offset);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -325,7 +351,7 @@ static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_dir)
- return grp->ctrl->mpp_gpio_dir(offset, input);
+ return grp->ctrl->mpp_gpio_dir(grp->data, offset, input);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -398,13 +424,9 @@ static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
}
- *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
- if (*map == NULL) {
- dev_err(pctl->dev,
- "cannot allocate pinctrl_map memory for %s\n",
- np->name);
+ *map = kmalloc_array(nmaps, sizeof(**map), GFP_KERNEL);
+ if (!*map)
return -ENOMEM;
- }
n = 0;
of_property_for_each_string(np, "marvell,pins", prop, group) {
@@ -563,10 +585,8 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
GFP_KERNEL);
- if (!pctl) {
- dev_err(&pdev->dev, "unable to alloc driver\n");
+ if (!pctl)
return -ENOMEM;
- }
pctl->desc.name = dev_name(&pdev->dev);
pctl->desc.owner = THIS_MODULE;
@@ -582,7 +602,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->num_groups = 0;
pctl->desc.npins = 0;
for (n = 0; n < soc->ncontrols; n++) {
- struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
pctl->desc.npins += ctrl->npins;
/* initialize control's pins[] array */
@@ -604,10 +624,8 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
- if (!pdesc) {
- dev_err(&pdev->dev, "failed to alloc pinctrl pins\n");
+ if (!pdesc)
return -ENOMEM;
- }
for (n = 0; n < pctl->desc.npins; n++)
pdesc[n].number = n;
@@ -628,9 +646,13 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
/* assign mpp controls to groups */
gid = 0;
for (n = 0; n < soc->ncontrols; n++) {
- struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ struct mvebu_mpp_ctrl_data *data = soc->control_data ?
+ &soc->control_data[n] : NULL;
+
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].data = data;
pctl->groups[gid].name = ctrl->name;
pctl->groups[gid].pins = ctrl->pins;
pctl->groups[gid].npins = ctrl->npins;
@@ -650,6 +672,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
gid++;
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].data = data;
pctl->groups[gid].name = noname_buf;
pctl->groups[gid].pins = &ctrl->pins[k];
pctl->groups[gid].npins = 1;
@@ -725,3 +748,94 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+
+/*
+ * mvebu_pinctrl_simple_mmio_probe - probe a simple mmio pinctrl
+ * @pdev: platform device (with platform data already attached)
+ *
+ * Initialise a simple (single base address) mmio pinctrl driver,
+ * assigning the MMIO base address to all mvebu mpp ctrl instances.
+ */
+int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols, sizeof(*mpp_data),
+ GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ for (i = 0; i < soc->ncontrols; i++)
+ mpp_data[i].base = base;
+
+ soc->control_data = mpp_data;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+int mvebu_regmap_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap.map, data->regmap.offset + off, &val);
+ if (err)
+ return err;
+
+ *config = (val >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ return regmap_update_bits(data->regmap.map, data->regmap.offset + off,
+ MVEBU_MPP_MASK << shift, config << shift);
+}
+
+int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
+ struct device *syscon_dev)
+{
+ struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ struct regmap *regmap;
+ u32 offset;
+ int i;
+
+ regmap = syscon_node_to_regmap(syscon_dev->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset", &offset))
+ return -EINVAL;
+
+ mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols, sizeof(*mpp_data),
+ GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ for (i = 0; i < soc->ncontrols; i++) {
+ mpp_data[i].regmap.map = regmap;
+ mpp_data[i].regmap.offset = offset;
+ }
+
+ soc->control_data = mpp_data;
+
+ return mvebu_pinctrl_probe(pdev);
+}
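
mvebu_pinctrl_simple_regmap_probe() is the syscon-backed counterpart of the MMIO helper: the caller passes the device whose OF node owns the regmap, the pinctrl node's "offset" property locates the MPP block inside that regmap, and the per-pin fields are then accessed through mvebu_regmap_mpp_ctrl_get()/_set(). A minimal sketch of a caller, assuming the pinctrl node is a child of a syscon node; the bar_* names are placeholders:

/* Hypothetical caller of the regmap-based probe helper added above. */
static const struct mvebu_mpp_ctrl bar_mpp_controls[] = {
	MPP_FUNC_CTRL(0, 31, NULL, mvebu_regmap_mpp_ctrl),
};

static struct mvebu_pinctrl_soc_info bar_pinctrl_info;

static int bar_pinctrl_probe(struct platform_device *pdev)
{
	struct mvebu_pinctrl_soc_info *soc = &bar_pinctrl_info;

	soc->controls = bar_mpp_controls;
	soc->ncontrols = ARRAY_SIZE(bar_mpp_controls);
	/* modes, gpio ranges etc. set up as in the other drivers */

	pdev->dev.platform_data = soc;

	/* the parent device is the syscon that owns the regmap */
	return mvebu_pinctrl_simple_regmap_probe(pdev, pdev->dev.parent);
}
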
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index b75a5f4adf3b..c90704e74884 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -14,6 +14,22 @@
#define __PINCTRL_MVEBU_H__
/**
+ * struct mvebu_mpp_ctrl_data - private data for the mpp ctrl operations
+ * @base: base address of pinctrl hardware
+ * @regmap.map: regmap structure
+ * @regmap.offset: regmap offset
+ */
+struct mvebu_mpp_ctrl_data {
+ union {
+ void __iomem *base;
+ struct {
+ struct regmap *map;
+ u32 offset;
+ } regmap;
+ };
+};
+
+/**
* struct mvebu_mpp_ctrl - describe a mpp control
* @name: name of the control group
* @pid: first pin id handled by this control
@@ -37,10 +53,13 @@ struct mvebu_mpp_ctrl {
u8 pid;
u8 npins;
unsigned *pins;
- int (*mpp_get)(unsigned pid, unsigned long *config);
- int (*mpp_set)(unsigned pid, unsigned long config);
- int (*mpp_gpio_req)(unsigned pid);
- int (*mpp_gpio_dir)(unsigned pid, bool input);
+ int (*mpp_get)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+ int (*mpp_set)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
+ int (*mpp_gpio_req)(struct mvebu_mpp_ctrl_data *data, unsigned pid);
+ int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ bool input);
};
/**
@@ -93,6 +112,7 @@ struct mvebu_mpp_mode {
* struct mvebu_pinctrl_soc_info - SoC specific info passed to pinctrl-mvebu
* @variant: variant mask of soc_info
* @controls: list of available mvebu_mpp_ctrls
+ * @control_data: optional array, one entry for each control
* @ncontrols: number of available mvebu_mpp_ctrls
* @modes: list of available mvebu_mpp_modes
* @nmodes: number of available mvebu_mpp_modes
@@ -105,7 +125,8 @@ struct mvebu_mpp_mode {
*/
struct mvebu_pinctrl_soc_info {
u8 variant;
- struct mvebu_mpp_ctrl *controls;
+ const struct mvebu_mpp_ctrl *controls;
+ struct mvebu_mpp_ctrl_data *control_data;
int ncontrols;
struct mvebu_mpp_mode *modes;
int nmodes;
@@ -177,30 +198,18 @@ struct mvebu_pinctrl_soc_info {
#define MVEBU_MPP_BITS 4
#define MVEBU_MPP_MASK 0xf
-static inline int default_mpp_ctrl_get(void __iomem *base, unsigned int pid,
- unsigned long *config)
-{
- unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
-
- *config = (readl(base + off) >> shift) & MVEBU_MPP_MASK;
-
- return 0;
-}
-
-static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
- unsigned long config)
-{
- unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long reg;
-
- reg = readl(base + off) & ~(MVEBU_MPP_MASK << shift);
- writel(reg | (config << shift), base + off);
-
- return 0;
-}
+int mvebu_mmio_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+int mvebu_mmio_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
+int mvebu_regmap_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
int mvebu_pinctrl_probe(struct platform_device *pdev);
+int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev);
+int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
+ struct device *syscon_dev);
#endif
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 84e144167b44..69cb4d9f0114 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -32,7 +31,8 @@
static void __iomem *mpp_base;
static void __iomem *high_mpp_base;
-static int orion_mpp_ctrl_get(unsigned pid, unsigned long *config)
+static int orion_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long *config)
{
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
@@ -47,7 +47,8 @@ static int orion_mpp_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int orion_mpp_ctrl_set(unsigned pid, unsigned long config)
+static int orion_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long config)
{
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
@@ -161,7 +162,7 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = {
MPP_VAR_FUNCTION(0x5, "gpio", NULL, V_5182)),
};
-static struct mvebu_mpp_ctrl orion_mpp_controls[] = {
+static const struct mvebu_mpp_ctrl orion_mpp_controls[] = {
MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl),
};
@@ -247,9 +248,4 @@ static struct platform_driver orion_pinctrl_driver = {
},
.probe = orion_pinctrl_probe,
};
-
-module_platform_driver(orion_pinctrl_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell Orion pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(orion_pinctrl_driver);
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 799048f3c8d4..c1c1ccc58267 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -200,6 +200,18 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
return 0;
}
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs)
+{
+ const struct pinconf_ops *ops;
+
+ ops = pctldev->desc->confops;
+ if (!ops)
+ return -ENOTSUPP;
+
+ return ops->pin_config_set(pctldev, pin, configs, nconfigs);
+}
+
#ifdef CONFIG_DEBUG_FS
static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index 55c75780b3b2..bf8aff9abf32 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -20,6 +20,9 @@ int pinconf_map_to_setting(struct pinctrl_map const *map,
void pinconf_free_setting(struct pinctrl_setting const *setting);
int pinconf_apply_setting(struct pinctrl_setting const *setting);
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs);
+
/*
* You will only be interested in these if you're using PINCONF
* so don't supply any stubs for these.
@@ -56,6 +59,12 @@ static inline int pinconf_apply_setting(struct pinctrl_setting const *setting)
return 0;
}
+static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs)
+{
+ return -ENOTSUPP;
+}
+
#endif
#if defined(CONFIG_PINCONF) && defined(CONFIG_DEBUG_FS)
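
pinconf_set_config() gives core code a direct way to hand a packed config array to a controller's pin_config_set(), and the u16 to u32 widenings in the drivers below follow the generic pinconf packing, where the parameter sits in the low 8 bits of the config word and the argument above it. A minimal core-side sketch, assuming the pinconf-generic helpers pinconf_to_config_packed() and PIN_CONFIG_INPUT_DEBOUNCE; example_set_debounce() is illustrative only:

/* Sketch of the packing used with the new pinconf_set_config() helper:
 * parameter in the low 8 bits, argument in the bits above it, hence the
 * driver-side "arg"/"param_val" variables widening from u16 to u32.
 */
static int example_set_debounce(struct pinctrl_dev *pctldev, unsigned pin,
				u32 debounce_us)
{
	unsigned long config =
		pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
					 debounce_us);

	return pinconf_set_config(pctldev, pin, &config, 1);
}
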
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c9a146948192..d69e357a7a98 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -164,6 +164,18 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
return ret;
}
+static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return amd_gpio_set_debounce(gc, offset, debounce);
+}
+
#ifdef CONFIG_DEBUG_FS
static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
@@ -186,7 +198,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *output_value;
char *output_enable;
- for (bank = 0; bank < AMD_GPIO_TOTAL_BANKS; bank++) {
+ for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
seq_printf(s, "GPIO bank%d\t", bank);
switch (bank) {
@@ -202,8 +214,14 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
i = 128;
pin_num = AMD_GPIO_PINS_BANK2 + i;
break;
+ case 3:
+ i = 192;
+ pin_num = AMD_GPIO_PINS_BANK3 + i;
+ break;
+ default:
+ /* Illegal bank number, ignore */
+ continue;
}
-
for (; i < pin_num; i++) {
seq_printf(s, "pin%d\t", i);
spin_lock_irqsave(&gpio_dev->lock, flags);
@@ -213,14 +231,14 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
interrupt_enable = "interrupt is enabled|";
- if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
- && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
+ !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
active_level = "Active low|";
- else if (pin_reg & BIT(ACTIVE_LEVEL_OFF)
- && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ else if (pin_reg & BIT(ACTIVE_LEVEL_OFF) &&
+ !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
active_level = "Active high|";
- else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
- && pin_reg & BIT(ACTIVE_LEVEL_OFF+1))
+ else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
+ pin_reg & BIT(ACTIVE_LEVEL_OFF + 1))
active_level = "Active on both|";
else
active_level = "Unknow Active level|";
@@ -244,17 +262,17 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
interrupt_mask =
"interrupt is masked|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
wake_cntrl0 = "enable wakeup in S0i3 state|";
else
wake_cntrl0 = "disable wakeup in S0i3 state|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
wake_cntrl1 = "enable wakeup in S3 state|";
else
wake_cntrl1 = "disable wakeup in S3 state|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
wake_cntrl2 = "enable wakeup in S4/S5 state|";
else
wake_cntrl2 = "disable wakeup in S4/S5 state|";
@@ -474,6 +492,7 @@ static struct irq_chip amd_gpio_irqchip = {
.irq_unmask = amd_gpio_irq_unmask,
.irq_eoi = amd_gpio_irq_eoi,
.irq_set_type = amd_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static void amd_gpio_irq_handler(struct irq_desc *desc)
@@ -756,18 +775,19 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
gpio_dev->gc.set = amd_gpio_set_value;
- gpio_dev->gc.set_debounce = amd_gpio_set_debounce;
+ gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
- gpio_dev->gc.base = 0;
+ gpio_dev->gc.base = -1;
gpio_dev->gc.label = pdev->name;
gpio_dev->gc.owner = THIS_MODULE;
gpio_dev->gc.parent = &pdev->dev;
- gpio_dev->gc.ngpio = TOTAL_NUMBER_OF_PINS;
+ gpio_dev->gc.ngpio = resource_size(res) / 4;
#if defined(CONFIG_OF_GPIO)
gpio_dev->gc.of_node = pdev->dev.of_node;
#endif
+ gpio_dev->hwbank_num = gpio_dev->gc.ngpio / 64;
gpio_dev->groups = kerncz_groups;
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
@@ -784,7 +804,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
return ret;
ret = gpiochip_add_pin_range(&gpio_dev->gc, dev_name(&pdev->dev),
- 0, 0, TOTAL_NUMBER_OF_PINS);
+ 0, 0, gpio_dev->gc.ngpio);
if (ret) {
dev_err(&pdev->dev, "Failed to add pin range\n");
goto out2;
@@ -805,7 +825,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
&amd_gpio_irqchip,
irq_base,
amd_gpio_irq_handler);
-
platform_set_drvdata(pdev, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 7bfea47dbb47..c03f77822069 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -13,13 +13,12 @@
#ifndef _PINCTRL_AMD_H
#define _PINCTRL_AMD_H
-#define TOTAL_NUMBER_OF_PINS 192
#define AMD_GPIO_PINS_PER_BANK 64
-#define AMD_GPIO_TOTAL_BANKS 3
#define AMD_GPIO_PINS_BANK0 63
#define AMD_GPIO_PINS_BANK1 64
#define AMD_GPIO_PINS_BANK2 56
+#define AMD_GPIO_PINS_BANK3 32
#define WAKE_INT_MASTER_REG 0xfc
#define EOI_MASK (1 << 29)
@@ -35,7 +34,9 @@
#define ACTIVE_LEVEL_OFF 9
#define INTERRUPT_ENABLE_OFF 11
#define INTERRUPT_MASK_OFF 12
-#define WAKE_CNTRL_OFF 13
+#define WAKE_CNTRL_OFF_S0I3 13
+#define WAKE_CNTRL_OFF_S3 14
+#define WAKE_CNTRL_OFF_S4 15
#define PIN_STS_OFF 16
#define DRV_STRENGTH_SEL_OFF 17
#define PULL_UP_SEL_OFF 19
@@ -93,6 +94,7 @@ struct amd_gpio {
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
+ unsigned int hwbank_num;
struct resource *res;
struct platform_device *pdev;
};
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index b36a90a3f3e4..f41d3d948dd8 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -113,7 +113,6 @@ static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev,
struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev);
u32 ena, sel;
enum pin_config_param param;
- u16 arg;
int i;
ena = readl(data->base + DA850_PUPD_ENA);
@@ -121,7 +120,6 @@ static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -194,6 +192,7 @@ static const struct of_device_id da850_pupd_of_match[] = {
{ .compatible = "ti,da850-pupd" },
{ }
};
+MODULE_DEVICE_TABLE(of, da850_pupd_of_match);
static struct platform_driver da850_pupd_driver = {
.driver = {
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 0b0fc2eb48e0..fb73dcbb5ef3 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -7,7 +7,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/gpio.h>
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index a4d647424600..41dc39c7a7b1 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/module.h>
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index e137d139e494..0e4308b8f235 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#ifndef __PINCTRL_LANTIQ_H
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index e053f1fa5512..d090f37ca4a1 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -904,7 +904,7 @@ static int lpc18xx_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
enum pin_config_param param,
- u16 param_val, u32 *reg)
+ u32 param_val, u32 *reg)
{
switch (param) {
case PIN_CONFIG_LOW_POWER_MODE:
@@ -932,7 +932,7 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
enum pin_config_param param,
- u16 param_val, u32 *reg,
+ u32 param_val, u32 *reg,
unsigned pin)
{
u8 shift;
@@ -982,7 +982,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
}
static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
- u16 param_val, unsigned pin)
+ u32 param_val, unsigned pin)
{
struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
u32 val, reg_val, reg_offset = LPC18XX_SCU_PINTSEL0;
@@ -1008,7 +1008,7 @@ static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
}
static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
- u16 param_val, u32 *reg, unsigned pin,
+ u32 param_val, u32 *reg, unsigned pin,
struct lpc18xx_pin_caps *pin_cap)
{
switch (param) {
@@ -1088,7 +1088,7 @@ static int lpc18xx_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
struct lpc18xx_pin_caps *pin_cap;
enum pin_config_param param;
- u16 param_val;
+ u32 param_val;
u32 reg;
int ret;
int i;
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index d9ff53e8f715..b8d2180a2bea 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -402,7 +402,7 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
struct device *dev = mpci->dev;
struct max77620_fps_config *fps_config;
int param;
- u16 param_val;
+ u32 param_val;
unsigned int val;
unsigned int pu_val;
unsigned int pd_val;
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index a30146da7ffd..4d6a5015b927 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -860,7 +860,7 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
{
struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 param_val;
+ u32 param_val;
const struct palmas_pingroup *g;
const struct palmas_pin_info *opt;
int ret;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 08765f58253c..7813599e43fa 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1441,7 +1441,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
int i;
int rc;
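
The pinctrl-single changes that follow replace the driver's private radix trees and lists with the core's generic tables: pinctrl_generic_get_group_* back the pinctrl_ops, pinmux_generic_* back the pinmux_ops, and pcs_add_function() now registers through pinmux_generic_add_function() with the pcs_function kept as the entry's ->data. A hedged sketch of that register-and-look-up pattern, with example_* names used only for illustration:

/* Sketch of the generic pinmux function table usage pinctrl-single is
 * converted to: register a function with driver data attached, then get
 * the data back through the generic lookup.
 */
static int example_register_function(struct pinctrl_dev *pctldev,
				     const char *name,
				     const char **groups, unsigned ngroups,
				     void *driver_data)
{
	return pinmux_generic_add_function(pctldev, name, groups, ngroups,
					   driver_data);
}

static void *example_lookup_function(struct pinctrl_dev *pctldev,
				     unsigned selector)
{
	struct function_desc *function;

	function = pinmux_generic_get_function(pctldev, selector);
	if (!function)
		return NULL;

	return function->data;
}
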
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a5a0392ab817..8b2d45e85bae 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -33,27 +33,12 @@
#include "core.h"
#include "devicetree.h"
#include "pinconf.h"
+#include "pinmux.h"
#define DRIVER_NAME "pinctrl-single"
#define PCS_OFF_DISABLED ~0U
/**
- * struct pcs_pingroup - pingroups for a function
- * @np: pingroup device node pointer
- * @name: pingroup name
- * @gpins: array of the pins in the group
- * @ngpins: number of pins in the group
- * @node: list node
- */
-struct pcs_pingroup {
- struct device_node *np;
- const char *name;
- int *gpins;
- int ngpins;
- struct list_head node;
-};
-
-/**
* struct pcs_func_vals - mux function register offset and value pair
* @reg: register virtual address
* @val: register value
@@ -176,16 +161,10 @@ struct pcs_soc_data {
* @bits_per_mux: number of bits per mux
* @bits_per_pin: number of bits per pin
* @pins: physical pins on the SoC
- * @pgtree: pingroup index radix tree
- * @ftree: function index radix tree
- * @pingroups: list of pingroups
- * @functions: list of functions
* @gpiofuncs: list of gpio functions
* @irqs: list of interrupt registers
* @chip: chip container for this instance
* @domain: IRQ domain for this instance
- * @ngroups: number of pingroups
- * @nfuncs: number of functions
* @desc: pin controller descriptor
* @read: register read function to use
* @write: register write function to use
@@ -213,16 +192,10 @@ struct pcs_device {
bool bits_per_mux;
unsigned bits_per_pin;
struct pcs_data pins;
- struct radix_tree_root pgtree;
- struct radix_tree_root ftree;
- struct list_head pingroups;
- struct list_head functions;
struct list_head gpiofuncs;
struct list_head irqs;
struct irq_chip chip;
struct irq_domain *domain;
- unsigned ngroups;
- unsigned nfuncs;
struct pinctrl_desc desc;
unsigned (*read)(void __iomem *reg);
void (*write)(unsigned val, void __iomem *reg);
@@ -288,54 +261,6 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
writel(val, reg);
}
-static int pcs_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct pcs_device *pcs;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
-
- return pcs->ngroups;
-}
-
-static const char *pcs_get_group_name(struct pinctrl_dev *pctldev,
- unsigned gselector)
-{
- struct pcs_device *pcs;
- struct pcs_pingroup *group;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- group = radix_tree_lookup(&pcs->pgtree, gselector);
- if (!group) {
- dev_err(pcs->dev, "%s could not find pingroup%i\n",
- __func__, gselector);
- return NULL;
- }
-
- return group->name;
-}
-
-static int pcs_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned gselector,
- const unsigned **pins,
- unsigned *npins)
-{
- struct pcs_device *pcs;
- struct pcs_pingroup *group;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- group = radix_tree_lookup(&pcs->pgtree, gselector);
- if (!group) {
- dev_err(pcs->dev, "%s could not find pingroup%i\n",
- __func__, gselector);
- return -EINVAL;
- }
-
- *pins = group->gpins;
- *npins = group->ngpins;
-
- return 0;
-}
-
static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned pin)
@@ -369,67 +294,21 @@ static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map, unsigned *num_maps);
static const struct pinctrl_ops pcs_pinctrl_ops = {
- .get_groups_count = pcs_get_groups_count,
- .get_group_name = pcs_get_group_name,
- .get_group_pins = pcs_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.pin_dbg_show = pcs_pin_dbg_show,
.dt_node_to_map = pcs_dt_node_to_map,
.dt_free_map = pcs_dt_free_map,
};
-static int pcs_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct pcs_device *pcs;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
-
- return pcs->nfuncs;
-}
-
-static const char *pcs_get_function_name(struct pinctrl_dev *pctldev,
- unsigned fselector)
-{
- struct pcs_device *pcs;
- struct pcs_function *func;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- func = radix_tree_lookup(&pcs->ftree, fselector);
- if (!func) {
- dev_err(pcs->dev, "%s could not find function%i\n",
- __func__, fselector);
- return NULL;
- }
-
- return func->name;
-}
-
-static int pcs_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned fselector,
- const char * const **groups,
- unsigned * const ngroups)
-{
- struct pcs_device *pcs;
- struct pcs_function *func;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- func = radix_tree_lookup(&pcs->ftree, fselector);
- if (!func) {
- dev_err(pcs->dev, "%s could not find function%i\n",
- __func__, fselector);
- return -EINVAL;
- }
- *groups = func->pgnames;
- *ngroups = func->npgnames;
-
- return 0;
-}
-
static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
struct pcs_function **func)
{
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pin_desc *pdesc = pin_desc_get(pctldev, pin);
const struct pinctrl_setting_mux *setting;
+ struct function_desc *function;
unsigned fselector;
/* If pin is not described in DTS & enabled, mux_setting is NULL. */
@@ -437,7 +316,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
if (!setting)
return -ENOTSUPP;
fselector = setting->func;
- *func = radix_tree_lookup(&pcs->ftree, fselector);
+ function = pinmux_generic_get_function(pctldev, fselector);
+ *func = function->data;
if (!(*func)) {
dev_err(pcs->dev, "%s could not find function%i\n",
__func__, fselector);
@@ -450,6 +330,7 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
struct pcs_device *pcs;
+ struct function_desc *function;
struct pcs_function *func;
int i;
@@ -457,7 +338,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
/* If function mask is null, needn't enable it. */
if (!pcs->fmask)
return 0;
- func = radix_tree_lookup(&pcs->ftree, fselector);
+ function = pinmux_generic_get_function(pctldev, fselector);
+ func = function->data;
if (!func)
return -EINVAL;
@@ -515,9 +397,9 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
}
static const struct pinmux_ops pcs_pinmux_ops = {
- .get_functions_count = pcs_get_functions_count,
- .get_function_name = pcs_get_function_name,
- .get_function_groups = pcs_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = pcs_set_mux,
.gpio_request_enable = pcs_request_gpio,
};
@@ -622,7 +504,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pcs_function *func;
unsigned offset = 0, shift = 0, i, data, ret;
- u16 arg;
+ u32 arg;
int j;
ret = pcs_get_function(pctldev, pin, &func);
@@ -685,7 +567,7 @@ static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned npins, old = 0;
int i, ret;
- ret = pcs_get_group_pins(pctldev, group, &pins, &npins);
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
if (ret)
return ret;
for (i = 0; i < npins; i++) {
@@ -707,7 +589,7 @@ static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned npins;
int i, ret;
- ret = pcs_get_group_pins(pctldev, group, &pins, &npins);
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
if (ret)
return ret;
for (i = 0; i < npins; i++) {
@@ -859,77 +741,24 @@ static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
unsigned npgnames)
{
struct pcs_function *function;
+ int res;
function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
if (!function)
return NULL;
- function->name = name;
function->vals = vals;
function->nvals = nvals;
- function->pgnames = pgnames;
- function->npgnames = npgnames;
- mutex_lock(&pcs->mutex);
- list_add_tail(&function->node, &pcs->functions);
- radix_tree_insert(&pcs->ftree, pcs->nfuncs, function);
- pcs->nfuncs++;
- mutex_unlock(&pcs->mutex);
+ res = pinmux_generic_add_function(pcs->pctl, name,
+ pgnames, npgnames,
+ function);
+ if (res)
+ return NULL;
return function;
}
-static void pcs_remove_function(struct pcs_device *pcs,
- struct pcs_function *function)
-{
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->nfuncs; i++) {
- struct pcs_function *found;
-
- found = radix_tree_lookup(&pcs->ftree, i);
- if (found == function)
- radix_tree_delete(&pcs->ftree, i);
- }
- list_del(&function->node);
- mutex_unlock(&pcs->mutex);
-}
-
-/**
- * pcs_add_pingroup() - add a pingroup to the pingroup list
- * @pcs: pcs driver instance
- * @np: device node of the mux entry
- * @name: name of the pingroup
- * @gpins: array of the pins that belong to the group
- * @ngpins: number of pins in the group
- */
-static int pcs_add_pingroup(struct pcs_device *pcs,
- struct device_node *np,
- const char *name,
- int *gpins,
- int ngpins)
-{
- struct pcs_pingroup *pingroup;
-
- pingroup = devm_kzalloc(pcs->dev, sizeof(*pingroup), GFP_KERNEL);
- if (!pingroup)
- return -ENOMEM;
-
- pingroup->name = name;
- pingroup->np = np;
- pingroup->gpins = gpins;
- pingroup->ngpins = ngpins;
-
- mutex_lock(&pcs->mutex);
- list_add_tail(&pingroup->node, &pcs->pingroups);
- radix_tree_insert(&pcs->pgtree, pcs->ngroups, pingroup);
- pcs->ngroups++;
- mutex_unlock(&pcs->mutex);
-
- return 0;
-}
-
/**
* pcs_get_pin_by_offset() - get a pin index based on the register offset
* @pcs: pcs driver instance
@@ -1100,10 +929,9 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
return 0;
}
-static void pcs_free_pingroups(struct pcs_device *pcs);
-
/**
* smux_parse_one_pinctrl_entry() - parses a device tree mux entry
+ * @pctldev: pin controller device
* @pcs: pinctrl driver instance
* @np: device node of the mux entry
* @map: map entry
@@ -1134,7 +962,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
- dev_err(pcs->dev, "Ivalid number of rows: %d\n", rows);
+ dev_err(pcs->dev, "Invalid number of rows: %d\n", rows);
return -EINVAL;
}
@@ -1186,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
goto free_pins;
}
- res = pcs_add_pingroup(pcs, np, np->name, pins, found);
+ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
if (res < 0)
goto free_function;
@@ -1205,10 +1033,10 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
return 0;
free_pingroups:
- pcs_free_pingroups(pcs);
+ pinctrl_generic_remove_last_group(pcs->pctl);
*num_maps = 1;
free_function:
- pcs_remove_function(pcs, function);
+ pinmux_generic_remove_last_function(pcs->pctl);
free_pins:
devm_kfree(pcs->dev, pins);
@@ -1320,7 +1148,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
goto free_pins;
}
- res = pcs_add_pingroup(pcs, np, np->name, pins, found);
+ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
if (res < 0)
goto free_function;
@@ -1337,11 +1165,10 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
return 0;
free_pingroups:
- pcs_free_pingroups(pcs);
+ pinctrl_generic_remove_last_group(pcs->pctl);
*num_maps = 1;
free_function:
- pcs_remove_function(pcs, function);
-
+ pinmux_generic_remove_last_function(pcs->pctl);
free_pins:
devm_kfree(pcs->dev, pins);
@@ -1409,60 +1236,6 @@ free_map:
}
/**
- * pcs_free_funcs() - free memory used by functions
- * @pcs: pcs driver instance
- */
-static void pcs_free_funcs(struct pcs_device *pcs)
-{
- struct list_head *pos, *tmp;
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->nfuncs; i++) {
- struct pcs_function *func;
-
- func = radix_tree_lookup(&pcs->ftree, i);
- if (!func)
- continue;
- radix_tree_delete(&pcs->ftree, i);
- }
- list_for_each_safe(pos, tmp, &pcs->functions) {
- struct pcs_function *function;
-
- function = list_entry(pos, struct pcs_function, node);
- list_del(&function->node);
- }
- mutex_unlock(&pcs->mutex);
-}
-
-/**
- * pcs_free_pingroups() - free memory used by pingroups
- * @pcs: pcs driver instance
- */
-static void pcs_free_pingroups(struct pcs_device *pcs)
-{
- struct list_head *pos, *tmp;
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->ngroups; i++) {
- struct pcs_pingroup *pingroup;
-
- pingroup = radix_tree_lookup(&pcs->pgtree, i);
- if (!pingroup)
- continue;
- radix_tree_delete(&pcs->pgtree, i);
- }
- list_for_each_safe(pos, tmp, &pcs->pingroups) {
- struct pcs_pingroup *pingroup;
-
- pingroup = list_entry(pos, struct pcs_pingroup, node);
- list_del(&pingroup->node);
- }
- mutex_unlock(&pcs->mutex);
-}
-
-/**
* pcs_irq_free() - free interrupt
* @pcs: pcs driver instance
*/
@@ -1490,8 +1263,7 @@ static void pcs_free_resources(struct pcs_device *pcs)
{
pcs_irq_free(pcs);
pinctrl_unregister(pcs->pctl);
- pcs_free_funcs(pcs);
- pcs_free_pingroups(pcs);
+
#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
if (pcs->missing_nr_pinctrl_cells)
of_remove_property(pcs->np, pcs->missing_nr_pinctrl_cells);
@@ -1885,8 +1657,6 @@ static int pcs_probe(struct platform_device *pdev)
pcs->np = np;
raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
- INIT_LIST_HEAD(&pcs->pingroups);
- INIT_LIST_HEAD(&pcs->functions);
INIT_LIST_HEAD(&pcs->gpiofuncs);
soc = match->data;
pcs->flags = soc->flags;
@@ -1947,8 +1717,6 @@ static int pcs_probe(struct platform_device *pdev)
return -ENODEV;
}
- INIT_RADIX_TREE(&pcs->pgtree, GFP_KERNEL);
- INIT_RADIX_TREE(&pcs->ftree, GFP_KERNEL);
platform_set_drvdata(pdev, pcs);
switch (pcs->width) {
@@ -1979,10 +1747,9 @@ static int pcs_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
- pcs->pctl = pinctrl_register(&pcs->desc, pcs->dev, pcs);
- if (IS_ERR(pcs->pctl)) {
+ ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl);
+ if (ret) {
dev_err(pcs->dev, "could not register single pinctrl driver\n");
- ret = PTR_ERR(pcs->pctl);
goto free;
}
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 29fb7403d24e..7450f5118445 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -424,41 +424,6 @@ static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(value & BIT(offset));
}
-static int sx150x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
-{
- struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
- int ret;
-
- switch (mode) {
- case LINE_MODE_PUSH_PULL:
- if (pctl->data->model != SX150X_789 ||
- sx150x_pin_is_oscio(pctl, offset))
- return 0;
-
- ret = regmap_write_bits(pctl->regmap,
- pctl->data->pri.x789.reg_drain,
- BIT(offset), 0);
- break;
-
- case LINE_MODE_OPEN_DRAIN:
- if (pctl->data->model != SX150X_789 ||
- sx150x_pin_is_oscio(pctl, offset))
- return -ENOTSUPP;
-
- ret = regmap_write_bits(pctl->regmap,
- pctl->data->pri.x789.reg_drain,
- BIT(offset), BIT(offset));
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
-
- return ret;
-}
-
static int __sx150x_gpio_set(struct sx150x_pinctrl *pctl, unsigned int offset,
int value)
{
@@ -811,16 +776,26 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- ret = sx150x_gpio_set_single_ended(&pctl->gpio,
- pin, LINE_MODE_OPEN_DRAIN);
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, pin))
+ return -ENOTSUPP;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(pin), BIT(pin));
if (ret < 0)
return ret;
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- ret = sx150x_gpio_set_single_ended(&pctl->gpio,
- pin, LINE_MODE_PUSH_PULL);
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, pin))
+ return 0;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(pin), 0);
if (ret < 0)
return ret;
@@ -1178,7 +1153,7 @@ static int sx150x_probe(struct i2c_client *client,
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
pctl->gpio.set = sx150x_gpio_set;
- pctl->gpio.set_single_ended = sx150x_gpio_set_single_ended;
+ pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
#ifdef CONFIG_OF_GPIO
pctl->gpio.of_node = dev->of_node;
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index dd85ad1807f5..d4167e2c173a 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2015 Martin Schiller <mschiller@tdt.de>
*/
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index ece702881946..29ad3151abec 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -99,37 +99,24 @@ static int pin_request(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
pin, desc->name, owner);
- if (gpio_range) {
- /* There's no need to support multiple GPIO requests */
- if (desc->gpio_owner) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->gpio_owner, owner);
- goto out;
- }
- if (ops->strict && desc->mux_usecount &&
- strcmp(desc->mux_owner, owner)) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->mux_owner, owner);
- goto out;
- }
+ if ((!gpio_range || ops->strict) &&
+ desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->mux_owner, owner);
+ goto out;
+ }
+ if ((gpio_range || ops->strict) && desc->gpio_owner) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->gpio_owner, owner);
+ goto out;
+ }
+
+ if (gpio_range) {
desc->gpio_owner = owner;
} else {
- if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->mux_owner, owner);
- goto out;
- }
- if (ops->strict && desc->gpio_owner) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->gpio_owner, owner);
- goto out;
- }
-
desc->mux_usecount++;
if (desc->mux_usecount > 1)
return 0;
@@ -695,3 +682,176 @@ void pinmux_init_device_debugfs(struct dentry *devroot,
}
#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+
+/**
+ * pinmux_generic_get_function_count() - returns number of functions
+ * @pctldev: pin controller device
+ */
+int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev)
+{
+ return pctldev->num_functions;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_count);
+
+/**
+ * pinmux_generic_get_function_name() - returns the function name
+ * @pctldev: pin controller device
+ * @selector: function number
+ */
+const char *
+pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return NULL;
+
+ return function->name;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
+
+/**
+ * pinmux_generic_get_function_groups() - gets the function groups
+ * @pctldev: pin controller device
+ * @selector: function number
+ * @groups: array of pin groups
+ * @num_groups: number of pin groups
+ */
+int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function) {
+ dev_err(pctldev->dev, "%s could not find function%i\n",
+ __func__, selector);
+ return -EINVAL;
+ }
+ *groups = function->group_names;
+ *num_groups = function->num_group_names;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups);
+
+/**
+ * pinmux_generic_get_function() - returns a function based on the number
+ * @pctldev: pin controller device
+ * @group_selector: function number
+ */
+struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return NULL;
+
+ return function;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
+
+/**
+ * pinmux_generic_get_function_groups() - gets the function groups
+ * @pctldev: pin controller device
+ * @name: name of the function
+ * @groups: array of pin groups
+ * @num_groups: number of pin groups
+ * @data: pin controller driver specific data
+ */
+int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
+ const char *name,
+ const char **groups,
+ const unsigned int num_groups,
+ void *data)
+{
+ struct function_desc *function;
+
+ function = devm_kzalloc(pctldev->dev, sizeof(*function), GFP_KERNEL);
+ if (!function)
+ return -ENOMEM;
+
+ function->name = name;
+ function->group_names = groups;
+ function->num_group_names = num_groups;
+ function->data = data;
+
+ radix_tree_insert(&pctldev->pin_function_tree, pctldev->num_functions,
+ function);
+
+ pctldev->num_functions++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
+
+/**
+ * pinmux_generic_remove_function() - removes a numbered function
+ * @pctldev: pin controller device
+ * @selector: function number
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return -ENOENT;
+
+ radix_tree_delete(&pctldev->pin_function_tree, selector);
+ devm_kfree(pctldev->dev, function);
+
+ pctldev->num_functions--;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
+
+/**
+ * pinmux_generic_free_functions() - removes all functions
+ * @pctldev: pin controller device
+ *
+ * Note that the caller must take care of locking.
+ */
+void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
+{
+ struct radix_tree_iter iter;
+ struct function_desc *function;
+ unsigned long *indices;
+ void **slot;
+ int i = 0;
+
+ indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
+ pctldev->num_functions, GFP_KERNEL);
+ if (!indices)
+ return;
+
+ radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
+ indices[i++] = iter.index;
+
+ for (i = 0; i < pctldev->num_functions; i++) {
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ indices[i]);
+ radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
+ devm_kfree(pctldev->dev, function);
+ }
+
+ pctldev->num_functions = 0;
+}
+
+#endif /* CONFIG_GENERIC_PINMUX_FUNCTIONS */
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index d1a98b1c9fce..248d8ea30e26 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -111,3 +111,59 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
}
#endif
+
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+
+/**
+ * struct function_desc - generic function descriptor
+ * @name: name of the function
+ * @group_names: array of pin group names
+ * @num_group_names: number of pin group names
+ * @data: pin controller driver specific data
+ */
+struct function_desc {
+ const char *name;
+ const char **group_names;
+ int num_group_names;
+ void *data;
+};
+
+int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev);
+
+const char *
+pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups);
+
+struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
+ const char *name,
+ const char **groups,
+ unsigned const num_groups,
+ void *data);
+
+int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+static inline int
+pinmux_generic_remove_last_function(struct pinctrl_dev *pctldev)
+{
+ return pinmux_generic_remove_function(pctldev,
+ pctldev->num_functions - 1);
+}
+
+void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
+
+#else
+
+static inline void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
+{
+}
+
+#endif /* CONFIG_GENERIC_PINMUX_FUNCTIONS */
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 775c88303017..f8e9e1c2b2f6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -61,7 +61,7 @@ struct msm_pinctrl {
struct notifier_block restart_nb;
int irq;
- spinlock_t lock;
+ raw_spinlock_t lock;
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
@@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT:
/* set output value */
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (arg)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
/* enable output */
arg = 1;
@@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
return 0;
@@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
val |= BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
#ifdef CONFIG_DEBUG_FS
@@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_cfg_reg);
val &= ~BIT(g->intr_enable_bit);
@@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
clear_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_unmask(struct irq_data *d)
@@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
val &= ~BIT(g->intr_status_bit);
@@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
set_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_ack(struct irq_data *d)
@@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
if (g->intr_ack_high)
@@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/*
* For hw without possibility of detecting both edges
@@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
@@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->soc = soc_data;
pctrl->chip = msm_gpio_template;
- spin_lock_init(&pctrl->lock);
+ raw_spin_lock_init(&pctrl->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 5591d093bf78..bb71dd1e6279 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -193,9 +193,9 @@ static const struct pinctrl_pin_desc msm8660_pins[] = {
PINCTRL_PIN(171, "GPIO_171"),
PINCTRL_PIN(172, "GPIO_172"),
- PINCTRL_PIN(173, "SDC1_CLK"),
- PINCTRL_PIN(174, "SDC1_CMD"),
- PINCTRL_PIN(175, "SDC1_DATA"),
+ PINCTRL_PIN(173, "SDC4_CLK"),
+ PINCTRL_PIN(174, "SDC4_CMD"),
+ PINCTRL_PIN(175, "SDC4_DATA"),
PINCTRL_PIN(176, "SDC3_CLK"),
PINCTRL_PIN(177, "SDC3_CMD"),
PINCTRL_PIN(178, "SDC3_DATA"),
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 07409fde02b2..f9b49967f512 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -24,11 +24,15 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/err.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -528,10 +532,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kzalloc(dev, bank->nr_pins
* sizeof(*weint_data), GFP_KERNEL);
- if (!weint_data) {
- dev_err(dev, "could not allocate memory for weint_data\n");
+ if (!weint_data)
return -ENOMEM;
- }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -559,10 +561,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
muxed_data = devm_kzalloc(dev, sizeof(*muxed_data)
+ muxed_banks*sizeof(struct samsung_pin_bank *), GFP_KERNEL);
- if (!muxed_data) {
- dev_err(dev, "could not allocate memory for muxed_data\n");
+ if (!muxed_data)
return -ENOMEM;
- }
irq_set_chained_handler_and_data(irq, exynos_irq_demux_eint16_31,
muxed_data);
@@ -644,6 +644,60 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
exynos_pinctrl_resume_bank(drvdata, bank);
}
+/* Retention control for S5PV210 are located at the end of clock controller */
+#define S5P_OTHERS 0xE000
+
+#define S5P_OTHERS_RET_IO (1 << 31)
+#define S5P_OTHERS_RET_CF (1 << 30)
+#define S5P_OTHERS_RET_MMC (1 << 29)
+#define S5P_OTHERS_RET_UART (1 << 28)
+
+static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ void *clk_base = drvdata->retention_ctrl->priv;
+ u32 tmp;
+
+ tmp = __raw_readl(clk_base + S5P_OTHERS);
+ tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF | S5P_OTHERS_RET_MMC |
+ S5P_OTHERS_RET_UART);
+ __raw_writel(tmp, clk_base + S5P_OTHERS);
+}
+
+static struct samsung_retention_ctrl *
+s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data)
+{
+ struct samsung_retention_ctrl *ctrl;
+ struct device_node *np;
+ void *clk_base;
+
+ ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl->priv = clk_base;
+ ctrl->disable = s5pv210_retention_disable;
+
+ return ctrl;
+}
+
+static const struct samsung_retention_data s5pv210_retention_data __initconst = {
+ .init = s5pv210_retention_init,
+};
+
/* pin banks of s5pv210 pin-controller */
static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -691,9 +745,58 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &s5pv210_retention_data,
},
};
+/* Pad retention control code for accessing PMU regmap */
+static atomic_t exynos_shared_retention_refcnt;
+
+static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ if (drvdata->retention_ctrl->refcnt)
+ atomic_inc(drvdata->retention_ctrl->refcnt);
+}
+
+static void exynos_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_retention_ctrl *ctrl = drvdata->retention_ctrl;
+ struct regmap *pmu_regs = ctrl->priv;
+ int i;
+
+ if (ctrl->refcnt && !atomic_dec_and_test(ctrl->refcnt))
+ return;
+
+ for (i = 0; i < ctrl->nr_regs; i++)
+ regmap_write(pmu_regs, ctrl->regs[i], ctrl->value);
+}
+
+static struct samsung_retention_ctrl *
+exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data)
+{
+ struct samsung_retention_ctrl *ctrl;
+ struct regmap *pmu_regs;
+
+ ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ pmu_regs = exynos_get_pmu_regmap();
+ if (IS_ERR(pmu_regs))
+ return ERR_CAST(pmu_regs);
+
+ ctrl->priv = pmu_regs;
+ ctrl->regs = data->regs;
+ ctrl->nr_regs = data->nr_regs;
+ ctrl->value = data->value;
+ ctrl->refcnt = data->refcnt;
+ ctrl->enable = exynos_retention_enable;
+ ctrl->disable = exynos_retention_disable;
+
+ return ctrl;
+}
+
/* pin banks of exynos3250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -726,6 +829,30 @@ static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst =
};
/*
+ * PMU pad retention groups for Exynos3250 doesn't match pin banks, so handle
+ * them all together
+ */
+static const u32 exynos3250_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+ S5P_PAD_RET_MMC2_OPTION,
+ S5P_PAD_RET_SPI_OPTION,
+};
+
+static const struct samsung_retention_data exynos3250_retention_data __initconst = {
+ .regs = exynos3250_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos3250_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/*
* Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
* two gpio/pin-mux/pinconfig controllers.
*/
@@ -737,6 +864,7 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos3250_pin_banks1,
@@ -745,6 +873,7 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
},
};
@@ -797,6 +926,36 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst =
EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
};
+/* PMU pad retention groups registers for Exynos4 (without audio) */
+static const u32 exynos4_retention_regs[] = {
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_retention_data __initconst = {
+ .regs = exynos4_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/* PMU retention control for audio pins can be tied to audio pin bank */
+static const u32 exynos4_audio_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_audio_retention_data __initconst = {
+ .regs = exynos4_audio_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_audio_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .init = exynos_retention_init,
+};
+
/*
* Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
* three gpio/pin-mux/pinconfig controllers.
@@ -809,6 +968,7 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4210_pin_banks1,
@@ -817,10 +977,12 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4210_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
+ .retention_data = &exynos4_audio_retention_data,
},
};
@@ -894,6 +1056,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4x12_pin_banks1,
@@ -902,6 +1065,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4x12_pin_banks2,
@@ -909,6 +1073,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos4x12_pin_banks3,
@@ -919,81 +1084,6 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
},
};
-/* pin banks of exynos4415 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks0[] = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
- EXYNOS_PIN_BANK_EINTG(1, 0x1C0, "gpf2", 0x38),
-};
-
-/* pin banks of exynos4415 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks1[] = {
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
- EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
- EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpl0", 0x18),
- EXYNOS_PIN_BANK_EINTN(6, 0x120, "mp00"),
- EXYNOS_PIN_BANK_EINTN(4, 0x140, "mp01"),
- EXYNOS_PIN_BANK_EINTN(6, 0x160, "mp02"),
- EXYNOS_PIN_BANK_EINTN(8, 0x180, "mp03"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "mp04"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "mp05"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "mp06"),
- EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
- EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos4415 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks2[] = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
- EXYNOS_PIN_BANK_EINTN(2, 0x000, "etc1"),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos4415 SoC. Exynos4415 SoC includes
- * three gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos4415_pin_ctrl[] = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos4415_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos4415_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos4415_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- },
-};
-
/* pin banks of exynos5250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -1063,6 +1153,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5250_pin_banks1,
@@ -1070,6 +1161,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5250_pin_banks2,
@@ -1084,6 +1176,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
},
};
@@ -1310,6 +1403,30 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst =
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
+/* PMU pad retention groups registers for Exynos5420 (without audio) */
+static const u32 exynos5420_retention_regs[] = {
+ EXYNOS_PAD_RET_DRAM_OPTION,
+ EXYNOS_PAD_RET_JTAG_OPTION,
+ EXYNOS5420_PAD_RET_GPIO_OPTION,
+ EXYNOS5420_PAD_RET_UART_OPTION,
+ EXYNOS5420_PAD_RET_MMCA_OPTION,
+ EXYNOS5420_PAD_RET_MMCB_OPTION,
+ EXYNOS5420_PAD_RET_MMCC_OPTION,
+ EXYNOS5420_PAD_RET_HSI_OPTION,
+ EXYNOS_PAD_RET_EBIA_OPTION,
+ EXYNOS_PAD_RET_EBIB_OPTION,
+ EXYNOS5420_PAD_RET_SPI_OPTION,
+ EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION,
+};
+
+static const struct samsung_retention_data exynos5420_retention_data __initconst = {
+ .regs = exynos5420_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5420_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
/*
* Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
* four gpio/pin-mux/pinconfig controllers.
@@ -1321,114 +1438,119 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos4_audio_retention_data,
},
};
/* pin banks of exynos5433 pin-controller - ALIVE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
- EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+ EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
};
/* pin banks of exynos5433 pin-controller - AUD */
-static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
- EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
-static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
- EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
-static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
- EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
-static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
- EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+ EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+ EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
};
/* pin banks of exynos5433 pin-controller - IMEM */
-static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
- EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
- EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
- EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
- EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
- EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
- EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
- EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+ EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+ EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+ EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+ EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+ EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
};
/* pin banks of exynos5433 pin-controller - TOUCH */
-static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/*
* Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
* ten gpio/pin-mux/pinconfig controllers.
*/
-const struct samsung_pin_ctrl exynos5433_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos5433_pin_banks0,
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 4c632812ccff..f17890aa6e25 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -489,10 +489,8 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
data = devm_kzalloc(dev, sizeof(*data)
+ nr_domains * sizeof(*data->domains), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "failed to allocate handler data\n");
+ if (!data)
return -ENOMEM;
- }
data->drvdata = d;
bank = d->pin_banks;
@@ -715,10 +713,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "could not allocate memory for wkup eint data\n");
+ if (!data)
return -ENOMEM;
- }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -751,10 +747,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
ddata = devm_kzalloc(dev,
sizeof(*ddata) + nr_eints, GFP_KERNEL);
- if (!ddata) {
- dev_err(dev, "failed to allocate domain data\n");
+ if (!ddata)
return -ENOMEM;
- }
ddata->bank = bank;
bank->irq_domain = irq_domain_add_linear(bank->of_node,
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 41e62391c33c..f9ddba7decc1 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -27,8 +27,8 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <linux/syscore_ops.h>
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -48,9 +48,6 @@ static struct pin_config {
{ "samsung,pin-val", PINCFG_TYPE_DAT },
};
-/* Global list of devices (struct samsung_pinctrl_drv_data) */
-static LIST_HEAD(drvdata_list);
-
static unsigned int pin_base;
static int samsung_get_group_count(struct pinctrl_dev *pctldev)
@@ -93,10 +90,8 @@ static int reserve_map(struct device *dev, struct pinctrl_map **map,
return 0;
new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
- if (!new_map) {
- dev_err(dev, "krealloc(map) failed\n");
+ if (!new_map)
return -ENOMEM;
- }
memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
@@ -133,10 +128,8 @@ static int add_map_configs(struct device *dev, struct pinctrl_map **map,
dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
GFP_KERNEL);
- if (!dup_configs) {
- dev_err(dev, "kmemdup(configs) failed\n");
+ if (!dup_configs)
return -ENOMEM;
- }
(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
(*map)[*num_maps].data.configs.group_or_pin = group;
@@ -156,10 +149,8 @@ static int add_config(struct device *dev, unsigned long **configs,
new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
GFP_KERNEL);
- if (!new_configs) {
- dev_err(dev, "krealloc(configs) failed\n");
+ if (!new_configs)
return -ENOMEM;
- }
new_configs[old_num] = config;
@@ -356,7 +347,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
/* enable or disable a pinmux function */
static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group, bool enable)
+ unsigned group)
{
struct samsung_pinctrl_drv_data *drvdata;
const struct samsung_pin_bank_type *type;
@@ -386,8 +377,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
data &= ~(mask << shift);
- if (enable)
- data |= func->val << shift;
+ data |= func->val << shift;
writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
spin_unlock_irqrestore(&bank->slock, flags);
@@ -398,7 +388,7 @@ static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned selector,
unsigned group)
{
- samsung_pinmux_setup(pctldev, selector, group, true);
+ samsung_pinmux_setup(pctldev, selector, group);
return 0;
}
@@ -756,10 +746,8 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
functions = devm_kzalloc(dev, func_cnt * sizeof(*functions),
GFP_KERNEL);
- if (!functions) {
- dev_err(dev, "failed to allocate memory for function list\n");
- return ERR_PTR(-EINVAL);
- }
+ if (!functions)
+ return ERR_PTR(-ENOMEM);
func = functions;
/*
@@ -850,10 +838,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
drvdata->nr_pins, GFP_KERNEL);
- if (!pindesc) {
- dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+ if (!pindesc)
return -ENOMEM;
- }
ctrldesc->pins = pindesc;
ctrldesc->npins = drvdata->nr_pins;
@@ -867,10 +853,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
*/
pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
drvdata->nr_pins, GFP_KERNEL);
- if (!pin_names) {
- dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+ if (!pin_names)
return -ENOMEM;
- }
/* for each pin, the name of the pin is pin-bank name + pin number */
for (bank = 0; bank < drvdata->nr_banks; bank++) {
@@ -968,15 +952,12 @@ static int samsung_gpiolib_unregister(struct platform_device *pdev,
return 0;
}
-static const struct of_device_id samsung_pinctrl_dt_match[];
-
/* retrieve the soc specific data */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
struct platform_device *pdev)
{
int id;
- const struct of_device_id *match;
struct device_node *node = pdev->dev.of_node;
struct device_node *np;
const struct samsung_pin_bank_data *bdata;
@@ -991,8 +972,8 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
dev_err(&pdev->dev, "failed to get alias id\n");
return ERR_PTR(-ENOENT);
}
- match = of_match_node(samsung_pinctrl_dt_match, node);
- ctrl = (struct samsung_pin_ctrl *)match->data + id;
+ ctrl = of_device_get_match_data(&pdev->dev);
+ ctrl += id;
d->suspend = ctrl->suspend;
d->resume = ctrl->resume;
@@ -1007,10 +988,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- virt_base[i] = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ virt_base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(virt_base[i]))
- return ERR_PTR(-EIO);
+ return ERR_CAST(virt_base[i]);
}
bank = d->pin_banks;
@@ -1075,6 +1055,13 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (res)
drvdata->irq = res->start;
+ if (ctrl->retention_data) {
+ drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
+ ctrl->retention_data);
+ if (IS_ERR(drvdata->retention_ctrl))
+ return PTR_ERR(drvdata->retention_ctrl);
+ }
+
ret = samsung_gpiolib_register(pdev, drvdata);
if (ret)
return ret;
@@ -1092,22 +1079,17 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
- /* Add to the global list */
- list_add_tail(&drvdata->node, &drvdata_list);
-
return 0;
}
-#ifdef CONFIG_PM
-
/**
- * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device
+ * samsung_pinctrl_suspend - save pinctrl state for suspend
*
* Save data for all banks handled by this device.
*/
-static void samsung_pinctrl_suspend_dev(
- struct samsung_pinctrl_drv_data *drvdata)
+static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
{
+ struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
int i;
for (i = 0; i < drvdata->nr_banks; i++) {
@@ -1141,18 +1123,23 @@ static void samsung_pinctrl_suspend_dev(
if (drvdata->suspend)
drvdata->suspend(drvdata);
+ if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
+ drvdata->retention_ctrl->enable(drvdata);
+
+ return 0;
}
/**
- * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device
+ * samsung_pinctrl_resume - restore pinctrl state from suspend
*
* Restore one of the banks that was saved during suspend.
*
* We don't bother doing anything complicated to avoid glitching lines since
* we're called before pad retention is turned off.
*/
-static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
+static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
{
+ struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
int i;
if (drvdata->resume)
@@ -1188,48 +1175,13 @@ static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
if (widths[type])
writel(bank->pm_save[type], reg + offs[type]);
}
-}
-
-/**
- * samsung_pinctrl_suspend - save pinctrl state for suspend
- *
- * Save data for all banks across all devices.
- */
-static int samsung_pinctrl_suspend(void)
-{
- struct samsung_pinctrl_drv_data *drvdata;
- list_for_each_entry(drvdata, &drvdata_list, node) {
- samsung_pinctrl_suspend_dev(drvdata);
- }
+ if (drvdata->retention_ctrl && drvdata->retention_ctrl->disable)
+ drvdata->retention_ctrl->disable(drvdata);
return 0;
}
-/**
- * samsung_pinctrl_resume - restore pinctrl state for suspend
- *
- * Restore data for all banks across all devices.
- */
-static void samsung_pinctrl_resume(void)
-{
- struct samsung_pinctrl_drv_data *drvdata;
-
- list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
- samsung_pinctrl_resume_dev(drvdata);
- }
-}
-
-#else
-#define samsung_pinctrl_suspend NULL
-#define samsung_pinctrl_resume NULL
-#endif
-
-static struct syscore_ops samsung_pinctrl_syscore_ops = {
- .suspend = samsung_pinctrl_suspend,
- .resume = samsung_pinctrl_resume,
-};
-
static const struct of_device_id samsung_pinctrl_dt_match[] = {
#ifdef CONFIG_PINCTRL_EXYNOS
{ .compatible = "samsung,exynos3250-pinctrl",
@@ -1238,8 +1190,6 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
- { .compatible = "samsung,exynos4415-pinctrl",
- .data = (void *)exynos4415_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5260-pinctrl",
@@ -1273,25 +1223,23 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match);
+static const struct dev_pm_ops samsung_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(samsung_pinctrl_suspend,
+ samsung_pinctrl_resume)
+};
+
static struct platform_driver samsung_pinctrl_driver = {
.probe = samsung_pinctrl_probe,
.driver = {
.name = "samsung-pinctrl",
.of_match_table = samsung_pinctrl_dt_match,
.suppress_bind_attrs = true,
+ .pm = &samsung_pinctrl_pm_ops,
},
};
static int __init samsung_pinctrl_drv_register(void)
{
- /*
- * Register syscore ops for save/restore of registers across suspend.
- * It's important to ensure that this driver is running at an earlier
- * initcall level than any arch-specific init calls that install syscore
- * ops that turn off pad retention (like exynos_pm_resume).
- */
- register_syscore_ops(&samsung_pinctrl_syscore_ops);
-
return platform_driver_register(&samsung_pinctrl_driver);
}
postcore_initcall(samsung_pinctrl_drv_register);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 043cb6c11180..515a61035e54 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -185,10 +185,48 @@ struct samsung_pin_bank {
};
/**
+ * struct samsung_retention_ctrl: runtime pin-bank retention control data.
+ * @regs: array of PMU registers to control pad retention.
+ * @nr_regs: number of registers in @regs array.
+ * @value: value to store to registers to turn off retention.
+ * @refcnt: atomic counter if retention control affects more than one bank.
+ * @priv: retention control code private data.
+ * @enable: platform specific callback to enter retention mode.
+ * @disable: platform specific callback to exit retention mode.
+ **/
+struct samsung_retention_ctrl {
+ const u32 *regs;
+ int nr_regs;
+ u32 value;
+ atomic_t *refcnt;
+ void *priv;
+ void (*enable)(struct samsung_pinctrl_drv_data *);
+ void (*disable)(struct samsung_pinctrl_drv_data *);
+};
+
+/**
+ * struct samsung_retention_data: represent pin-bank retention control configuration data.
+ * @regs: array of PMU registers to control pad retention.
+ * @nr_regs: number of registers in @regs array.
+ * @value: value to store to registers to turn off retention.
+ * @refcnt: atomic counter if retention control affects more than one bank.
+ * @init: platform specific callback to initialize retention control.
+ **/
+struct samsung_retention_data {
+ const u32 *regs;
+ int nr_regs;
+ u32 value;
+ atomic_t *refcnt;
+ struct samsung_retention_ctrl *(*init)(struct samsung_pinctrl_drv_data *,
+ const struct samsung_retention_data *);
+};
+
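The two structures above split retention handling into static per-SoC configuration (samsung_retention_data) and runtime state (samsung_retention_ctrl), which the init() callback builds at probe time and the suspend/resume hooks earlier in this patch then use. Below is a minimal sketch, not part of this patch, of how an SoC driver might implement such an init() callback; the function names are hypothetical, the actual PMU register accesses are left as comments, and it assumes the usual drvdata->dev device pointer plus some platform-specific handle (e.g. kept in @priv) for reaching the retention control registers.

/* Hypothetical sketch: wire a samsung_retention_data into runtime control ops. */
static void sketch_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
{
	/* Put the pads into retention, e.g. by writing ctrl->regs via the
	 * PMU handle stored in drvdata->retention_ctrl->priv. */
}

static void sketch_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
{
	/* Release retention by writing ctrl->value to each of ctrl->regs. */
}

static struct samsung_retention_ctrl *
sketch_retention_init(struct samsung_pinctrl_drv_data *drvdata,
		      const struct samsung_retention_data *data)
{
	struct samsung_retention_ctrl *ctrl;

	ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	/* Copy the static description into the runtime structure. */
	ctrl->regs = data->regs;
	ctrl->nr_regs = data->nr_regs;
	ctrl->value = data->value;
	ctrl->refcnt = data->refcnt;
	ctrl->enable = sketch_retention_enable;
	ctrl->disable = sketch_retention_disable;

	return ctrl;
}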
+/**
* struct samsung_pin_ctrl: represent a pin controller.
* @pin_banks: list of pin banks included in this controller.
* @nr_banks: number of pin banks.
* @nr_ext_resources: number of the extra base address for pin banks.
+ * @retention_data: configuration data for retention control.
* @eint_gpio_init: platform specific callback to setup the external gpio
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
@@ -198,6 +236,7 @@ struct samsung_pin_ctrl {
const struct samsung_pin_bank_data *pin_banks;
u32 nr_banks;
int nr_ext_resources;
+ const struct samsung_retention_data *retention_data;
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
@@ -219,6 +258,7 @@ struct samsung_pin_ctrl {
* @nr_function: number of such pin functions.
* @pin_base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
+ * @retention_ctrl: retention control runtime data.
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
@@ -238,6 +278,8 @@ struct samsung_pinctrl_drv_data {
unsigned int pin_base;
unsigned int nr_pins;
+ struct samsung_retention_ctrl *retention_ctrl;
+
void (*suspend)(struct samsung_pinctrl_drv_data *);
void (*resume)(struct samsung_pinctrl_drv_data *);
};
@@ -273,7 +315,6 @@ struct samsung_pmx_func {
extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
-extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[];
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 7ca37c3019ab..841cecdca7ea 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1691,6 +1691,72 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - ADI -------------------------------------------------------------------- */
+static const unsigned int adi_common_pins[] = {
+ /* ADIDATA, ADICS/SAMP, ADICLK */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int adi_common_mux[] = {
+ /* ADIDATA, ADICS/SAMP, ADICLK */
+ ADIDATA_MARK, ADICS_SAMP_MARK, ADICLK_MARK,
+};
+static const unsigned int adi_chsel0_pins[] = {
+ /* ADICHS 0 */
+ RCAR_GP_PIN(6, 27),
+};
+static const unsigned int adi_chsel0_mux[] = {
+ /* ADICHS 0 */
+ ADICHS0_MARK,
+};
+static const unsigned int adi_chsel1_pins[] = {
+ /* ADICHS 1 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int adi_chsel1_mux[] = {
+ /* ADICHS 1 */
+ ADICHS1_MARK,
+};
+static const unsigned int adi_chsel2_pins[] = {
+ /* ADICHS 2 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int adi_chsel2_mux[] = {
+ /* ADICHS 2 */
+ ADICHS2_MARK,
+};
+static const unsigned int adi_common_b_pins[] = {
+ /* ADIDATA B, ADICS/SAMP B, ADICLK B */
+ RCAR_GP_PIN(5, 25), RCAR_GP_PIN(5, 26), RCAR_GP_PIN(5, 27),
+};
+static const unsigned int adi_common_b_mux[] = {
+ /* ADIDATA B, ADICS/SAMP B, ADICLK B */
+ ADIDATA_B_MARK, ADICS_SAMP_B_MARK, ADICLK_B_MARK,
+};
+static const unsigned int adi_chsel0_b_pins[] = {
+ /* ADICHS B 0 */
+ RCAR_GP_PIN(5, 28),
+};
+static const unsigned int adi_chsel0_b_mux[] = {
+ /* ADICHS B 0 */
+ ADICHS0_B_MARK,
+};
+static const unsigned int adi_chsel1_b_pins[] = {
+ /* ADICHS B 1 */
+ RCAR_GP_PIN(5, 29),
+};
+static const unsigned int adi_chsel1_b_mux[] = {
+ /* ADICHS B 1 */
+ ADICHS1_B_MARK,
+};
+static const unsigned int adi_chsel2_b_pins[] = {
+ /* ADICHS B 2 */
+ RCAR_GP_PIN(5, 30),
+};
+static const unsigned int adi_chsel2_b_mux[] = {
+ /* ADICHS B 2 */
+ ADICHS2_B_MARK,
+};
+
/* - Audio Clock ------------------------------------------------------------ */
static const unsigned int audio_clk_a_pins[] = {
/* CLK */
@@ -4343,6 +4409,14 @@ static const unsigned int vin2_clk_mux[] = {
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(adi_common),
+ SH_PFC_PIN_GROUP(adi_chsel0),
+ SH_PFC_PIN_GROUP(adi_chsel1),
+ SH_PFC_PIN_GROUP(adi_chsel2),
+ SH_PFC_PIN_GROUP(adi_common_b),
+ SH_PFC_PIN_GROUP(adi_chsel0_b),
+ SH_PFC_PIN_GROUP(adi_chsel1_b),
+ SH_PFC_PIN_GROUP(adi_chsel2_b),
SH_PFC_PIN_GROUP(audio_clk_a),
SH_PFC_PIN_GROUP(audio_clk_b),
SH_PFC_PIN_GROUP(audio_clk_b_b),
@@ -4687,6 +4761,17 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(vin2_clk),
};
+static const char * const adi_groups[] = {
+ "adi_common",
+ "adi_chsel0",
+ "adi_chsel1",
+ "adi_chsel2",
+ "adi_common_b",
+ "adi_chsel0_b",
+ "adi_chsel1_b",
+ "adi_chsel2_b",
+};
+
static const char * const audio_clk_groups[] = {
"audio_clk_a",
"audio_clk_b",
@@ -5192,6 +5277,7 @@ static const char * const vin2_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(adi),
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(can0),
@@ -6455,6 +6541,7 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
#ifdef CONFIG_PINCTRL_PFC_R8A7793
const struct sh_pfc_soc_info r8a7793_pinmux_info = {
.name = "r8a77930_pfc",
+ .ops = &r8a7791_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 135ed5cbeb44..504d0c3d7f74 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -538,7 +538,7 @@ MOD_SEL0_2_1 MOD_SEL1_2 \
FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
FM(CLKOUT) FM(PRESETOUT) \
FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) FM(DU_DOTCLKIN3) \
- FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF)
+ FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
enum {
PINMUX_RESERVED = 0,
@@ -1461,46 +1461,50 @@ static const struct sh_pfc_pin pinmux_pins[] = {
* number for each pin. To this end use the pin layout from
* R-Car H3SiP to calculate a unique number for each pin.
*/
- SH_PFC_PIN_NAMED_CFG('A', 8, AVB_TX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 9, AVB_MDIO, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 12, AVB_TXCREFCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 13, AVB_RD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 14, AVB_RD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 16, AVB_RX_CTL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 17, AVB_TD2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 18, AVB_TD0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('A', 19, AVB_TXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('B', 13, AVB_RD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('B', 14, AVB_RD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('B', 17, AVB_TD3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('V', 6, RPC_WP#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('V', 7, RPC_RESET#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('W', 3, QSPI0_SPCLK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('Y', 3, QSPI0_SSL, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('Y', 6, QSPI0_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG('Y', 7, RPC_INT#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 4, QSPI0_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 6, QSPI0_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 3, QSPI1_IO3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 5, QSPI0_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 7, QSPI1_MOSI_IO0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 38, FSCLKST#, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 4, QSPI1_IO2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 7, DU_DOTCLKIN2, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG('A', 8, AVB_TX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 9, AVB_MDIO, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 12, AVB_TXCREFCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 13, AVB_RD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 14, AVB_RD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 16, AVB_RX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 17, AVB_TD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 18, AVB_TD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 19, AVB_TXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 13, AVB_RD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 14, AVB_RD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 17, AVB_TD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 6, RPC_WP#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 7, RPC_RESET#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('W', 3, QSPI0_SPCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 3, QSPI0_SSL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 6, QSPI0_IO2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 7, RPC_INT#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 4, QSPI0_MISO_IO1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 6, QSPI0_IO3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 3, QSPI1_IO3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 5, QSPI0_MOSI_IO0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 7, QSPI1_MOSI_IO0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 38, FSCLKST#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 39, EXTALR, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 4, QSPI1_IO2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 7, DU_DOTCLKIN2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 26, TRST#, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 29, TDI, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 27, TCK, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 28, TDO, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
/* - AUDIO CLOCK ------------------------------------------------------------ */
@@ -5415,167 +5419,211 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
#define PU6 0x18
static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+ { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
+ { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
+ { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
+ { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
+ { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
+ { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
+ { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
+ { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
+ { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
+ { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
+ { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
+ { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
+ { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
+ { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
+ { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
+ { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
+ { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
+ { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
+ { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
+ { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
+ { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
+ { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
+ { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
+ { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
+ { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
+ { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
+ { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
+ { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
+ { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
+ { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
+ { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
+
+ { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
+ { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
+ { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
+ { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
+ { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
+ { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
+ { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
+ { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
+ { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
+ { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
+ { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
+ { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
+ { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
+ { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
+ { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
+ { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
+ { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
+ { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
+ { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
+ { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
+ { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
+ { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
+ { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
+ { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
+ { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
+ { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
+ { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
+ { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
+ { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
+ { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
+ { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
+
+ { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
+ { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
+ { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
+ { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
+ { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
+ { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
+ { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
+ { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
+ { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
+ { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
+ { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
+ { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
+ { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
+ { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
+ { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
+ { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
+ { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
+ { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
+ { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
+ { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
+ { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
+ { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
+ { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
+ { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
+ { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
+ { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
+ { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
+ { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
+ { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
+ { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
+
+ { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
+ { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
+ { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
+ { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
+ { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
+ { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
+ { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
+ { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
+ { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
+ { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
+ { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
+ { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
+ /* bit 8 n/a */
+ { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
+ { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
+ { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
+ { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
+ { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR */
+ { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
+ { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
+ { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
+
+ { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
+ { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
+ { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
+ { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
+ { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
+ { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
+ { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
+ { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
+ { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
+ { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
+ { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
+ { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
+ { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
+ { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
+ { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
+ { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
+ { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
+ { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
+ { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
+ { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
+ { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
+ { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
+ { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
+ { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
+
+ { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
+ { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
+ { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
+ { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
+ { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
+ { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
+ { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
+ { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
+ { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
+ { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
+ { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
+ { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
+ { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
+ { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
+ { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
+ { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
+
+ { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
+ { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
+ { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
+ { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
+ { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
+ { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
+ { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
};
static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 7e16545a2c3c..b0362ae707e2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -19,19 +19,23 @@
#include "core.h"
#include "sh_pfc.h"
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | \
+ SH_PFC_PIN_CFG_PULL_UP | \
+ SH_PFC_PIN_CFG_PULL_DOWN)
+
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_16(0, fn, sfx), \
- PORT_GP_29(1, fn, sfx), \
- PORT_GP_15(2, fn, sfx), \
- PORT_GP_CFG_12(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_1(3, 12, fn, sfx), \
- PORT_GP_1(3, 13, fn, sfx), \
- PORT_GP_1(3, 14, fn, sfx), \
- PORT_GP_1(3, 15, fn, sfx), \
- PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_26(5, fn, sfx), \
- PORT_GP_32(6, fn, sfx), \
- PORT_GP_4(7, fn, sfx)
+ PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_29(1, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -541,6 +545,23 @@ MOD_SEL0_2 MOD_SEL1_2 \
MOD_SEL1_1 \
MOD_SEL1_0 MOD_SEL2_0
+/*
+ * These pins cannot be muxed but have other properties that can
+ * be set, such as drive strength or pull-up/pull-down enable.
+ */
+#define PINMUX_STATIC \
+ FM(QSPI0_SPCLK) FM(QSPI0_SSL) FM(QSPI0_MOSI_IO0) FM(QSPI0_MISO_IO1) \
+ FM(QSPI0_IO2) FM(QSPI0_IO3) \
+ FM(QSPI1_SPCLK) FM(QSPI1_SSL) FM(QSPI1_MOSI_IO0) FM(QSPI1_MISO_IO1) \
+ FM(QSPI1_IO2) FM(QSPI1_IO3) \
+ FM(RPC_INT) FM(RPC_WP) FM(RPC_RESET) \
+ FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) FM(AVB_TD3) \
+ FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
+ FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
+ FM(PRESETOUT) \
+ FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) \
+ FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
+
enum {
PINMUX_RESERVED = 0,
@@ -565,6 +586,7 @@ enum {
PINMUX_GPSR
PINMUX_IPSR
PINMUX_MOD_SELS
+ PINMUX_STATIC
PINMUX_MARK_END,
#undef F_
#undef FM
@@ -1484,10 +1506,80 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
+
+/*
+ * Static pins cannot be muxed between different functions but
+ * still need a mark entry in the pinmux list. Add each static
+ * pin to the list without an associated function. The sh-pfc
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
+ */
+#define FM(x) PINMUX_DATA(x##_MARK, 0),
+ PINMUX_STATIC
+#undef FM
};
+/*
+ * R8A7796 has 8 banks with 32 GPIOs in each => 256 GPIOs.
+ * Physical layout rows: A - AW, cols: 1 - 39.
+ */
+#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
+#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
+#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+
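As a quick worked check of the numbering macros just defined (not part of the patch): row letters index from 'A', double-letter rows (AA, AB, ...) are pushed past 'Z' by ROW_GROUP_A(), and the +300 offset keeps these package-ball pin numbers clear of the GPIO number space (0-255). The standalone snippet below mirrors the macros and verifies two entries that appear in the pin tables.

/* Editor's sketch: expected values from the PIN_NUMBER macros above. */
#include <assert.h>

#define ROW_GROUP_A(r)		('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c)	(((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c)	PIN_NUMBER(ROW_GROUP_A(r), c)

int main(void)
{
	/* AVB_TX_CTL sits at ball A8: (0 * 39) + 8 + 300 = 308 */
	assert(PIN_NUMBER('A', 8) == 308);
	/* QSPI0_MISO_IO1 sits at ball AB4: (27 * 39) + 4 + 300 = 1357 */
	assert(PIN_A_NUMBER('B', 4) == 1357);
	return 0;
}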
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+
+ /*
+ * Pins not associated with a GPIO port.
+ *
+ * The pin positions are different between different r8a7796
+ * packages; all that is needed for the pfc driver is a unique
+ * number for each pin. To this end use the pin layout from
+ * R-Car M3SiP to calculate a unique number for each pin.
+ */
+ SH_PFC_PIN_NAMED_CFG('A', 8, AVB_TX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 9, AVB_MDIO, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 12, AVB_TXCREFCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 13, AVB_RD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 14, AVB_RD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 16, AVB_RX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 17, AVB_TD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 18, AVB_TD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('A', 19, AVB_TXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 13, AVB_RD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 14, AVB_RD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 17, AVB_TD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 6, RPC_WP#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('V', 7, RPC_RESET#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('W', 3, QSPI0_SPCLK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 3, QSPI0_SSL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 6, QSPI0_IO2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('Y', 7, RPC_INT#, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 4, QSPI0_MISO_IO1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('B'), 6, QSPI0_IO3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 3, QSPI1_IO3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 5, QSPI0_MOSI_IO0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('C'), 7, QSPI1_MOSI_IO0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 38, FSCLKST, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 39, EXTALR, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 4, QSPI1_IO2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 26, TRST#, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 29, TDI, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 27, TCK, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 28, TDO, SH_PFC_PIN_CFG_DRIVE_STRENGTH),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
/* - EtherAVB --------------------------------------------------------------- */
@@ -1555,6 +1647,61 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - CAN ------------------------------------------------------------------ */
+static const unsigned int can0_data_a_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int can0_data_a_mux[] = {
+ CAN0_TX_A_MARK, CAN0_RX_A_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int can0_data_b_mux[] = {
+ CAN0_TX_B_MARK, CAN0_RX_B_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 26),
+};
+static const unsigned int can1_data_mux[] = {
+ CAN1_TX_MARK, CAN1_RX_MARK,
+};
+
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - CAN FD --------------------------------------------------------------- */
+static const unsigned int canfd0_data_a_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int canfd0_data_a_mux[] = {
+ CANFD0_TX_A_MARK, CANFD0_RX_A_MARK,
+};
+static const unsigned int canfd0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd0_data_b_mux[] = {
+ CANFD0_TX_B_MARK, CANFD0_RX_B_MARK,
+};
+static const unsigned int canfd1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 26),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
/* - DRIF0 --------------------------------------------------------------- */
static const unsigned int drif0_ctrl_a_pins[] = {
/* CLK, SYNC */
@@ -1851,6 +1998,213 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c1_a_pins[] = {
/* SDA, SCL */
@@ -1902,6 +2256,705 @@ static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 22),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 8),
+};
+static const unsigned int msiof1_clk_a_mux[] = {
+ MSIOF1_SCK_A_MARK,
+};
+static const unsigned int msiof1_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 9),
+};
+static const unsigned int msiof1_sync_a_mux[] = {
+ MSIOF1_SYNC_A_MARK,
+};
+static const unsigned int msiof1_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int msiof1_ss1_a_mux[] = {
+ MSIOF1_SS1_A_MARK,
+};
+static const unsigned int msiof1_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 6),
+};
+static const unsigned int msiof1_ss2_a_mux[] = {
+ MSIOF1_SS2_A_MARK,
+};
+static const unsigned int msiof1_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int msiof1_txd_a_mux[] = {
+ MSIOF1_TXD_A_MARK,
+};
+static const unsigned int msiof1_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int msiof1_rxd_a_mux[] = {
+ MSIOF1_RXD_A_MARK,
+};
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+ MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int msiof1_txd_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
+static const unsigned int msiof1_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int msiof1_rxd_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 17),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+ MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+ MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int msiof1_ss1_c_mux[] = {
+ MSIOF1_SS1_C_MARK,
+};
+static const unsigned int msiof1_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof1_ss2_c_mux[] = {
+ MSIOF1_SS2_C_MARK,
+};
+static const unsigned int msiof1_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int msiof1_txd_c_mux[] = {
+ MSIOF1_TXD_C_MARK,
+};
+static const unsigned int msiof1_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int msiof1_rxd_c_mux[] = {
+ MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+ MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+ MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+ MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof1_ss2_d_mux[] = {
+ MSIOF1_SS2_D_MARK,
+};
+static const unsigned int msiof1_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof1_txd_d_mux[] = {
+ MSIOF1_TXD_D_MARK,
+};
+static const unsigned int msiof1_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof1_rxd_d_mux[] = {
+ MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+ MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+ MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int msiof1_ss1_e_mux[] = {
+ MSIOF1_SS1_E_MARK,
+};
+static const unsigned int msiof1_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 5),
+};
+static const unsigned int msiof1_ss2_e_mux[] = {
+ MSIOF1_SS2_E_MARK,
+};
+static const unsigned int msiof1_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 3),
+};
+static const unsigned int msiof1_txd_e_mux[] = {
+ MSIOF1_TXD_E_MARK,
+};
+static const unsigned int msiof1_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int msiof1_rxd_e_mux[] = {
+ MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_clk_f_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 23),
+};
+static const unsigned int msiof1_clk_f_mux[] = {
+ MSIOF1_SCK_F_MARK,
+};
+static const unsigned int msiof1_sync_f_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 24),
+};
+static const unsigned int msiof1_sync_f_mux[] = {
+ MSIOF1_SYNC_F_MARK,
+};
+static const unsigned int msiof1_ss1_f_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int msiof1_ss1_f_mux[] = {
+ MSIOF1_SS1_F_MARK,
+};
+static const unsigned int msiof1_ss2_f_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int msiof1_ss2_f_mux[] = {
+ MSIOF1_SS2_F_MARK,
+};
+static const unsigned int msiof1_txd_f_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 0),
+};
+static const unsigned int msiof1_txd_f_mux[] = {
+ MSIOF1_TXD_F_MARK,
+};
+static const unsigned int msiof1_rxd_f_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof1_rxd_f_mux[] = {
+ MSIOF1_RXD_F_MARK,
+};
+static const unsigned int msiof1_clk_g_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int msiof1_clk_g_mux[] = {
+ MSIOF1_SCK_G_MARK,
+};
+static const unsigned int msiof1_sync_g_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 7),
+};
+static const unsigned int msiof1_sync_g_mux[] = {
+ MSIOF1_SYNC_G_MARK,
+};
+static const unsigned int msiof1_ss1_g_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int msiof1_ss1_g_mux[] = {
+ MSIOF1_SS1_G_MARK,
+};
+static const unsigned int msiof1_ss2_g_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int msiof1_ss2_g_mux[] = {
+ MSIOF1_SS2_G_MARK,
+};
+static const unsigned int msiof1_txd_g_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof1_txd_g_mux[] = {
+ MSIOF1_TXD_G_MARK,
+};
+static const unsigned int msiof1_rxd_g_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof1_rxd_g_mux[] = {
+ MSIOF1_RXD_G_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof2_clk_a_mux[] = {
+ MSIOF2_SCK_A_MARK,
+};
+static const unsigned int msiof2_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof2_sync_a_mux[] = {
+ MSIOF2_SYNC_A_MARK,
+};
+static const unsigned int msiof2_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof2_ss1_a_mux[] = {
+ MSIOF2_SS1_A_MARK,
+};
+static const unsigned int msiof2_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof2_ss2_a_mux[] = {
+ MSIOF2_SS2_A_MARK,
+};
+static const unsigned int msiof2_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof2_txd_a_mux[] = {
+ MSIOF2_TXD_A_MARK,
+};
+static const unsigned int msiof2_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof2_rxd_a_mux[] = {
+ MSIOF2_RXD_A_MARK,
+};
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof2_txd_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+static const unsigned int msiof2_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof2_rxd_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+ MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+ MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof2_ss1_c_mux[] = {
+ MSIOF2_SS1_C_MARK,
+};
+static const unsigned int msiof2_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof2_ss2_c_mux[] = {
+ MSIOF2_SS2_C_MARK,
+};
+static const unsigned int msiof2_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_txd_c_mux[] = {
+ MSIOF2_TXD_C_MARK,
+};
+static const unsigned int msiof2_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof2_rxd_c_mux[] = {
+ MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+ MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+ MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+ MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+ MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof2_txd_d_mux[] = {
+ MSIOF2_TXD_D_MARK,
+};
+static const unsigned int msiof2_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof2_rxd_d_mux[] = {
+ MSIOF2_RXD_D_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_a_mux[] = {
+ MSIOF3_SCK_A_MARK,
+};
+static const unsigned int msiof3_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_a_mux[] = {
+ MSIOF3_SYNC_A_MARK,
+};
+static const unsigned int msiof3_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof3_ss1_a_mux[] = {
+ MSIOF3_SS1_A_MARK,
+};
+static const unsigned int msiof3_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof3_ss2_a_mux[] = {
+ MSIOF3_SS2_A_MARK,
+};
+static const unsigned int msiof3_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_txd_a_mux[] = {
+ MSIOF3_TXD_A_MARK,
+};
+static const unsigned int msiof3_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rxd_a_mux[] = {
+ MSIOF3_RXD_A_MARK,
+};
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof3_ss1_b_mux[] = {
+ MSIOF3_SS1_B_MARK,
+};
+static const unsigned int msiof3_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof3_ss2_b_mux[] = {
+ MSIOF3_SS2_B_MARK,
+};
+static const unsigned int msiof3_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof3_txd_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+static const unsigned int msiof3_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof3_rxd_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof3_clk_c_mux[] = {
+ MSIOF3_SCK_C_MARK,
+};
+static const unsigned int msiof3_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof3_sync_c_mux[] = {
+ MSIOF3_SYNC_C_MARK,
+};
+static const unsigned int msiof3_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof3_txd_c_mux[] = {
+ MSIOF3_TXD_C_MARK,
+};
+static const unsigned int msiof3_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof3_rxd_c_mux[] = {
+ MSIOF3_RXD_C_MARK,
+};
+static const unsigned int msiof3_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof3_clk_d_mux[] = {
+ MSIOF3_SCK_D_MARK,
+};
+static const unsigned int msiof3_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof3_sync_d_mux[] = {
+ MSIOF3_SYNC_D_MARK,
+};
+static const unsigned int msiof3_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof3_ss1_d_mux[] = {
+ MSIOF3_SS1_D_MARK,
+};
+static const unsigned int msiof3_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof3_txd_d_mux[] = {
+ MSIOF3_TXD_D_MARK,
+};
+static const unsigned int msiof3_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof3_rxd_d_mux[] = {
+ MSIOF3_RXD_D_MARK,
+};
+
+static const unsigned int msiof3_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof3_clk_e_mux[] = {
+ MSIOF3_SCK_E_MARK,
+};
+static const unsigned int msiof3_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof3_sync_e_mux[] = {
+ MSIOF3_SYNC_E_MARK,
+};
+static const unsigned int msiof3_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int msiof3_ss1_e_mux[] = {
+ MSIOF3_SS1_E_MARK,
+};
+static const unsigned int msiof3_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int msiof3_ss2_e_mux[] = {
+ MSIOF3_SS2_E_MARK,
+};
+static const unsigned int msiof3_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof3_txd_e_mux[] = {
+ MSIOF3_TXD_E_MARK,
+};
+static const unsigned int msiof3_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof3_rxd_e_mux[] = {
+ MSIOF3_RXD_E_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -2333,6 +3386,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(drif0_ctrl_a),
SH_PFC_PIN_GROUP(drif0_data0_a),
SH_PFC_PIN_GROUP(drif0_data1_a),
@@ -2371,6 +3431,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
SH_PFC_PIN_GROUP(i2c1_a),
SH_PFC_PIN_GROUP(i2c1_b),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -2378,6 +3466,105 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -2447,6 +3634,28 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const can0_groups[] = {
+ "can0_data_a",
+ "can0_data_b",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data_a",
+ "canfd0_data_b",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
static const char * const drif0_groups[] = {
"drif0_ctrl_a",
"drif0_data0_a",
@@ -2500,6 +3709,49 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
static const char * const i2c1_groups[] = {
"i2c1_a",
"i2c1_b",
@@ -2516,6 +3768,117 @@ static const char * const i2c6_groups[] = {
"i2c6_c",
};
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk_a",
+ "msiof1_sync_a",
+ "msiof1_ss1_a",
+ "msiof1_ss2_a",
+ "msiof1_txd_a",
+ "msiof1_rxd_a",
+ "msiof1_clk_b",
+ "msiof1_sync_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_txd_b",
+ "msiof1_rxd_b",
+ "msiof1_clk_c",
+ "msiof1_sync_c",
+ "msiof1_ss1_c",
+ "msiof1_ss2_c",
+ "msiof1_txd_c",
+ "msiof1_rxd_c",
+ "msiof1_clk_d",
+ "msiof1_sync_d",
+ "msiof1_ss1_d",
+ "msiof1_ss2_d",
+ "msiof1_txd_d",
+ "msiof1_rxd_d",
+ "msiof1_clk_e",
+ "msiof1_sync_e",
+ "msiof1_ss1_e",
+ "msiof1_ss2_e",
+ "msiof1_txd_e",
+ "msiof1_rxd_e",
+ "msiof1_clk_f",
+ "msiof1_sync_f",
+ "msiof1_ss1_f",
+ "msiof1_ss2_f",
+ "msiof1_txd_f",
+ "msiof1_rxd_f",
+ "msiof1_clk_g",
+ "msiof1_sync_g",
+ "msiof1_ss1_g",
+ "msiof1_ss2_g",
+ "msiof1_txd_g",
+ "msiof1_rxd_g",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk_a",
+ "msiof2_sync_a",
+ "msiof2_ss1_a",
+ "msiof2_ss2_a",
+ "msiof2_txd_a",
+ "msiof2_rxd_a",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_txd_b",
+ "msiof2_rxd_b",
+ "msiof2_clk_c",
+ "msiof2_sync_c",
+ "msiof2_ss1_c",
+ "msiof2_ss2_c",
+ "msiof2_txd_c",
+ "msiof2_rxd_c",
+ "msiof2_clk_d",
+ "msiof2_sync_d",
+ "msiof2_ss1_d",
+ "msiof2_ss2_d",
+ "msiof2_txd_d",
+ "msiof2_rxd_d",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk_a",
+ "msiof3_sync_a",
+ "msiof3_ss1_a",
+ "msiof3_ss2_a",
+ "msiof3_txd_a",
+ "msiof3_rxd_a",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_ss1_b",
+ "msiof3_ss2_b",
+ "msiof3_txd_b",
+ "msiof3_rxd_b",
+ "msiof3_clk_c",
+ "msiof3_sync_c",
+ "msiof3_txd_c",
+ "msiof3_rxd_c",
+ "msiof3_clk_d",
+ "msiof3_sync_d",
+ "msiof3_ss1_d",
+ "msiof3_txd_d",
+ "msiof3_rxd_d",
+ "msiof3_clk_e",
+ "msiof3_sync_e",
+ "msiof3_ss1_e",
+ "msiof3_ss2_e",
+ "msiof3_txd_e",
+ "msiof3_rxd_e",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -2606,14 +3969,28 @@ static const char * const sdhi3_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(drif0),
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -3187,6 +4564,254 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
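+/* Per-pin drive-strength fields: { pin, bit offset within DRVCTRLx, field width in bits } */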
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRVCTRL0", 0xe6060300) {
+ { PIN_NUMBER('W', 3), 28, 2 }, /* QSPI0_SPCLK */
+ { PIN_A_NUMBER('C', 5), 24, 2 }, /* QSPI0_MOSI_IO0 */
+ { PIN_A_NUMBER('B', 4), 20, 2 }, /* QSPI0_MISO_IO1 */
+ { PIN_NUMBER('Y', 6), 16, 2 }, /* QSPI0_IO2 */
+ { PIN_A_NUMBER('B', 6), 12, 2 }, /* QSPI0_IO3 */
+ { PIN_NUMBER('Y', 3), 8, 2 }, /* QSPI0_SSL */
+ { PIN_NUMBER('V', 3), 4, 2 }, /* QSPI1_SPCLK */
+ { PIN_A_NUMBER('C', 7), 0, 2 }, /* QSPI1_MOSI_IO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL1", 0xe6060304) {
+ { PIN_A_NUMBER('E', 5), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { PIN_A_NUMBER('E', 4), 24, 2 }, /* QSPI1_IO2 */
+ { PIN_A_NUMBER('C', 3), 20, 2 }, /* QSPI1_IO3 */
+ { PIN_NUMBER('V', 5), 16, 2 }, /* QSPI1_SSL */
+ { PIN_NUMBER('Y', 7), 12, 2 }, /* RPC_INT# */
+ { PIN_NUMBER('V', 6), 8, 2 }, /* RPC_WP# */
+ { PIN_NUMBER('V', 7), 4, 2 }, /* RPC_RESET# */
+ { PIN_NUMBER('A', 16), 0, 3 }, /* AVB_RX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL2", 0xe6060308) {
+ { PIN_NUMBER('B', 19), 28, 3 }, /* AVB_RXC */
+ { PIN_NUMBER('A', 13), 24, 3 }, /* AVB_RD0 */
+ { PIN_NUMBER('B', 13), 20, 3 }, /* AVB_RD1 */
+ { PIN_NUMBER('A', 14), 16, 3 }, /* AVB_RD2 */
+ { PIN_NUMBER('B', 14), 12, 3 }, /* AVB_RD3 */
+ { PIN_NUMBER('A', 8), 8, 3 }, /* AVB_TX_CTL */
+ { PIN_NUMBER('A', 19), 4, 3 }, /* AVB_TXC */
+ { PIN_NUMBER('A', 18), 0, 3 }, /* AVB_TD0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
+ { PIN_NUMBER('B', 18), 28, 3 }, /* AVB_TD1 */
+ { PIN_NUMBER('A', 17), 24, 3 }, /* AVB_TD2 */
+ { PIN_NUMBER('B', 17), 20, 3 }, /* AVB_TD3 */
+ { PIN_NUMBER('A', 12), 16, 3 }, /* AVB_TXCREFCLK */
+ { PIN_NUMBER('A', 9), 12, 3 }, /* AVB_MDIO */
+ { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */
+ { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
+ { RCAR_GP_PIN(2, 12), 28, 3 }, /* AVB_LINK */
+ { RCAR_GP_PIN(2, 13), 24, 3 }, /* AVB_AVTP_MATCH */
+ { RCAR_GP_PIN(2, 14), 20, 3 }, /* AVB_AVTP_CAPTURE */
+ { RCAR_GP_PIN(2, 0), 16, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(2, 1), 12, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(2, 3), 4, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(2, 4), 0, 3 }, /* IRQ4 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
+ { RCAR_GP_PIN(2, 5), 28, 3 }, /* IRQ5 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* PWM0 */
+ { RCAR_GP_PIN(2, 7), 20, 3 }, /* PWM1 */
+ { RCAR_GP_PIN(2, 8), 16, 3 }, /* PWM2 */
+ { RCAR_GP_PIN(1, 0), 12, 3 }, /* A0 */
+ { RCAR_GP_PIN(1, 1), 8, 3 }, /* A1 */
+ { RCAR_GP_PIN(1, 2), 4, 3 }, /* A2 */
+ { RCAR_GP_PIN(1, 3), 0, 3 }, /* A3 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
+ { RCAR_GP_PIN(1, 4), 28, 3 }, /* A4 */
+ { RCAR_GP_PIN(1, 5), 24, 3 }, /* A5 */
+ { RCAR_GP_PIN(1, 6), 20, 3 }, /* A6 */
+ { RCAR_GP_PIN(1, 7), 16, 3 }, /* A7 */
+ { RCAR_GP_PIN(1, 8), 12, 3 }, /* A8 */
+ { RCAR_GP_PIN(1, 9), 8, 3 }, /* A9 */
+ { RCAR_GP_PIN(1, 10), 4, 3 }, /* A10 */
+ { RCAR_GP_PIN(1, 11), 0, 3 }, /* A11 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
+ { RCAR_GP_PIN(1, 12), 28, 3 }, /* A12 */
+ { RCAR_GP_PIN(1, 13), 24, 3 }, /* A13 */
+ { RCAR_GP_PIN(1, 14), 20, 3 }, /* A14 */
+ { RCAR_GP_PIN(1, 15), 16, 3 }, /* A15 */
+ { RCAR_GP_PIN(1, 16), 12, 3 }, /* A16 */
+ { RCAR_GP_PIN(1, 17), 8, 3 }, /* A17 */
+ { RCAR_GP_PIN(1, 18), 4, 3 }, /* A18 */
+ { RCAR_GP_PIN(1, 19), 0, 3 }, /* A19 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+ { RCAR_GP_PIN(1, 28), 28, 3 }, /* CLKOUT */
+ { RCAR_GP_PIN(1, 20), 24, 3 }, /* CS0 */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* CS1_A26 */
+ { RCAR_GP_PIN(1, 22), 16, 3 }, /* BS */
+ { RCAR_GP_PIN(1, 23), 12, 3 }, /* RD */
+ { RCAR_GP_PIN(1, 24), 8, 3 }, /* RD_WR */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* WE0 */
+ { RCAR_GP_PIN(1, 26), 0, 3 }, /* WE1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+ { RCAR_GP_PIN(1, 27), 28, 3 }, /* EX_WAIT0 */
+ { PIN_NUMBER('C', 1), 24, 3 }, /* PRESETOUT# */
+ { RCAR_GP_PIN(0, 0), 20, 3 }, /* D0 */
+ { RCAR_GP_PIN(0, 1), 16, 3 }, /* D1 */
+ { RCAR_GP_PIN(0, 2), 12, 3 }, /* D2 */
+ { RCAR_GP_PIN(0, 3), 8, 3 }, /* D3 */
+ { RCAR_GP_PIN(0, 4), 4, 3 }, /* D4 */
+ { RCAR_GP_PIN(0, 5), 0, 3 }, /* D5 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+ { RCAR_GP_PIN(0, 6), 28, 3 }, /* D6 */
+ { RCAR_GP_PIN(0, 7), 24, 3 }, /* D7 */
+ { RCAR_GP_PIN(0, 8), 20, 3 }, /* D8 */
+ { RCAR_GP_PIN(0, 9), 16, 3 }, /* D9 */
+ { RCAR_GP_PIN(0, 10), 12, 3 }, /* D10 */
+ { RCAR_GP_PIN(0, 11), 8, 3 }, /* D11 */
+ { RCAR_GP_PIN(0, 12), 4, 3 }, /* D12 */
+ { RCAR_GP_PIN(0, 13), 0, 3 }, /* D13 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
+ { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */
+ { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
+ { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
+ { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 3), 8, 3 }, /* GP7_03 */
+ { PIN_A_NUMBER('P', 7), 4, 2 }, /* DU_DOTCLKIN0 */
+ { PIN_A_NUMBER('P', 8), 0, 2 }, /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+ { PIN_A_NUMBER('R', 8), 28, 2 }, /* DU_DOTCLKIN2 */
+ { PIN_A_NUMBER('D', 38), 20, 2 }, /* FSCLKST */
+ { PIN_A_NUMBER('R', 30), 4, 2 }, /* TMS */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
+ { PIN_A_NUMBER('T', 28), 28, 2 }, /* TDO */
+ { PIN_A_NUMBER('T', 30), 24, 2 }, /* ASEBRK */
+ { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */
+ { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
+ { RCAR_GP_PIN(3, 6), 28, 3 }, /* SD1_CLK */
+ { RCAR_GP_PIN(3, 7), 24, 3 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 8), 20, 3 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 9), 16, 3 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 10), 12, 3 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 11), 8, 3 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(4, 0), 4, 3 }, /* SD2_CLK */
+ { RCAR_GP_PIN(4, 1), 0, 3 }, /* SD2_CMD */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
+ { RCAR_GP_PIN(4, 2), 28, 3 }, /* SD2_DAT0 */
+ { RCAR_GP_PIN(4, 3), 24, 3 }, /* SD2_DAT1 */
+ { RCAR_GP_PIN(4, 4), 20, 3 }, /* SD2_DAT2 */
+ { RCAR_GP_PIN(4, 5), 16, 3 }, /* SD2_DAT3 */
+ { RCAR_GP_PIN(4, 6), 12, 3 }, /* SD2_DS */
+ { RCAR_GP_PIN(4, 7), 8, 3 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 8), 4, 3 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 9), 0, 3 }, /* SD3_DAT0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
+ { RCAR_GP_PIN(4, 10), 28, 3 }, /* SD3_DAT1 */
+ { RCAR_GP_PIN(4, 11), 24, 3 }, /* SD3_DAT2 */
+ { RCAR_GP_PIN(4, 12), 20, 3 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 13), 16, 3 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 14), 12, 3 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 15), 8, 3 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 16), 4, 3 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 17), 0, 3 }, /* SD3_DS */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
+ { RCAR_GP_PIN(3, 12), 28, 3 }, /* SD0_CD */
+ { RCAR_GP_PIN(3, 13), 24, 3 }, /* SD0_WP */
+ { RCAR_GP_PIN(3, 14), 20, 3 }, /* SD1_CD */
+ { RCAR_GP_PIN(3, 15), 16, 3 }, /* SD1_WP */
+ { RCAR_GP_PIN(5, 0), 12, 3 }, /* SCK0 */
+ { RCAR_GP_PIN(5, 1), 8, 3 }, /* RX0 */
+ { RCAR_GP_PIN(5, 2), 4, 3 }, /* TX0 */
+ { RCAR_GP_PIN(5, 3), 0, 3 }, /* CTS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
+ { RCAR_GP_PIN(5, 4), 28, 3 }, /* RTS0_TANS */
+ { RCAR_GP_PIN(5, 5), 24, 3 }, /* RX1 */
+ { RCAR_GP_PIN(5, 6), 20, 3 }, /* TX1 */
+ { RCAR_GP_PIN(5, 7), 16, 3 }, /* CTS1 */
+ { RCAR_GP_PIN(5, 8), 12, 3 }, /* RTS1_TANS */
+ { RCAR_GP_PIN(5, 9), 8, 3 }, /* SCK2 */
+ { RCAR_GP_PIN(5, 10), 4, 3 }, /* TX2 */
+ { RCAR_GP_PIN(5, 11), 0, 3 }, /* RX2 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
+ { RCAR_GP_PIN(5, 12), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(5, 13), 24, 3 }, /* HRX0 */
+ { RCAR_GP_PIN(5, 14), 20, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(5, 15), 16, 3 }, /* HCTS0 */
+ { RCAR_GP_PIN(5, 16), 12, 3 }, /* HRTS0 */
+ { RCAR_GP_PIN(5, 17), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(5, 18), 4, 3 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(5, 19), 0, 3 }, /* MSIOF0_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
+ { RCAR_GP_PIN(5, 20), 28, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(5, 21), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(5, 22), 20, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(5, 23), 16, 3 }, /* MLB_CLK */
+ { RCAR_GP_PIN(5, 24), 12, 3 }, /* MLB_SIG */
+ { RCAR_GP_PIN(5, 25), 8, 3 }, /* MLB_DAT */
+ { PIN_NUMBER('H', 37), 4, 3 }, /* MLB_REF */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* SSI_SCK01239 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
+ { RCAR_GP_PIN(6, 1), 28, 3 }, /* SSI_WS01239 */
+ { RCAR_GP_PIN(6, 2), 24, 3 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(6, 3), 20, 3 }, /* SSI_SDATA1 */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* SSI_SDATA2 */
+ { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 7), 4, 3 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* SSI_SCK4 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
+ { RCAR_GP_PIN(6, 9), 28, 3 }, /* SSI_WS4 */
+ { RCAR_GP_PIN(6, 10), 24, 3 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(6, 11), 20, 3 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(6, 13), 12, 3 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(6, 14), 8, 3 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(6, 15), 4, 3 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* SSI_SDATA6 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
+ { RCAR_GP_PIN(6, 17), 28, 3 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(6, 18), 24, 3 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(6, 19), 20, 3 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(6, 21), 12, 3 }, /* SSI_SDATA9 */
+ { RCAR_GP_PIN(6, 22), 8, 3 }, /* AUDIO_CLKA */
+ { RCAR_GP_PIN(6, 23), 4, 3 }, /* AUDIO_CLKB */
+ { RCAR_GP_PIN(6, 24), 0, 3 }, /* USB0_PWEN */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
+ { RCAR_GP_PIN(6, 25), 28, 3 }, /* USB0_OVC */
+ { RCAR_GP_PIN(6, 26), 24, 3 }, /* USB1_PWEN */
+ { RCAR_GP_PIN(6, 27), 20, 3 }, /* USB1_OVC */
+ { RCAR_GP_PIN(6, 28), 16, 3 }, /* USB30_PWEN */
+ { RCAR_GP_PIN(6, 29), 12, 3 }, /* USB30_OVC */
+ { RCAR_GP_PIN(6, 30), 8, 3 }, /* GP6_30 */
+ { RCAR_GP_PIN(6, 31), 4, 3 }, /* GP6_31 */
+ } },
+ { },
+};
+
static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
@@ -3202,8 +4827,278 @@ static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
+#define PUEN 0xe6060400
+#define PUD 0xe6060440
+
+#define PU0 0x00
+#define PU1 0x04
+#define PU2 0x08
+#define PU3 0x0c
+#define PU4 0x10
+#define PU5 0x14
+#define PU6 0x18
+
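+/*
+ * Bias handling: every pin listed in bias_info has one bit in a PUEN
+ * register (bias enable) and the same bit in the matching PUD register
+ * (1 = pull-up, 0 = pull-down), at the per-bank offsets PU0..PU6 above.
+ */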
+static const struct sh_pfc_bias_info bias_info[] = {
+ { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
+ { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
+ { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
+ { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
+ { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
+ { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
+ { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
+ { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
+ { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
+ { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
+ { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
+ { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
+ { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
+ { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
+ { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
+ { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
+ { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
+ { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
+ { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
+ { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
+ { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
+ { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
+ { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
+ { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
+ { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
+ { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
+ { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
+ { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
+ { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
+ { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
+ { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
+
+ { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
+ { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
+ { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
+ { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
+ { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
+ { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
+ { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
+ { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
+ { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
+ { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
+ { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
+ { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
+ { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
+ { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
+ { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
+ { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
+ { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
+ { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
+ { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
+ { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
+ { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
+ { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
+ { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
+ { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
+ { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
+ { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
+ { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
+ { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
+ { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
+ { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
+ { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
+
+ { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
+ { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
+ { RCAR_GP_PIN(7, 3), PU2, 29 }, /* GP7_03 */
+ { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
+ { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
+ { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
+ { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
+ { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
+ { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
+ { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
+ { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
+ { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
+ { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
+ { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
+ { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
+ { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
+ { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
+ { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
+ { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
+ { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
+ { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
+ { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
+ { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
+ { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
+ { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
+ { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
+ { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
+ { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
+ { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
+ { RCAR_GP_PIN(1, 28), PU2, 0 }, /* CLKOUT */
+
+ { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
+ { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
+ { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
+ { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
+ { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
+ { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
+ { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
+ { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
+ { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
+ { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
+ { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
+ { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
+ /* bit 8 n/a */
+ { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
+ { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
+ { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
+ { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
+ { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR */
+ { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST */
+ /* bit 1 n/a on M3 */
+ { PIN_A_NUMBER('R', 8), PU3, 0 }, /* DU_DOTCLKIN2 */
+
+ { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
+ { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
+ { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
+ { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
+ { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
+ { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
+ { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
+ { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
+ { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
+ { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
+ { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
+ { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
+ { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
+ { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
+ { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
+ { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
+ { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
+ { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
+ { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
+ { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
+ { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
+ { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
+ { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
+ { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
+
+ { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
+ { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
+ { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
+ { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
+ { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
+ { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
+ { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
+ { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
+ { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
+ { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
+ { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
+ { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
+ { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
+ { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
+ { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
+ { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
+
+ { RCAR_GP_PIN(6, 31), PU6, 6 }, /* GP6_31 */
+ { RCAR_GP_PIN(6, 30), PU6, 5 }, /* GP6_30 */
+ { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
+ { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
+ { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
+ { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
+ { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+};
+
+static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ const struct sh_pfc_bias_info *info;
+ u32 reg;
+ u32 bit;
+
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ reg = info->reg;
+ bit = BIT(info->bit);
+
+ if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+static void r8a7796_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ const struct sh_pfc_bias_info *info;
+ u32 enable, updown;
+ u32 reg;
+ u32 bit;
+
+ info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
+ if (!info)
+ return;
+
+ reg = info->reg;
+ bit = BIT(info->bit);
+
+ enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= bit;
+
+ updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= bit;
+
+ sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
+ sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+}
+
static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
.pin_to_pocctrl = r8a7796_pin_to_pocctrl,
+ .get_bias = r8a7796_pinmux_get_bias,
+ .set_bias = r8a7796_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a7796_pinmux_info = {
@@ -3221,6 +5116,7 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index fcacfa73ef6e..08150a321be6 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,6 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pctl_desc.pins = pmx->pins;
pmx->pctl_desc.npins = pfc->info->nr_pins;
- pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx);
- return PTR_ERR_OR_ZERO(pmx->pctl);
+ return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
+ &pmx->pctl);
}
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 7f3041697813..600d6427a978 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5322,7 +5322,8 @@ static int atlas7_pin_config_set(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *configs,
unsigned num_configs)
{
- u16 param, arg;
+ u16 param;
+ u32 arg;
int idx, err;
for (idx = 0; idx < num_configs; idx++) {
@@ -5420,14 +5421,15 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
if (!sys2pci_np)
return -EINVAL;
+
ret = of_address_to_resource(sys2pci_np, 0, &res);
+ of_node_put(sys2pci_np);
if (ret)
return ret;
+
pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pmx->sys2pci_base)) {
- of_node_put(sys2pci_np);
+ if (IS_ERR(pmx->sys2pci_base))
return -ENOMEM;
- }
pmx->dev = &pdev->dev;
@@ -5443,7 +5445,7 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
pmx->regs[idx] = of_iomap(np, idx);
if (!pmx->regs[idx]) {
dev_err(&pdev->dev,
- "can't map ioc bank#%d registers\n", idx);
+ "can't map ioc bank#%d registers\n", idx);
ret = -ENOMEM;
goto unmap_io;
}
@@ -6056,8 +6058,8 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(chip, a7gc);
if (ret) {
dev_err(&pdev->dev,
- "%s: error in probe function with status %d\n",
- np->name, ret);
+ "%s: error in probe function with status %d\n",
+ np->name, ret);
goto failed;
}
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 4c9b863f8267..cf6d68c7345b 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
@@ -705,7 +705,6 @@ static const struct of_device_id plgpio_of_match[] = {
{ .compatible = "st,spear-plgpio" },
{}
};
-MODULE_DEVICE_TABLE(of, plgpio_of_match);
static struct platform_driver plgpio_driver = {
.probe = plgpio_probe,
@@ -721,7 +720,3 @@ static int __init plgpio_init(void)
return platform_driver_register(&plgpio_driver);
}
subsys_initcall(plgpio_init);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("STMicroelectronics SPEAr PLGPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index 18210681c737..0180eb544f02 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -11,7 +11,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear.h"
@@ -2717,14 +2716,3 @@ static int __init spear1310_pinctrl_init(void)
return platform_driver_register(&spear1310_pinctrl_driver);
}
arch_initcall(spear1310_pinctrl_init);
-
-static void __exit spear1310_pinctrl_exit(void)
-{
- platform_driver_unregister(&spear1310_pinctrl_driver);
-}
-module_exit(spear1310_pinctrl_exit);
-
-MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
-MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index c01fb23ee636..0ca961219b3b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -11,7 +11,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear.h"
@@ -2033,14 +2032,3 @@ static int __init spear1340_pinctrl_init(void)
return platform_driver_register(&spear1340_pinctrl_driver);
}
arch_initcall(spear1340_pinctrl_init);
-
-static void __exit spear1340_pinctrl_exit(void)
-{
- platform_driver_unregister(&spear1340_pinctrl_driver);
-}
-module_exit(spear1340_pinctrl_exit);
-
-MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
-MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 111148daa3f1..e39913a18139 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -11,7 +11,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear3xx.h"
@@ -690,14 +689,3 @@ static int __init spear300_pinctrl_init(void)
return platform_driver_register(&spear300_pinctrl_driver);
}
arch_initcall(spear300_pinctrl_init);
-
-static void __exit spear300_pinctrl_exit(void)
-{
- platform_driver_unregister(&spear300_pinctrl_driver);
-}
-module_exit(spear300_pinctrl_exit);
-
-MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
-MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index a7b000062985..393b2b97d527 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -11,7 +11,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear3xx.h"
@@ -413,14 +412,3 @@ static int __init spear310_pinctrl_init(void)
return platform_driver_register(&spear310_pinctrl_driver);
}
arch_initcall(spear310_pinctrl_init);
-
-static void __exit spear310_pinctrl_exit(void)
-{
- platform_driver_unregister(&spear310_pinctrl_driver);
-}
-module_exit(spear310_pinctrl_exit);
-
-MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
-MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index e2b3817701dc..99c10fc3d9b5 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -11,7 +11,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "pinctrl-spear3xx.h"
@@ -3454,14 +3453,3 @@ static int __init spear320_pinctrl_init(void)
return platform_driver_register(&spear320_pinctrl_driver);
}
arch_initcall(spear320_pinctrl_init);
-
-static void __exit spear320_pinctrl_exit(void)
-{
- platform_driver_unregister(&spear320_pinctrl_driver);
-}
-module_exit(spear320_pinctrl_exit);
-
-MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
-MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index c03dce7a22df..f5ccabd8535e 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -20,4 +20,9 @@ config PINCTRL_STM32F746
default MACH_STM32F746
select PINCTRL_STM32
+config PINCTRL_STM32H743
+ bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743
+ depends on OF && IRQ_DOMAIN_HIERARCHY
+ default MACH_STM32H743
+ select PINCTRL_STM32
endif
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
index 4a1ee748441f..cb31b4d24c44 100644
--- a/drivers/pinctrl/stm32/Makefile
+++ b/drivers/pinctrl/stm32/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_STM32F429) += pinctrl-stm32f429.o
obj-$(CONFIG_PINCTRL_STM32F746) += pinctrl-stm32f746.o
+obj-$(CONFIG_PINCTRL_STM32H743) += pinctrl-stm32h743.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index efc43711ff5c..abc405be0212 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -236,6 +236,15 @@ static void stm32_gpio_domain_activate(struct irq_domain *d,
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->range.id);
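+ /* Mark the GPIO as in use by an IRQ; undone in stm32_gpio_domain_deactivate() */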
+ gpiochip_lock_as_irq(&bank->gpio_chip, irq_data->hwirq);
+}
+
+static void stm32_gpio_domain_deactivate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct stm32_gpio_bank *bank = d->host_data;
+
+ gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
static int stm32_gpio_domain_alloc(struct irq_domain *d,
@@ -243,11 +252,9 @@ static int stm32_gpio_domain_alloc(struct irq_domain *d,
unsigned int nr_irqs, void *data)
{
struct stm32_gpio_bank *bank = d->host_data;
- struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq;
- int ret;
hwirq = fwspec->param[0];
parent_fwspec.fwnode = d->parent->fwnode;
@@ -258,35 +265,15 @@ static int stm32_gpio_domain_alloc(struct irq_domain *d,
irq_domain_set_hwirq_and_chip(d, virq, hwirq, &stm32_gpio_irq_chip,
bank);
- ret = gpiochip_lock_as_irq(&bank->gpio_chip, hwirq);
- if (ret) {
- dev_err(pctl->dev, "Unable to configure STM32 %s%ld as IRQ\n",
- bank->gpio_chip.label, hwirq);
- return ret;
- }
-
- ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &parent_fwspec);
- if (ret)
- gpiochip_unlock_as_irq(&bank->gpio_chip, hwirq);
-
- return ret;
-}
-
-static void stm32_gpio_domain_free(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct stm32_gpio_bank *bank = d->host_data;
- struct irq_data *data = irq_get_irq_data(virq);
-
- irq_domain_free_irqs_common(d, virq, nr_irqs);
- gpiochip_unlock_as_irq(&bank->gpio_chip, data->hwirq);
+ return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &parent_fwspec);
}
static const struct irq_domain_ops stm32_gpio_domain_ops = {
.translate = stm32_gpio_domain_translate,
.alloc = stm32_gpio_domain_alloc,
- .free = stm32_gpio_domain_free,
+ .free = irq_domain_free_irqs_common,
.activate = stm32_gpio_domain_activate,
+ .deactivate = stm32_gpio_domain_deactivate,
};
/* Pinctrl functions */
@@ -631,6 +618,7 @@ static const struct pinmux_ops stm32_pmx_ops = {
.get_function_groups = stm32_pmx_get_func_groups,
.set_mux = stm32_pmx_set_mux,
.gpio_set_direction = stm32_pmx_gpio_set_direction,
+ .strict = true,
};
/* Pinconf functions */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32h743.c b/drivers/pinctrl/stm32/pinctrl-stm32h743.c
new file mode 100644
index 000000000000..f7f9eacd3768
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32h743.c
@@ -0,0 +1,1980 @@
+/*
+ * Copyright (C) Alexandre Torgue 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-stm32.h"
+
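+/*
+ * Pin descriptions below: STM32_FUNCTION(0) selects the plain GPIO mode,
+ * entries 1..16 are assumed to map to alternate functions AF0..AF15
+ * (e.g. 16 = EVENTOUT = AF15), and 17 selects analog mode.
+ */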
+static const struct stm32_desc_pin stm32h743_pins[] = {
+ STM32_PIN(
+ PINCTRL_PIN(0, "PA0"),
+ STM32_FUNCTION(0, "GPIOA0"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(5, "TIM15_BKIN"),
+ STM32_FUNCTION(8, "USART2_CTS_NSS"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "SDMMC2_CMD"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(1, "PA1"),
+ STM32_FUNCTION(0, "GPIOA1"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(4, "LPTIM3_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH1N"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(11, "SAI2_MCK_B"),
+ STM32_FUNCTION(12, "ETH_MII_RX_CLK ETH_RMII_REF_CLK"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(2, "PA2"),
+ STM32_FUNCTION(0, "GPIOA2"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(4, "LPTIM4_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH1"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(9, "SAI2_SCK_B"),
+ STM32_FUNCTION(12, "ETH_MDIO"),
+ STM32_FUNCTION(13, "MDIOS_MDIO"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(3, "PA3"),
+ STM32_FUNCTION(0, "GPIOA3"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(4, "LPTIM5_OUT"),
+ STM32_FUNCTION(5, "TIM15_CH2"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(10, "LCD_B2"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D0"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(4, "PA4"),
+ STM32_FUNCTION(0, "GPIOA4"),
+ STM32_FUNCTION(3, "TIM5_ETR"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(9, "SPI6_NSS"),
+ STM32_FUNCTION(13, "OTG_HS_SOF"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(5, "PA5"),
+ STM32_FUNCTION(0, "GPIOA5"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(9, "SPI6_SCK"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_CK"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(6, "PA6"),
+ STM32_FUNCTION(0, "GPIOA6"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(6, "SPI1_MISO I2S1_SDI"),
+ STM32_FUNCTION(9, "SPI6_MISO"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(11, "TIM8_BKIN_COMP12"),
+ STM32_FUNCTION(12, "MDIOS_MDC"),
+ STM32_FUNCTION(13, "TIM1_BKIN_COMP12"),
+ STM32_FUNCTION(14, "DCMI_PIXCLK"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(7, "PA7"),
+ STM32_FUNCTION(0, "GPIOA7"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SDO"),
+ STM32_FUNCTION(9, "SPI6_MOSI"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(12, "ETH_MII_RX_DV ETH_RMII_CRS_DV"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(8, "PA8"),
+ STM32_FUNCTION(0, "GPIOA8"),
+ STM32_FUNCTION(1, "MCO1"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(3, "HRTIM_CHB2"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(11, "OTG_FS_SOF"),
+ STM32_FUNCTION(12, "UART7_RX"),
+ STM32_FUNCTION(13, "TIM8_BKIN2_COMP12"),
+ STM32_FUNCTION(14, "LCD_B3"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(9, "PA9"),
+ STM32_FUNCTION(0, "GPIOA9"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(3, "HRTIM_CHC1"),
+ STM32_FUNCTION(4, "LPUART1_TX"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(12, "ETH_TX_ER"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(10, "PA10"),
+ STM32_FUNCTION(0, "GPIOA10"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(3, "HRTIM_CHC2"),
+ STM32_FUNCTION(4, "LPUART1_RX"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(10, "CAN1_TXFD"),
+ STM32_FUNCTION(11, "OTG_FS_ID"),
+ STM32_FUNCTION(12, "MDIOS_MDIO"),
+ STM32_FUNCTION(13, "LCD_B4"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(11, "PA11"),
+ STM32_FUNCTION(0, "GPIOA11"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(3, "HRTIM_CHD1"),
+ STM32_FUNCTION(4, "LPUART1_CTS"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(7, "UART4_RX"),
+ STM32_FUNCTION(8, "USART1_CTS_NSS"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(11, "OTG_FS_DM"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(12, "PA12"),
+ STM32_FUNCTION(0, "GPIOA12"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(3, "HRTIM_CHD2"),
+ STM32_FUNCTION(4, "LPUART1_RTS"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(7, "UART4_TX"),
+ STM32_FUNCTION(8, "USART1_RTS"),
+ STM32_FUNCTION(9, "SAI2_FS_B"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(11, "OTG_FS_DP"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(13, "PA13"),
+ STM32_FUNCTION(0, "GPIOA13"),
+ STM32_FUNCTION(1, "JTMS SWDIO"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(14, "PA14"),
+ STM32_FUNCTION(0, "GPIOA14"),
+ STM32_FUNCTION(1, "JTCK SWCLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(15, "PA15"),
+ STM32_FUNCTION(0, "GPIOA15"),
+ STM32_FUNCTION(1, "JTDI"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(3, "HRTIM_FLT1"),
+ STM32_FUNCTION(5, "HDMI_CEC"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(8, "SPI6_NSS"),
+ STM32_FUNCTION(9, "UART4_RTS"),
+ STM32_FUNCTION(12, "UART7_TX"),
+ STM32_FUNCTION(14, "DSI_TE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(16, "PB0"),
+ STM32_FUNCTION(0, "GPIOB0"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(7, "DFSDM_CKOUT"),
+ STM32_FUNCTION(9, "UART4_CTS"),
+ STM32_FUNCTION(10, "LCD_R3"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D1"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(17, "PB1"),
+ STM32_FUNCTION(0, "GPIOB1"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(7, "DFSDM_DATIN1"),
+ STM32_FUNCTION(10, "LCD_R6"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D2"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(18, "PB2"),
+ STM32_FUNCTION(0, "GPIOB2"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(5, "DFSDM_CKIN1"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(8, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(9, "SAI4_SD_A"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
+ STM32_FUNCTION(11, "SAI4_D1"),
+ STM32_FUNCTION(12, "ETH_TX_ER"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(19, "PB3"),
+ STM32_FUNCTION(0, "GPIOB3"),
+ STM32_FUNCTION(1, "JTDO TRACESWO"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(3, "HRTIM_FLT4"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(9, "SPI6_SCK"),
+ STM32_FUNCTION(10, "SDMMC2_D2"),
+ STM32_FUNCTION(12, "UART7_RX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(20, "PB4"),
+ STM32_FUNCTION(0, "GPIOB4"),
+ STM32_FUNCTION(1, "NJTRST"),
+ STM32_FUNCTION(2, "TIM16_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "HRTIM_EEV6"),
+ STM32_FUNCTION(6, "SPI1_MISO I2S1_SDI"),
+ STM32_FUNCTION(7, "SPI3_MISO I2S3_SDI"),
+ STM32_FUNCTION(8, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(9, "SPI6_MISO"),
+ STM32_FUNCTION(10, "SDMMC2_D3"),
+ STM32_FUNCTION(12, "UART7_TX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(21, "PB5"),
+ STM32_FUNCTION(0, "GPIOB5"),
+ STM32_FUNCTION(2, "TIM17_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "HRTIM_EEV7"),
+ STM32_FUNCTION(5, "I2C1_SMBA"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SDO"),
+ STM32_FUNCTION(7, "I2C4_SMBA"),
+ STM32_FUNCTION(8, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(9, "SPI6_MOSI"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D7"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(15, "UART5_RX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(22, "PB6"),
+ STM32_FUNCTION(0, "GPIOB6"),
+ STM32_FUNCTION(2, "TIM16_CH1N"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(4, "HRTIM_EEV8"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(6, "HDMI_CEC"),
+ STM32_FUNCTION(7, "I2C4_SCL"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(9, "LPUART1_TX"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(12, "DFSDM_DATIN5"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "UART5_TX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(23, "PB7"),
+ STM32_FUNCTION(0, "GPIOB7"),
+ STM32_FUNCTION(2, "TIM17_CH1N"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(4, "HRTIM_EEV9"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(7, "I2C4_SDA"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(9, "LPUART1_RX"),
+ STM32_FUNCTION(10, "CAN2_TXFD"),
+ STM32_FUNCTION(12, "DFSDM_CKIN5"),
+ STM32_FUNCTION(13, "FMC_NL"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(24, "PB8"),
+ STM32_FUNCTION(0, "GPIOB8"),
+ STM32_FUNCTION(2, "TIM16_CH1"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(4, "DFSDM_CKIN7"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(7, "I2C4_SCL"),
+ STM32_FUNCTION(8, "SDMMC1_CKIN"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(11, "SDMMC2_D4"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "SDMMC1_D4"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(25, "PB9"),
+ STM32_FUNCTION(0, "GPIOB9"),
+ STM32_FUNCTION(2, "TIM17_CH1"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(4, "DFSDM_DATIN7"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(7, "I2C4_SDA"),
+ STM32_FUNCTION(8, "SDMMC1_CDIR"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(11, "SDMMC2_D5"),
+ STM32_FUNCTION(12, "I2C4_SMBA"),
+ STM32_FUNCTION(13, "SDMMC1_D5"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(26, "PB10"),
+ STM32_FUNCTION(0, "GPIOB10"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(3, "HRTIM_SCOUT"),
+ STM32_FUNCTION(4, "LPTIM2_IN1"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(7, "DFSDM_DATIN7"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D3"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(27, "PB11"),
+ STM32_FUNCTION(0, "GPIOB11"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(3, "HRTIM_SCIN"),
+ STM32_FUNCTION(4, "LPTIM2_ETR"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(7, "DFSDM_CKIN7"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D4"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(14, "DSI_TE"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(28, "PB12"),
+ STM32_FUNCTION(0, "GPIOB12"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(7, "DFSDM_DATIN1"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D5"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "OTG_HS_ID"),
+ STM32_FUNCTION(14, "TIM1_BKIN_COMP12"),
+ STM32_FUNCTION(15, "UART5_RX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(29, "PB13"),
+ STM32_FUNCTION(0, "GPIOB13"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(4, "LPTIM2_OUT"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(7, "DFSDM_CKIN1"),
+ STM32_FUNCTION(8, "USART3_CTS_NSS"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D6"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(15, "UART5_TX"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(30, "PB14"),
+ STM32_FUNCTION(0, "GPIOB14"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(5, "USART1_TX"),
+ STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
+ STM32_FUNCTION(7, "DFSDM_DATIN2"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(9, "UART4_RTS"),
+ STM32_FUNCTION(10, "SDMMC2_D0"),
+ STM32_FUNCTION(13, "OTG_HS_DM"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(31, "PB15"),
+ STM32_FUNCTION(0, "GPIOB15"),
+ STM32_FUNCTION(1, "RTC_REFIN"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(5, "USART1_RX"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(7, "DFSDM_CKIN2"),
+ STM32_FUNCTION(9, "UART4_CTS"),
+ STM32_FUNCTION(10, "SDMMC2_D1"),
+ STM32_FUNCTION(13, "OTG_HS_DP"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(32, "PC0"),
+ STM32_FUNCTION(0, "GPIOC0"),
+ STM32_FUNCTION(4, "DFSDM_CKIN0"),
+ STM32_FUNCTION(7, "DFSDM_DATIN4"),
+ STM32_FUNCTION(9, "SAI2_FS_B"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_STP"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(33, "PC1"),
+ STM32_FUNCTION(0, "GPIOC1"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(4, "DFSDM_DATIN0"),
+ STM32_FUNCTION(5, "DFSDM_CKIN4"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(9, "SAI4_SD_A"),
+ STM32_FUNCTION(10, "SDMMC2_CK"),
+ STM32_FUNCTION(11, "SAI4_D1"),
+ STM32_FUNCTION(12, "ETH_MDC"),
+ STM32_FUNCTION(13, "MDIOS_MDC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(34, "PC2"),
+ STM32_FUNCTION(0, "GPIOC2"),
+ STM32_FUNCTION(4, "DFSDM_CKIN1"),
+ STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
+ STM32_FUNCTION(7, "DFSDM_CKOUT"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(12, "ETH_MII_TXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(35, "PC3"),
+ STM32_FUNCTION(0, "GPIOC3"),
+ STM32_FUNCTION(4, "DFSDM_DATIN1"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(12, "ETH_MII_TX_CLK"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(36, "PC4"),
+ STM32_FUNCTION(0, "GPIOC4"),
+ STM32_FUNCTION(4, "DFSDM_CKIN2"),
+ STM32_FUNCTION(6, "I2S1_MCK"),
+ STM32_FUNCTION(10, "SPDIFRX_IN2"),
+ STM32_FUNCTION(12, "ETH_MII_RXD0 ETH_RMII_RXD0"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(37, "PC5"),
+ STM32_FUNCTION(0, "GPIOC5"),
+ STM32_FUNCTION(3, "SAI1_D3"),
+ STM32_FUNCTION(4, "DFSDM_DATIN2"),
+ STM32_FUNCTION(10, "SPDIFRX_IN3"),
+ STM32_FUNCTION(11, "SAI4_D3"),
+ STM32_FUNCTION(12, "ETH_MII_RXD1 ETH_RMII_RXD1"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(14, "COMP_1_OUT"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(38, "PC6"),
+ STM32_FUNCTION(0, "GPIOC6"),
+ STM32_FUNCTION(2, "HRTIM_CHA1"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(5, "DFSDM_CKIN3"),
+ STM32_FUNCTION(6, "I2S2_MCK"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(9, "SDMMC1_D0DIR"),
+ STM32_FUNCTION(10, "FMC_NWAIT"),
+ STM32_FUNCTION(11, "SDMMC2_D6"),
+ STM32_FUNCTION(13, "SDMMC1_D6"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(39, "PC7"),
+ STM32_FUNCTION(0, "GPIOC7"),
+ STM32_FUNCTION(1, "TRGIO"),
+ STM32_FUNCTION(2, "HRTIM_CHA2"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(5, "DFSDM_DATIN3"),
+ STM32_FUNCTION(7, "I2S3_MCK"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(9, "SDMMC1_D123DIR"),
+ STM32_FUNCTION(10, "FMC_NE1"),
+ STM32_FUNCTION(11, "SDMMC2_D7"),
+ STM32_FUNCTION(12, "SWPMI_TX"),
+ STM32_FUNCTION(13, "SDMMC1_D7"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(40, "PC8"),
+ STM32_FUNCTION(0, "GPIOC8"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(2, "HRTIM_CHB1"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(9, "UART5_RTS"),
+ STM32_FUNCTION(10, "FMC_NE2 FMC_NCE"),
+ STM32_FUNCTION(12, "SWPMI_RX"),
+ STM32_FUNCTION(13, "SDMMC1_D0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(41, "PC9"),
+ STM32_FUNCTION(0, "GPIOC9"),
+ STM32_FUNCTION(1, "MCO2"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(9, "UART5_CTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(11, "LCD_G3"),
+ STM32_FUNCTION(12, "SWPMI_SUSPEND"),
+ STM32_FUNCTION(13, "SDMMC1_D1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(42, "PC10"),
+ STM32_FUNCTION(0, "GPIOC10"),
+ STM32_FUNCTION(3, "HRTIM_EEV1"),
+ STM32_FUNCTION(4, "DFSDM_CKIN5"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(13, "SDMMC1_D2"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(43, "PC11"),
+ STM32_FUNCTION(0, "GPIOC11"),
+ STM32_FUNCTION(3, "HRTIM_FLT2"),
+ STM32_FUNCTION(4, "DFSDM_DATIN5"),
+ STM32_FUNCTION(7, "SPI3_MISO I2S3_SDI"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_NCS"),
+ STM32_FUNCTION(13, "SDMMC1_D3"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(44, "PC12"),
+ STM32_FUNCTION(0, "GPIOC12"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(3, "HRTIM_EEV2"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(9, "UART5_TX"),
+ STM32_FUNCTION(13, "SDMMC1_CK"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(45, "PC13"),
+ STM32_FUNCTION(0, "GPIOC13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(46, "PC14"),
+ STM32_FUNCTION(0, "GPIOC14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(47, "PC15"),
+ STM32_FUNCTION(0, "GPIOC15"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(48, "PD0"),
+ STM32_FUNCTION(0, "GPIOD0"),
+ STM32_FUNCTION(4, "DFSDM_CKIN6"),
+ STM32_FUNCTION(7, "SAI3_SCK_A"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D2 FMC_DA2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(49, "PD1"),
+ STM32_FUNCTION(0, "GPIOD1"),
+ STM32_FUNCTION(4, "DFSDM_DATIN6"),
+ STM32_FUNCTION(7, "SAI3_SD_A"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D3 FMC_DA3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(50, "PD2"),
+ STM32_FUNCTION(0, "GPIOD2"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(3, "TIM3_ETR"),
+ STM32_FUNCTION(9, "UART5_RX"),
+ STM32_FUNCTION(13, "SDMMC1_CMD"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(51, "PD3"),
+ STM32_FUNCTION(0, "GPIOD3"),
+ STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART2_CTS_NSS"),
+ STM32_FUNCTION(13, "FMC_CLK"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(52, "PD4"),
+ STM32_FUNCTION(0, "GPIOD4"),
+ STM32_FUNCTION(3, "HRTIM_FLT3"),
+ STM32_FUNCTION(7, "SAI3_FS_A"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(13, "FMC_NOE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(53, "PD5"),
+ STM32_FUNCTION(0, "GPIOD5"),
+ STM32_FUNCTION(3, "HRTIM_EEV3"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(10, "CAN1_TXFD"),
+ STM32_FUNCTION(13, "FMC_NWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(54, "PD6"),
+ STM32_FUNCTION(0, "GPIOD6"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(4, "DFSDM_CKIN4"),
+ STM32_FUNCTION(5, "DFSDM_DATIN1"),
+ STM32_FUNCTION(6, "SPI3_MOSI I2S3_SDO"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(9, "SAI4_SD_A"),
+ STM32_FUNCTION(10, "CAN2_RXFD"),
+ STM32_FUNCTION(11, "SAI4_D1"),
+ STM32_FUNCTION(12, "SDMMC2_CK"),
+ STM32_FUNCTION(13, "FMC_NWAIT"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(55, "PD7"),
+ STM32_FUNCTION(0, "GPIOD7"),
+ STM32_FUNCTION(4, "DFSDM_DATIN4"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SDO"),
+ STM32_FUNCTION(7, "DFSDM_CKIN1"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(10, "SPDIFRX_IN0"),
+ STM32_FUNCTION(12, "SDMMC2_CMD"),
+ STM32_FUNCTION(13, "FMC_NE1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(56, "PD8"),
+ STM32_FUNCTION(0, "GPIOD8"),
+ STM32_FUNCTION(4, "DFSDM_CKIN3"),
+ STM32_FUNCTION(7, "SAI3_SCK_B"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(10, "SPDIFRX_IN1"),
+ STM32_FUNCTION(13, "FMC_D13 FMC_DA13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(57, "PD9"),
+ STM32_FUNCTION(0, "GPIOD9"),
+ STM32_FUNCTION(4, "DFSDM_DATIN3"),
+ STM32_FUNCTION(7, "SAI3_SD_B"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(10, "CAN2_RXFD"),
+ STM32_FUNCTION(13, "FMC_D14 FMC_DA14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(58, "PD10"),
+ STM32_FUNCTION(0, "GPIOD10"),
+ STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(7, "SAI3_FS_B"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(10, "CAN2_TXFD"),
+ STM32_FUNCTION(13, "FMC_D15 FMC_DA15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(59, "PD11"),
+ STM32_FUNCTION(0, "GPIOD11"),
+ STM32_FUNCTION(4, "LPTIM2_IN2"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(8, "USART3_CTS_NSS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(11, "SAI2_SD_A"),
+ STM32_FUNCTION(13, "FMC_A16"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(60, "PD12"),
+ STM32_FUNCTION(0, "GPIOD12"),
+ STM32_FUNCTION(2, "LPTIM1_IN1"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(4, "LPTIM2_IN1"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(11, "SAI2_FS_A"),
+ STM32_FUNCTION(13, "FMC_A17"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(61, "PD13"),
+ STM32_FUNCTION(0, "GPIOD13"),
+ STM32_FUNCTION(2, "LPTIM1_OUT"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(11, "SAI2_SCK_A"),
+ STM32_FUNCTION(13, "FMC_A18"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(62, "PD14"),
+ STM32_FUNCTION(0, "GPIOD14"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(7, "SAI3_MCLK_B"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(13, "FMC_D0 FMC_DA0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(63, "PD15"),
+ STM32_FUNCTION(0, "GPIOD15"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(7, "SAI3_MCLK_A"),
+ STM32_FUNCTION(9, "UART8_RTS"),
+ STM32_FUNCTION(13, "FMC_D1 FMC_DA1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(64, "PE0"),
+ STM32_FUNCTION(0, "GPIOE0"),
+ STM32_FUNCTION(2, "LPTIM1_ETR"),
+ STM32_FUNCTION(3, "TIM4_ETR"),
+ STM32_FUNCTION(4, "HRTIM_SCIN"),
+ STM32_FUNCTION(5, "LPTIM2_ETR"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(11, "SAI2_MCK_A"),
+ STM32_FUNCTION(13, "FMC_NBL0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(65, "PE1"),
+ STM32_FUNCTION(0, "GPIOE1"),
+ STM32_FUNCTION(2, "LPTIM1_IN2"),
+ STM32_FUNCTION(4, "HRTIM_SCOUT"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(10, "CAN1_TXFD"),
+ STM32_FUNCTION(13, "FMC_NBL1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(66, "PE2"),
+ STM32_FUNCTION(0, "GPIOE2"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(3, "SAI1_CK1"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_A"),
+ STM32_FUNCTION(9, "SAI4_MCLK_A"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(11, "SAI4_CK1"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "FMC_A23"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(67, "PE3"),
+ STM32_FUNCTION(0, "GPIOE3"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(5, "TIM15_BKIN"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(9, "SAI4_SD_B"),
+ STM32_FUNCTION(13, "FMC_A19"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(68, "PE4"),
+ STM32_FUNCTION(0, "GPIOE4"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(3, "SAI1_D2"),
+ STM32_FUNCTION(4, "DFSDM_DATIN3"),
+ STM32_FUNCTION(5, "TIM15_CH1N"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(9, "SAI4_FS_A"),
+ STM32_FUNCTION(11, "SAI4_D2"),
+ STM32_FUNCTION(13, "FMC_A20"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(69, "PE5"),
+ STM32_FUNCTION(0, "GPIOE5"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(3, "SAI1_CK2"),
+ STM32_FUNCTION(4, "DFSDM_CKIN3"),
+ STM32_FUNCTION(5, "TIM15_CH1"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_A"),
+ STM32_FUNCTION(9, "SAI4_SCK_A"),
+ STM32_FUNCTION(11, "SAI4_CK2"),
+ STM32_FUNCTION(13, "FMC_A21"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(70, "PE6"),
+ STM32_FUNCTION(0, "GPIOE6"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(2, "TIM1_BKIN2"),
+ STM32_FUNCTION(3, "SAI1_D1"),
+ STM32_FUNCTION(5, "TIM15_CH2"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(9, "SAI4_SD_A"),
+ STM32_FUNCTION(10, "SAI4_D1"),
+ STM32_FUNCTION(11, "SAI2_MCK_B"),
+ STM32_FUNCTION(12, "TIM1_BKIN2_COMP12"),
+ STM32_FUNCTION(13, "FMC_A22"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(71, "PE7"),
+ STM32_FUNCTION(0, "GPIOE7"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(4, "DFSDM_DATIN2"),
+ STM32_FUNCTION(8, "UART7_RX"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(13, "FMC_D4 FMC_DA4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(72, "PE8"),
+ STM32_FUNCTION(0, "GPIOE8"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(4, "DFSDM_CKIN2"),
+ STM32_FUNCTION(8, "UART7_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(13, "FMC_D5 FMC_DA5"),
+ STM32_FUNCTION(14, "COMP_2_OUT"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(73, "PE9"),
+ STM32_FUNCTION(0, "GPIOE9"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(4, "DFSDM_CKOUT"),
+ STM32_FUNCTION(8, "UART7_RTS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(13, "FMC_D6 FMC_DA6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(74, "PE10"),
+ STM32_FUNCTION(0, "GPIOE10"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(4, "DFSDM_DATIN4"),
+ STM32_FUNCTION(8, "UART7_CTS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(13, "FMC_D7 FMC_DA7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(75, "PE11"),
+ STM32_FUNCTION(0, "GPIOE11"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(4, "DFSDM_CKIN4"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_D8 FMC_DA8"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(76, "PE12"),
+ STM32_FUNCTION(0, "GPIOE12"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(4, "DFSDM_DATIN5"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(11, "SAI2_SCK_B"),
+ STM32_FUNCTION(13, "FMC_D9 FMC_DA9"),
+ STM32_FUNCTION(14, "COMP_1_OUT"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(77, "PE13"),
+ STM32_FUNCTION(0, "GPIOE13"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(4, "DFSDM_CKIN5"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(11, "SAI2_FS_B"),
+ STM32_FUNCTION(13, "FMC_D10 FMC_DA10"),
+ STM32_FUNCTION(14, "COMP_2_OUT"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(78, "PE14"),
+ STM32_FUNCTION(0, "GPIOE14"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(11, "SAI2_MCK_B"),
+ STM32_FUNCTION(13, "FMC_D11 FMC_DA11"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(79, "PE15"),
+ STM32_FUNCTION(0, "GPIOE15"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(6, "HDMI__TIM1_BKIN"),
+ STM32_FUNCTION(13, "FMC_D12 FMC_DA12"),
+ STM32_FUNCTION(14, "TIM1_BKIN_COMP12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(80, "PF0"),
+ STM32_FUNCTION(0, "GPIOF0"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(13, "FMC_A0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(81, "PF1"),
+ STM32_FUNCTION(0, "GPIOF1"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(13, "FMC_A1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(82, "PF2"),
+ STM32_FUNCTION(0, "GPIOF2"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(13, "FMC_A2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(83, "PF3"),
+ STM32_FUNCTION(0, "GPIOF3"),
+ STM32_FUNCTION(13, "FMC_A3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(84, "PF4"),
+ STM32_FUNCTION(0, "GPIOF4"),
+ STM32_FUNCTION(13, "FMC_A4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(85, "PF5"),
+ STM32_FUNCTION(0, "GPIOF5"),
+ STM32_FUNCTION(13, "FMC_A5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(86, "PF6"),
+ STM32_FUNCTION(0, "GPIOF6"),
+ STM32_FUNCTION(2, "TIM16_CH1"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(8, "UART7_RX"),
+ STM32_FUNCTION(9, "SAI4_SD_B"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(87, "PF7"),
+ STM32_FUNCTION(0, "GPIOF7"),
+ STM32_FUNCTION(2, "TIM17_CH1"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_B"),
+ STM32_FUNCTION(8, "UART7_TX"),
+ STM32_FUNCTION(9, "SAI4_MCLK_B"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(88, "PF8"),
+ STM32_FUNCTION(0, "GPIOF8"),
+ STM32_FUNCTION(2, "TIM16_CH1N"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_B"),
+ STM32_FUNCTION(8, "UART7_RTS"),
+ STM32_FUNCTION(9, "SAI4_SCK_B"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(89, "PF9"),
+ STM32_FUNCTION(0, "GPIOF9"),
+ STM32_FUNCTION(2, "TIM17_CH1N"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(7, "SAI1_FS_B"),
+ STM32_FUNCTION(8, "UART7_CTS"),
+ STM32_FUNCTION(9, "SAI4_FS_B"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(90, "PF10"),
+ STM32_FUNCTION(0, "GPIOF10"),
+ STM32_FUNCTION(2, "TIM16_BKIN"),
+ STM32_FUNCTION(3, "SAI1_D3"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
+ STM32_FUNCTION(11, "SAI4_D3"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(91, "PF11"),
+ STM32_FUNCTION(0, "GPIOF11"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_SDNRAS"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(92, "PF12"),
+ STM32_FUNCTION(0, "GPIOF12"),
+ STM32_FUNCTION(13, "FMC_A6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(93, "PF13"),
+ STM32_FUNCTION(0, "GPIOF13"),
+ STM32_FUNCTION(4, "DFSDM_DATIN6"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(13, "FMC_A7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(94, "PF14"),
+ STM32_FUNCTION(0, "GPIOF14"),
+ STM32_FUNCTION(4, "DFSDM_CKIN6"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(13, "FMC_A8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(95, "PF15"),
+ STM32_FUNCTION(0, "GPIOF15"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(13, "FMC_A9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(96, "PG0"),
+ STM32_FUNCTION(0, "GPIOG0"),
+ STM32_FUNCTION(13, "FMC_A10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(97, "PG1"),
+ STM32_FUNCTION(0, "GPIOG1"),
+ STM32_FUNCTION(13, "FMC_A11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(98, "PG2"),
+ STM32_FUNCTION(0, "GPIOG2"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(12, "TIM8_BKIN_COMP12"),
+ STM32_FUNCTION(13, "FMC_A12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(99, "PG3"),
+ STM32_FUNCTION(0, "GPIOG3"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(12, "TIM8_BKIN2_COMP12"),
+ STM32_FUNCTION(13, "FMC_A13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(100, "PG4"),
+ STM32_FUNCTION(0, "GPIOG4"),
+ STM32_FUNCTION(2, "TIM1_BKIN2"),
+ STM32_FUNCTION(12, "TIM1_BKIN2_COMP12"),
+ STM32_FUNCTION(13, "FMC_A14 FMC_BA0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(101, "PG5"),
+ STM32_FUNCTION(0, "GPIOG5"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(13, "FMC_A15 FMC_BA1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(102, "PG6"),
+ STM32_FUNCTION(0, "GPIOG6"),
+ STM32_FUNCTION(2, "TIM17_BKIN"),
+ STM32_FUNCTION(3, "HRTIM_CHE1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(13, "FMC_NE3"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(103, "PG7"),
+ STM32_FUNCTION(0, "GPIOG7"),
+ STM32_FUNCTION(3, "HRTIM_CHE2"),
+ STM32_FUNCTION(7, "SAI1_MCLK_A"),
+ STM32_FUNCTION(8, "USART6_CK"),
+ STM32_FUNCTION(13, "FMC_INT"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(104, "PG8"),
+ STM32_FUNCTION(0, "GPIOG8"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(6, "SPI6_NSS"),
+ STM32_FUNCTION(8, "USART6_RTS"),
+ STM32_FUNCTION(9, "SPDIFRX_IN2"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCLK"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(105, "PG9"),
+ STM32_FUNCTION(0, "GPIOG9"),
+ STM32_FUNCTION(6, "SPI1_MISO I2S1_SDI"),
+ STM32_FUNCTION(8, "USART6_RX"),
+ STM32_FUNCTION(9, "SPDIFRX_IN3"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(11, "SAI2_FS_B"),
+ STM32_FUNCTION(13, "FMC_NE2 FMC_NCE"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(106, "PG10"),
+ STM32_FUNCTION(0, "GPIOG10"),
+ STM32_FUNCTION(3, "HRTIM_FLT5"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(10, "LCD_G3"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_NE3"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(107, "PG11"),
+ STM32_FUNCTION(0, "GPIOG11"),
+ STM32_FUNCTION(3, "HRTIM_EEV4"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(9, "SPDIFRX_IN0"),
+ STM32_FUNCTION(11, "SDMMC2_D2"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(108, "PG12"),
+ STM32_FUNCTION(0, "GPIOG12"),
+ STM32_FUNCTION(2, "LPTIM1_IN1"),
+ STM32_FUNCTION(3, "HRTIM_EEV5"),
+ STM32_FUNCTION(6, "SPI6_MISO"),
+ STM32_FUNCTION(8, "USART6_RTS"),
+ STM32_FUNCTION(9, "SPDIFRX_IN1"),
+ STM32_FUNCTION(10, "LCD_B4"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(13, "FMC_NE4"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(109, "PG13"),
+ STM32_FUNCTION(0, "GPIOG13"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(2, "LPTIM1_OUT"),
+ STM32_FUNCTION(3, "HRTIM_EEV10"),
+ STM32_FUNCTION(6, "SPI6_SCK"),
+ STM32_FUNCTION(8, "USART6_CTS_NSS"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "FMC_A24"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(110, "PG14"),
+ STM32_FUNCTION(0, "GPIOG14"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(2, "LPTIM1_ETR"),
+ STM32_FUNCTION(6, "SPI6_MOSI"),
+ STM32_FUNCTION(8, "USART6_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(13, "FMC_A25"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(111, "PG15"),
+ STM32_FUNCTION(0, "GPIOG15"),
+ STM32_FUNCTION(8, "USART6_CTS_NSS"),
+ STM32_FUNCTION(13, "FMC_SDNCAS"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(112, "PH0"),
+ STM32_FUNCTION(0, "GPIOH0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(113, "PH1"),
+ STM32_FUNCTION(0, "GPIOH1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(114, "PH2"),
+ STM32_FUNCTION(0, "GPIOH2"),
+ STM32_FUNCTION(2, "LPTIM1_IN2"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(11, "SAI2_SCK_B"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(115, "PH3"),
+ STM32_FUNCTION(0, "GPIOH3"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(11, "SAI2_MCK_B"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(116, "PH4"),
+ STM32_FUNCTION(0, "GPIOH4"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(10, "LCD_G5"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(117, "PH5"),
+ STM32_FUNCTION(0, "GPIOH5"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(118, "PH6"),
+ STM32_FUNCTION(0, "GPIOH6"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(119, "PH7"),
+ STM32_FUNCTION(0, "GPIOH7"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(120, "PH8"),
+ STM32_FUNCTION(0, "GPIOH8"),
+ STM32_FUNCTION(3, "TIM5_ETR"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(13, "FMC_D16"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(121, "PH9"),
+ STM32_FUNCTION(0, "GPIOH9"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(13, "FMC_D17"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(122, "PH10"),
+ STM32_FUNCTION(0, "GPIOH10"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(13, "FMC_D18"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(123, "PH11"),
+ STM32_FUNCTION(0, "GPIOH11"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(13, "FMC_D19"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(124, "PH12"),
+ STM32_FUNCTION(0, "GPIOH12"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(13, "FMC_D20"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(125, "PH13"),
+ STM32_FUNCTION(0, "GPIOH13"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D21"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(126, "PH14"),
+ STM32_FUNCTION(0, "GPIOH14"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D22"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(127, "PH15"),
+ STM32_FUNCTION(0, "GPIOH15"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(10, "CAN1_TXFD"),
+ STM32_FUNCTION(13, "FMC_D23"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(128, "PI0"),
+ STM32_FUNCTION(0, "GPIOI0"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(13, "FMC_D24"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(129, "PI1"),
+ STM32_FUNCTION(0, "GPIOI1"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(12, "TIM8_BKIN2_COMP12"),
+ STM32_FUNCTION(13, "FMC_D25"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(130, "PI2"),
+ STM32_FUNCTION(0, "GPIOI2"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(6, "SPI2_MISO I2S2_SDI"),
+ STM32_FUNCTION(13, "FMC_D26"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(131, "PI3"),
+ STM32_FUNCTION(0, "GPIOI3"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SDO"),
+ STM32_FUNCTION(13, "FMC_D27"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(132, "PI4"),
+ STM32_FUNCTION(0, "GPIOI4"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(11, "SAI2_MCK_A"),
+ STM32_FUNCTION(12, "TIM8_BKIN_COMP12"),
+ STM32_FUNCTION(13, "FMC_NBL2"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(133, "PI5"),
+ STM32_FUNCTION(0, "GPIOI5"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(11, "SAI2_SCK_A"),
+ STM32_FUNCTION(13, "FMC_NBL3"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(134, "PI6"),
+ STM32_FUNCTION(0, "GPIOI6"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(11, "SAI2_SD_A"),
+ STM32_FUNCTION(13, "FMC_D28"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(135, "PI7"),
+ STM32_FUNCTION(0, "GPIOI7"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(11, "SAI2_FS_A"),
+ STM32_FUNCTION(13, "FMC_D29"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(136, "PI8"),
+ STM32_FUNCTION(0, "GPIOI8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(137, "PI9"),
+ STM32_FUNCTION(0, "GPIOI9"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D30"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(138, "PI10"),
+ STM32_FUNCTION(0, "GPIOI10"),
+ STM32_FUNCTION(10, "CAN1_RXFD"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(13, "FMC_D31"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(139, "PI11"),
+ STM32_FUNCTION(0, "GPIOI11"),
+ STM32_FUNCTION(10, "LCD_G6"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(140, "PI12"),
+ STM32_FUNCTION(0, "GPIOI12"),
+ STM32_FUNCTION(12, "ETH_TX_ER"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(141, "PI13"),
+ STM32_FUNCTION(0, "GPIOI13"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(142, "PI14"),
+ STM32_FUNCTION(0, "GPIOI14"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(143, "PI15"),
+ STM32_FUNCTION(0, "GPIOI15"),
+ STM32_FUNCTION(10, "LCD_G2"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(144, "PJ0"),
+ STM32_FUNCTION(0, "GPIOJ0"),
+ STM32_FUNCTION(10, "LCD_R7"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(145, "PJ1"),
+ STM32_FUNCTION(0, "GPIOJ1"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(146, "PJ2"),
+ STM32_FUNCTION(0, "GPIOJ2"),
+ STM32_FUNCTION(14, "DSI_TE"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(147, "PJ3"),
+ STM32_FUNCTION(0, "GPIOJ3"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(148, "PJ4"),
+ STM32_FUNCTION(0, "GPIOJ4"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(149, "PJ5"),
+ STM32_FUNCTION(0, "GPIOJ5"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(150, "PJ6"),
+ STM32_FUNCTION(0, "GPIOJ6"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(151, "PJ7"),
+ STM32_FUNCTION(0, "GPIOJ7"),
+ STM32_FUNCTION(1, "TRGIN"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(152, "PJ8"),
+ STM32_FUNCTION(0, "GPIOJ8"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(153, "PJ9"),
+ STM32_FUNCTION(0, "GPIOJ9"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(154, "PJ10"),
+ STM32_FUNCTION(0, "GPIOJ10"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(155, "PJ11"),
+ STM32_FUNCTION(0, "GPIOJ11"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(156, "PJ12"),
+ STM32_FUNCTION(0, "GPIOJ12"),
+ STM32_FUNCTION(1, "TRGOUT"),
+ STM32_FUNCTION(10, "LCD_G3"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(157, "PJ13"),
+ STM32_FUNCTION(0, "GPIOJ13"),
+ STM32_FUNCTION(10, "LCD_B4"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(158, "PJ14"),
+ STM32_FUNCTION(0, "GPIOJ14"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(159, "PJ15"),
+ STM32_FUNCTION(0, "GPIOJ15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(160, "PK0"),
+ STM32_FUNCTION(0, "GPIOK0"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(161, "PK1"),
+ STM32_FUNCTION(0, "GPIOK1"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(162, "PK2"),
+ STM32_FUNCTION(0, "GPIOK2"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(11, "TIM8_BKIN_COMP12"),
+ STM32_FUNCTION(12, "TIM1_BKIN_COMP12"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(163, "PK3"),
+ STM32_FUNCTION(0, "GPIOK3"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(164, "PK4"),
+ STM32_FUNCTION(0, "GPIOK4"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(165, "PK5"),
+ STM32_FUNCTION(0, "GPIOK5"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(166, "PK6"),
+ STM32_FUNCTION(0, "GPIOK6"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(167, "PK7"),
+ STM32_FUNCTION(0, "GPIOK7"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+};
+
+static struct stm32_pinctrl_match_data stm32h743_match_data = {
+ .pins = stm32h743_pins,
+ .npins = ARRAY_SIZE(stm32h743_pins),
+};
+
+static const struct of_device_id stm32h743_pctrl_match[] = {
+ {
+ .compatible = "st,stm32h743-pinctrl",
+ .data = &stm32h743_match_data,
+ },
+ { }
+};
+
+static struct platform_driver stm32h743_pinctrl_driver = {
+ .probe = stm32_pctl_probe,
+ .driver = {
+ .name = "stm32h743-pinctrl",
+ .of_match_table = stm32h743_pctrl_match,
+ },
+};
+
+builtin_platform_driver(stm32h743_pinctrl_driver);
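
As a side note on how the new per-SoC table above is consumed: this file only fills in .pins/.npins and hands the table to the shared stm32_pctl_probe() (defined in pinctrl-stm32.c, not part of this hunk) through the of_device_id .data pointer. A minimal sketch of that retrieval pattern, assuming the struct stm32_pinctrl_match_data layout from pinctrl-stm32.h and using the generic of_device_get_match_data() helper, would look roughly like this (illustrative only, not code from this patch):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	#include "pinctrl-stm32.h"	/* struct stm32_pinctrl_match_data (assumed layout) */

	static int example_pctl_probe(struct platform_device *pdev)
	{
		const struct stm32_pinctrl_match_data *match;

		/*
		 * Fetch the .data pointer of the matching of_device_id entry,
		 * i.e. &stm32h743_match_data when "st,stm32h743-pinctrl" matched.
		 */
		match = of_device_get_match_data(&pdev->dev);
		if (!match)
			return -EINVAL;

		/* match->pins / match->npins now describe the pins listed above. */
		return 0;
	}
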
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index bff1ffc6f01e..816015cf7053 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -9,26 +9,14 @@ config PINCTRL_SUN4I_A10
def_bool MACH_SUN4I
select PINCTRL_SUNXI
-config PINCTRL_SUN5I_A10S
+config PINCTRL_SUN5I
def_bool MACH_SUN5I
select PINCTRL_SUNXI
-config PINCTRL_SUN5I_A13
- def_bool MACH_SUN5I
- select PINCTRL_SUNXI
-
-config PINCTRL_GR8
- def_bool MACH_SUN5I
- select PINCTRL_SUNXI_COMMON
-
config PINCTRL_SUN6I_A31
def_bool MACH_SUN6I
select PINCTRL_SUNXI
-config PINCTRL_SUN6I_A31S
- def_bool MACH_SUN6I
- select PINCTRL_SUNXI
-
config PINCTRL_SUN6I_A31_R
def_bool MACH_SUN6I
depends on RESET_CONTROLLER
@@ -63,6 +51,10 @@ config PINCTRL_SUN8I_H3_R
def_bool MACH_SUN8I
select PINCTRL_SUNXI_COMMON
+config PINCTRL_SUN8I_V3S
+ def_bool MACH_SUN8I
+ select PINCTRL_SUNXI
+
config PINCTRL_SUN9I_A80
def_bool MACH_SUN9I
select PINCTRL_SUNXI
@@ -76,4 +68,8 @@ config PINCTRL_SUN50I_A64
bool
select PINCTRL_SUNXI
+config PINCTRL_SUN50I_H5
+ bool
+ select PINCTRL_SUNXI
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 95f93d0561fc..04ccb88ebd5f 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -3,11 +3,8 @@ obj-y += pinctrl-sunxi.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
-obj-$(CONFIG_PINCTRL_SUN5I_A10S) += pinctrl-sun5i-a10s.o
-obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o
-obj-$(CONFIG_PINCTRL_GR8) += pinctrl-gr8.o
+obj-$(CONFIG_PINCTRL_SUN5I) += pinctrl-sun5i.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
-obj-$(CONFIG_PINCTRL_SUN6I_A31S) += pinctrl-sun6i-a31s.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
@@ -17,5 +14,7 @@ obj-$(CONFIG_PINCTRL_SUN50I_A64) += pinctrl-sun50i-a64.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
+obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
+obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-gr8.c b/drivers/pinctrl/sunxi/pinctrl-gr8.c
deleted file mode 100644
index 2f232c3a0579..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-gr8.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * NextThing GR8 SoCs pinctrl driver.
- *
- * Copyright (C) 2016 Mylene Josserand
- *
- * Based on pinctrl-sun5i-a13.c
- *
- * Mylene Josserand <mylene.josserand@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun5i_gr8_pins[] = {
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm0"),
- SUNXI_FUNCTION(0x3, "spdif"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 17)), /* EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
- SUNXI_FUNCTION_IRQ(0x6, 19)), /* EINT19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
- SUNXI_FUNCTION_IRQ(0x6, 20)), /* EINT20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
- SUNXI_FUNCTION_IRQ(0x6, 21)), /* EINT21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DI */
- SUNXI_FUNCTION(0x3, "spdif"), /* DI */
- SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- SUNXI_FUNCTION(0x3, "spdif"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
- SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
- SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
- SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
- SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
- SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
- SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
- SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
- SUNXI_FUNCTION_IRQ(0x6, 28)), /* EINT28 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NRE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
- SUNXI_FUNCTION(0x3, "uart2"), /* RX */
- SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
- SUNXI_FUNCTION(0x3, "uart2")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
- SUNXI_FUNCTION(0x3, "uart2")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
- SUNXI_FUNCTION(0x3, "uart2")), /* CTS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
- SUNXI_FUNCTION(0x3, "uart2")), /* RTS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
- SUNXI_FUNCTION(0x3, "emac")), /* ECRS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
- SUNXI_FUNCTION(0x3, "emac")), /* ECOL */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXERR */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXDV */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXERR*/
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "emac")), /* EMDC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "emac")), /* EMDIO */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* CLK */
- SUNXI_FUNCTION(0x3, "csi0"), /* PCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CS0 */
- SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* ERR */
- SUNXI_FUNCTION(0x3, "csi0"), /* MCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* SYNC */
- SUNXI_FUNCTION(0x3, "csi0"), /* HSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* DVLD */
- SUNXI_FUNCTION(0x3, "csi0"), /* VSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D0 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D1 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D2 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D2 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D3 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D3 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D4 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D4 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D5 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D5 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D6 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D7 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
- SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
- SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart0")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
- SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
- SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* SIGN */
- SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* MAG */
- SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
- SUNXI_FUNCTION(0x3, "ms"), /* BS */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 3)), /* EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
- SUNXI_FUNCTION(0x3, "ms"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
- SUNXI_FUNCTION(0x3, "ms"), /* D0 */
- SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 5)), /* EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
- SUNXI_FUNCTION(0x3, "ms"), /* D1 */
- SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
- SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 6)), /* EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
- SUNXI_FUNCTION(0x3, "ms"), /* D2 */
- SUNXI_FUNCTION(0x5, "uart2"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 7)), /* EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
- SUNXI_FUNCTION(0x3, "ms"), /* D3 */
- SUNXI_FUNCTION(0x5, "uart2"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 8)), /* EINT8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 9)), /* EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 10)), /* EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 11)), /* EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "pwm1"),
- SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
-};
-
-static const struct sunxi_pinctrl_desc sun5i_gr8_pinctrl_data = {
- .pins = sun5i_gr8_pins,
- .npins = ARRAY_SIZE(sun5i_gr8_pins),
- .irq_banks = 1,
-};
-
-static int sun5i_gr8_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun5i_gr8_pinctrl_data);
-}
-
-static const struct of_device_id sun5i_gr8_pinctrl_match[] = {
- { .compatible = "nextthing,gr8-pinctrl", },
- {}
-};
-
-static struct platform_driver sun5i_gr8_pinctrl_driver = {
- .probe = sun5i_gr8_pinctrl_probe,
- .driver = {
- .name = "gr8-pinctrl",
- .of_match_table = sun5i_gr8_pinctrl_match,
- },
-};
-builtin_platform_driver(sun5i_gr8_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
new file mode 100644
index 000000000000..ccf9419e9418
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -0,0 +1,558 @@
+/*
+ * Allwinner H5 SoC pinctrl driver.
+ *
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on pinctrl-sun8i-h3.c, which is:
+ * Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
+ *
+ * Based on pinctrl-sun8i-a23.c, which is:
+ * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun50i_h5_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x3, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "sim"), /* PWREN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "sim"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "sim"), /* DATA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "sim"), /* RST */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "sim"), /* DET */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x3, "di"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x3, "di"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DOUT */
+ SUNXI_FUNCTION(0x3, "sim"), /* VPPEN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DIN */
+ SUNXI_FUNCTION(0x3, "sim"), /* VPPPP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* DS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE0 */
+ SUNXI_FUNCTION(0x4, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RE */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* RB1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXD3 */
+ SUNXI_FUNCTION(0x3, "di"), /* TX */
+ SUNXI_FUNCTION(0x4, "ts2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXD2 */
+ SUNXI_FUNCTION(0x3, "di"), /* RX */
+ SUNXI_FUNCTION(0x4, "ts2")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXD1 */
+ SUNXI_FUNCTION(0x4, "ts2")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXD0 */
+ SUNXI_FUNCTION(0x4, "ts2")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXCK */
+ SUNXI_FUNCTION(0x4, "ts2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXCTL/RXDV */
+ SUNXI_FUNCTION(0x4, "ts2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* RXERR */
+ SUNXI_FUNCTION(0x4, "ts2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXD3 */
+ SUNXI_FUNCTION(0x4, "ts2"), /* D3 */
+ SUNXI_FUNCTION(0x5, "ts3")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXD2 */
+ SUNXI_FUNCTION(0x4, "ts2"), /* D4 */
+ SUNXI_FUNCTION(0x5, "ts3")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXD1 */
+ SUNXI_FUNCTION(0x4, "ts2"), /* D5 */
+ SUNXI_FUNCTION(0x5, "ts3")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXD0 */
+ SUNXI_FUNCTION(0x4, "ts2"), /* D6 */
+ SUNXI_FUNCTION(0x5, "ts3")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* CRS */
+ SUNXI_FUNCTION(0x4, "ts2"), /* D7 */
+ SUNXI_FUNCTION(0x5, "ts3")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXCK */
+ SUNXI_FUNCTION(0x4, "sim")), /* PWREN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION(0x4, "sim")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* TXERR */
+ SUNXI_FUNCTION(0x4, "sim")), /* DATA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* CLKIN/COL */
+ SUNXI_FUNCTION(0x4, "sim")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* MDC */
+ SUNXI_FUNCTION(0x4, "sim")), /* DET */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac")), /* MDIO */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "ts0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "ts0")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "ts0")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "ts0")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "ts0")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "ts0")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "ts0")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "ts0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "ts1")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "ts0"), /* D4 */
+ SUNXI_FUNCTION(0x4, "ts1")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "ts0"), /* D5 */
+ SUNXI_FUNCTION(0x4, "ts1")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "ts0"), /* D6 */
+ SUNXI_FUNCTION(0x4, "ts1")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D7 */
+ SUNXI_FUNCTION(0x4, "ts1")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "sim")), /* VPPEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "sim")), /* VPPPP */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PF_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PF_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PF_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PF_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PF_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PF_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PF_EINT6 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PG_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PG_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PG_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PG_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PG_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PG_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PG_EINT13 */
+};
+
+static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
+ .pins = sun50i_h5_pins,
+ .npins = ARRAY_SIZE(sun50i_h5_pins),
+ .irq_banks = 2,
+ .irq_read_needs_mux = true,
+};
+
+static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun50i_h5_pinctrl_data);
+}
+
+static const struct of_device_id sun50i_h5_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-h5-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun50i_h5_pinctrl_driver = {
+ .probe = sun50i_h5_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-h5-pinctrl",
+ .of_match_table = sun50i_h5_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun50i_h5_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
deleted file mode 100644
index 8575f3f6d3dd..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Allwinner A13 SoCs pinctrl driver.
- *
- * Copyright (C) 2014 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun5i_a13_pins[] = {
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm"),
- SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 17)), /* EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NRE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
- SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D15 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* PCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CS0 */
- SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* MCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* HSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* VSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D2 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D3 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D4 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D5 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 3)), /* EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 9)), /* EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 10)), /* EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 11)), /* EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
-};
-
-static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
- .pins = sun5i_a13_pins,
- .npins = ARRAY_SIZE(sun5i_a13_pins),
- .irq_banks = 1,
-};
-
-static int sun5i_a13_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun5i_a13_pinctrl_data);
-}
-
-static const struct of_device_id sun5i_a13_pinctrl_match[] = {
- { .compatible = "allwinner,sun5i-a13-pinctrl", },
- {}
-};
-
-static struct platform_driver sun5i_a13_pinctrl_driver = {
- .probe = sun5i_a13_pinctrl_probe,
- .driver = {
- .name = "sun5i-a13-pinctrl",
- .of_match_table = sun5i_a13_pinctrl_match,
- },
-};
-builtin_platform_driver(sun5i_a13_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index a5b57fdff9e1..47afd558b114 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -1,9 +1,8 @@
/*
- * Allwinner A10s SoCs pinctrl driver.
+ * Allwinner sun5i SoCs pinctrl driver.
*
- * Copyright (C) 2014 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2014-2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2016 Mylene Josserand <mylene.josserand@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -18,115 +17,133 @@
#include "pinctrl-sunxi.h"
-static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+static const struct sunxi_desc_pin sun5i_pins[] = {
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 0),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD3 */
SUNXI_FUNCTION(0x3, "ts0"), /* CLK */
SUNXI_FUNCTION(0x5, "keypad")), /* IN0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 1),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD2 */
SUNXI_FUNCTION(0x3, "ts0"), /* ERR */
SUNXI_FUNCTION(0x5, "keypad")), /* IN1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 2),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD1 */
SUNXI_FUNCTION(0x3, "ts0"), /* SYNC */
SUNXI_FUNCTION(0x5, "keypad")), /* IN2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 3),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD0 */
SUNXI_FUNCTION(0x3, "ts0"), /* DLVD */
SUNXI_FUNCTION(0x5, "keypad")), /* IN3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 4),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD3 */
SUNXI_FUNCTION(0x3, "ts0"), /* D0 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 5),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD2 */
SUNXI_FUNCTION(0x3, "ts0"), /* D1 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 6),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD1 */
SUNXI_FUNCTION(0x3, "ts0"), /* D2 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 7),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD0 */
SUNXI_FUNCTION(0x3, "ts0"), /* D3 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 8),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXCK */
SUNXI_FUNCTION(0x3, "ts0"), /* D4 */
SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 9),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXERR */
SUNXI_FUNCTION(0x3, "ts0"), /* D5 */
SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 10),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXDV */
SUNXI_FUNCTION(0x3, "ts0"), /* D6 */
SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 11),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDC */
SUNXI_FUNCTION(0x3, "ts0"), /* D7 */
SUNXI_FUNCTION(0x4, "uart1"), /* RING */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 12),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDIO */
SUNXI_FUNCTION(0x3, "uart1"), /* TX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 13),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXEN */
SUNXI_FUNCTION(0x3, "uart1"), /* RX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 14),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXCK */
SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 15),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECRS */
SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECOL */
SUNXI_FUNCTION(0x3, "uart2")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXERR */
@@ -145,6 +162,9 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "pwm"), /* PWM0 */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DO */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -156,55 +176,70 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "ir0"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 5),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* MCLK */
SUNXI_FUNCTION_IRQ(0x6, 19)), /* EINT19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 6),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* BCLK */
SUNXI_FUNCTION_IRQ(0x6, 20)), /* EINT20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 7),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* LRCK */
SUNXI_FUNCTION_IRQ(0x6, 21)), /* EINT21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 8),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* DO */
SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 9),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* DI */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DI */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DO */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 11),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 12),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 13),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 14),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
@@ -226,12 +261,14 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 19),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 19),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart0"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 29)), /* EINT29 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 20),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 20),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart0"), /* RX */
@@ -251,7 +288,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* SCK */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -315,17 +352,20 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NWP */
SUNXI_FUNCTION(0x4, "uart3")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE2 */
SUNXI_FUNCTION(0x4, "uart3")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 18),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE3 */
@@ -338,11 +378,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x3, "uart2"), /* RX */
SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
/* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 0),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 1),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D1 */
@@ -376,11 +418,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
SUNXI_FUNCTION(0x3, "emac")), /* ECOL */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 8),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 9),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D9 */
@@ -414,11 +458,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
SUNXI_FUNCTION(0x3, "emac")), /* ERXERR */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D17 */
@@ -600,26 +646,30 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
SUNXI_FUNCTION(0x4, "uart1"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 5),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* DO */
SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
SUNXI_FUNCTION_IRQ(0x6, 5)), /* EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 6),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
SUNXI_FUNCTION_IRQ(0x6, 6)), /* EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 7),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
SUNXI_FUNCTION(0x5, "uart2"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 7)), /* EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 8),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
@@ -649,7 +699,8 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 13),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
@@ -658,28 +709,41 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
};
-static const struct sunxi_pinctrl_desc sun5i_a10s_pinctrl_data = {
- .pins = sun5i_a10s_pins,
- .npins = ARRAY_SIZE(sun5i_a10s_pins),
+static const struct sunxi_pinctrl_desc sun5i_pinctrl_data = {
+ .pins = sun5i_pins,
+ .npins = ARRAY_SIZE(sun5i_pins),
.irq_banks = 1,
};
-static int sun5i_a10s_pinctrl_probe(struct platform_device *pdev)
+static int sun5i_pinctrl_probe(struct platform_device *pdev)
{
- return sunxi_pinctrl_init(pdev,
- &sun5i_a10s_pinctrl_data);
+	unsigned long variant =
+		(unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &sun5i_pinctrl_data,
+ variant);
}
-static const struct of_device_id sun5i_a10s_pinctrl_match[] = {
- { .compatible = "allwinner,sun5i-a10s-pinctrl", },
- {}
+static const struct of_device_id sun5i_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun5i-a10s-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_A10S
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_A13
+ },
+ {
+ .compatible = "nextthing,gr8-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_GR8
+ },
+ { },
};
-static struct platform_driver sun5i_a10s_pinctrl_driver = {
- .probe = sun5i_a10s_pinctrl_probe,
+static struct platform_driver sun5i_pinctrl_driver = {
+ .probe = sun5i_pinctrl_probe,
.driver = {
- .name = "sun5i-a10s-pinctrl",
- .of_match_table = sun5i_a10s_pinctrl_match,
+ .name = "sun5i-pinctrl",
+ .of_match_table = sun5i_pinctrl_match,
},
};
-builtin_platform_driver(sun5i_a10s_pinctrl_driver);
+builtin_platform_driver(sun5i_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 9e58926bef37..951a25c18815 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -23,69 +23,79 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D0 */
SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D1 */
SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D2 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D2 */
SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D3 */
SUNXI_FUNCTION(0x4, "uart1"), /* RING */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D4 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D4 */
SUNXI_FUNCTION(0x4, "uart1"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D5 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D5 */
SUNXI_FUNCTION(0x4, "uart1"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D6 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D6 */
SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D7 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D7 */
SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D8 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D8 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D9 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D9 */
SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */
SUNXI_FUNCTION(0x5, "mmc2"), /* CMD */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
@@ -93,7 +103,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D10 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D10 */
SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */
SUNXI_FUNCTION(0x5, "mmc2"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
@@ -101,7 +112,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D11 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D11 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */
SUNXI_FUNCTION(0x5, "mmc2"), /* D0 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
@@ -109,7 +121,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D12 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D12 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */
SUNXI_FUNCTION(0x5, "mmc2"), /* D1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
@@ -117,7 +130,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D13 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D13 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */
SUNXI_FUNCTION(0x5, "mmc2"), /* D2 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
@@ -125,7 +139,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D14 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D14 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */
SUNXI_FUNCTION(0x5, "mmc2"), /* D3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
@@ -133,91 +148,104 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D15 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D15 */
SUNXI_FUNCTION(0x4, "clk_out_a"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D16 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D16 */
SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D17 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D17 */
SUNXI_FUNCTION(0x4, "dmic"), /* DIN */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D18 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D18 */
SUNXI_FUNCTION(0x4, "clk_out_b"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D19 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D19 */
SUNXI_FUNCTION(0x4, "pwm3"), /* Positive */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D20 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D20 */
SUNXI_FUNCTION(0x4, "pwm3"), /* Negative */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D21 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D21 */
SUNXI_FUNCTION(0x4, "spi3"), /* CS0 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 22),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D22 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D22 */
SUNXI_FUNCTION(0x4, "spi3"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 22)), /* PA_EINT22 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 23),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* COL */
- SUNXI_FUNCTION(0x3, "lcd1"), /* D23 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* D23 */
SUNXI_FUNCTION(0x4, "spi3"), /* MOSI */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 23)), /* PA_EINT23 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 24),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* CRS */
- SUNXI_FUNCTION(0x3, "lcd1"), /* CLK */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* CLK */
SUNXI_FUNCTION(0x4, "spi3"), /* MISO */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 24)), /* PA_EINT24 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 25),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */
- SUNXI_FUNCTION(0x3, "lcd1"), /* DE */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* DE */
SUNXI_FUNCTION(0x4, "spi3"), /* CS1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 25)), /* PA_EINT25 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 26),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
- SUNXI_FUNCTION(0x3, "lcd1"), /* HSYNC */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* HSYNC */
SUNXI_FUNCTION(0x4, "clk_out_c"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */
- SUNXI_FUNCTION(0x3, "lcd1"), /* VSYNC */
+ SUNXI_FUNCTION_VARIANT(0x3, "lcd1",
+ PINCTRL_SUN6I_A31), /* VSYNC */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 27)), /* PA_EINT27 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
@@ -225,7 +253,8 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION(0x4, "csi"), /* MCLK1 */
+ SUNXI_FUNCTION_VARIANT(0x4, "csi",
+ PINCTRL_SUN6I_A31), /* MCLK1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PB_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -355,42 +384,43 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
SUNXI_FUNCTION(0x4, "mmc3")), /* D7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ /* Hole in pin numbering for A31s */
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 16), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ8 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 17), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ9 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 18), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ10 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 19), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ11 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 20),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 20), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ12 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 21),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 21), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ13 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 22),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 22), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ14 */
SUNXI_FUNCTION(0x3, "nand1")), /* DQ6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 23),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 23), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* DQ15 */
@@ -468,52 +498,62 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VP0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VN0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VP1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VN1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VP2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VN2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VPC */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VNC */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VP3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "lvds1",
+ PINCTRL_SUN6I_A31)), /* VN3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -643,7 +683,7 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x2, "csi"), /* D11 */
SUNXI_FUNCTION(0x3, "ts"), /* D7 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(E, 16), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* MIPI CSI MCLK */
@@ -734,13 +774,15 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
- SUNXI_FUNCTION(0x3, "usb"), /* DP3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "usb",
+ PINCTRL_SUN6I_A31), /* DP3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
- SUNXI_FUNCTION(0x3, "usb"), /* DM3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "usb",
+ PINCTRL_SUN6I_A31), /* DM3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -782,40 +824,40 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart4"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PG_EINT18 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ /* Hole; H starts at pin 9 for A31s */
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 0), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* WE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 1), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* ALE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 2), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* CLE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 3), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* CE1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 4), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* CE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 5), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* RE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 6), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* RB0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 7), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* RB1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 8), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* DQS */
@@ -908,11 +950,12 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
/* Undocumented mux function - see above */
SUNXI_FUNCTION(0x3, "spdif")), /* SPDIF OUT */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 29),
+ /* 2 extra pins for A31 */
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 29), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* CE2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 30),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(H, 30), PINCTRL_SUN6I_A31,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand1")), /* CE3 */
@@ -926,12 +969,23 @@ static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
{
- return sunxi_pinctrl_init(pdev,
- &sun6i_a31_pinctrl_data);
+ unsigned long variant =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev,
+ &sun6i_a31_pinctrl_data,
+ variant);
}
static const struct of_device_id sun6i_a31_pinctrl_match[] = {
- { .compatible = "allwinner,sun6i-a31-pinctrl", },
+ {
+ .compatible = "allwinner,sun6i-a31-pinctrl",
+ .data = (void *)PINCTRL_SUN6I_A31
+ },
+ {
+ .compatible = "allwinner,sun6i-a31s-pinctrl",
+ .data = (void *)PINCTRL_SUN6I_A31S
+ },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
deleted file mode 100644
index 231a746a5356..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * Allwinner A31s SoCs pinctrl driver.
- *
- * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
- *
- * Based on pinctrl-sun6i-a31.c, which is:
- * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun6i_a31s_pins[] = {
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
- SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */
- SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
- SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
- SUNXI_FUNCTION(0x4, "uart1"), /* RING */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */
- SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */
- SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
- SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */
- SUNXI_FUNCTION(0x5, "mmc2"), /* CMD */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */
- SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */
- SUNXI_FUNCTION(0x5, "mmc2"), /* CLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
- SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */
- SUNXI_FUNCTION(0x5, "mmc2"), /* D0 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
- SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */
- SUNXI_FUNCTION(0x5, "mmc2"), /* D1 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
- SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */
- SUNXI_FUNCTION(0x5, "mmc2"), /* D2 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
- SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */
- SUNXI_FUNCTION(0x5, "mmc2"), /* D3 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
- SUNXI_FUNCTION(0x4, "clk_out_a"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
- SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
- SUNXI_FUNCTION(0x4, "dmic"), /* DIN */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
- SUNXI_FUNCTION(0x4, "clk_out_b"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */
- SUNXI_FUNCTION(0x4, "pwm3"), /* Positive */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */
- SUNXI_FUNCTION(0x4, "pwm3"), /* Negative */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */
- SUNXI_FUNCTION(0x4, "spi3"), /* CS0 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
- SUNXI_FUNCTION(0x4, "spi3"), /* CLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 22)), /* PA_EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* COL */
- SUNXI_FUNCTION(0x4, "spi3"), /* MOSI */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 23)), /* PA_EINT23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* CRS */
- SUNXI_FUNCTION(0x4, "spi3"), /* MISO */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 24)), /* PA_EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */
- SUNXI_FUNCTION(0x4, "spi3"), /* CS1 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 25)), /* PA_EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
- SUNXI_FUNCTION(0x4, "clk_out_c"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 27)), /* PA_EINT27 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
- SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PB_EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PB_EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PB_EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PB_EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO1 */
- SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PB_EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO2 */
- SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO3 */
- SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2s0"), /* DI */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PB_EINT7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* WE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* CE1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* RE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
- SUNXI_FUNCTION(0x4, "mmc3")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
- SUNXI_FUNCTION(0x4, "mmc3")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D4 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D5 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D6 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
- SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
- SUNXI_FUNCTION(0x4, "mmc3")), /* D7 */
- /* Hole in pin numbering ! */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
- SUNXI_FUNCTION(0x3, "mmc2"), /* RST */
- SUNXI_FUNCTION(0x4, "mmc3")), /* RST */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
- SUNXI_FUNCTION(0x3, "ts"), /* CLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
- SUNXI_FUNCTION(0x3, "ts"), /* ERR */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "ts"), /* SYNC */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "ts"), /* DVLD */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D0 */
- SUNXI_FUNCTION(0x3, "uart5"), /* TX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D1 */
- SUNXI_FUNCTION(0x3, "uart5"), /* RX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D2 */
- SUNXI_FUNCTION(0x3, "uart5"), /* RTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D3 */
- SUNXI_FUNCTION(0x3, "uart5"), /* CTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D4 */
- SUNXI_FUNCTION(0x3, "ts"), /* D0 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D5 */
- SUNXI_FUNCTION(0x3, "ts"), /* D1 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D6 */
- SUNXI_FUNCTION(0x3, "ts"), /* D2 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D7 */
- SUNXI_FUNCTION(0x3, "ts"), /* D3 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D8 */
- SUNXI_FUNCTION(0x3, "ts"), /* D4 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D9 */
- SUNXI_FUNCTION(0x3, "ts"), /* D5 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D10 */
- SUNXI_FUNCTION(0x3, "ts"), /* D6 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi"), /* D11 */
- SUNXI_FUNCTION(0x3, "ts"), /* D7 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
- SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
- SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart0")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
- SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
- SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* TX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* RX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "i2s1"), /* MCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "i2s1"), /* LRCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "i2s1"), /* DIN */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "i2s1"), /* DOUT */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 16)), /* PG_EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart4"), /* TX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 17)), /* PG_EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart4"), /* RX */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PG_EINT18 */
- /* Hole, note H starts at pin 9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
- SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
- SUNXI_FUNCTION(0x4, "pwm1")), /* Positive */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
- SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
- SUNXI_FUNCTION(0x4, "pwm1")), /* Negative */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
- SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
- SUNXI_FUNCTION(0x4, "pwm2")), /* Positive */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
- SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
- SUNXI_FUNCTION(0x4, "pwm2")), /* Negative */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm0")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart0")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 28),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
-};
-
-static const struct sunxi_pinctrl_desc sun6i_a31s_pinctrl_data = {
- .pins = sun6i_a31s_pins,
- .npins = ARRAY_SIZE(sun6i_a31s_pins),
- .irq_banks = 4,
-};
-
-static int sun6i_a31s_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun6i_a31s_pinctrl_data);
-}
-
-static const struct of_device_id sun6i_a31s_pinctrl_match[] = {
- { .compatible = "allwinner,sun6i-a31s-pinctrl", },
- {}
-};
-
-static struct platform_driver sun6i_a31s_pinctrl_driver = {
- .probe = sun6i_a31s_pinctrl_probe,
- .driver = {
- .name = "sun6i-a31s-pinctrl",
- .of_match_table = sun6i_a31s_pinctrl_match,
- },
-};
-builtin_platform_driver(sun6i_a31s_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
new file mode 100644
index 000000000000..c86d3c42a905
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
@@ -0,0 +1,321 @@
+/*
+ * Allwinner V3s SoCs pinctrl driver.
+ *
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on pinctrl-sun8i-h3.c, which is:
+ * Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
+ *
+ * Based on pinctrl-sun8i-a23.c, which is:
+ * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_v3s_pins[] = {
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PB_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PB_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PB_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PB_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PB_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PB_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PB_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PB_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PB_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PB_EINT9 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* CMD */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* RST */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "lcd")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "lcd")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "lcd")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "lcd")), /* VSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* FIELD */
+ SUNXI_FUNCTION(0x3, "csi_mipi")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "lcd"), /* D22 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "lcd"), /* D23 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* MS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* CK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PG_EINT5 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_v3s_pinctrl_data = {
+ .pins = sun8i_v3s_pins,
+ .npins = ARRAY_SIZE(sun8i_v3s_pins),
+ .irq_banks = 2,
+ .irq_read_needs_mux = true
+};
+
+static int sun8i_v3s_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun8i_v3s_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_v3s_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-v3s-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun8i_v3s_pinctrl_driver = {
+ .probe = sun8i_v3s_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-v3s-pinctrl",
+ .of_match_table = sun8i_v3s_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun8i_v3s_pinctrl_driver);
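
For reference, each SUNXI_FUNCTION_IRQ_BANK(0x6, bank, irq) entry above describes an external interrupt inside one of the two IRQ banks declared by irq_banks = 2. The core driver later flattens the (bank, irq) pair into a single index; the sketch below shows that mapping under the assumption that a bank holds 32 interrupts (the IRQ_PER_BANK convention), with all other names invented for illustration.

#include <stdio.h>

#define EXAMPLE_IRQ_PER_BANK	32	/* assumed bank width */

static int example_flat_irq(int bank, int irq_in_bank)
{
	/* Same arithmetic as irqnum = func->irqnum + func->irqbank * IRQ_PER_BANK */
	return bank * EXAMPLE_IRQ_PER_BANK + irq_in_bank;
}

int main(void)
{
	/* PG_EINT5 is declared as SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5) above */
	printf("PG_EINT5 -> irq index %d\n", example_flat_irq(1, 5));	/* 37 */
	return 0;
}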
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..60e6e36c4a7e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -540,7 +540,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
enum pin_config_param param;
unsigned long flags;
u32 offset, shift, mask, reg;
- u16 arg, val;
+ u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
val = arg / 10 - 1;
break;
case PIN_CONFIG_BIAS_DISABLE:
- val = 0;
- break;
+ continue;
case PIN_CONFIG_BIAS_PULL_UP:
if (arg == 0)
return -EINVAL;
@@ -1041,21 +1040,35 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
int i;
- pctl->ngroups = pctl->desc->npins;
+ /*
+ * Allocate groups
+ *
+ * We assume that the number of groups is the number of pins
+ * given in the data array.
- /* Allocate groups */
+ * This will not always be true, since some pins might not be
+ * available in the current variant, but fortunately for us,
+ * this means that the number of pins is the maximum group
+ * number we will ever see.
+ */
pctl->groups = devm_kzalloc(&pdev->dev,
- pctl->ngroups * sizeof(*pctl->groups),
+ pctl->desc->npins * sizeof(*pctl->groups),
GFP_KERNEL);
if (!pctl->groups)
return -ENOMEM;
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_pinctrl_group *group = pctl->groups + i;
+ struct sunxi_pinctrl_group *group = pctl->groups + pctl->ngroups;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
group->name = pin->pin.name;
group->pin = pin->pin.number;
+
+ /* And now we count the actual number of pins / groups */
+ pctl->ngroups++;
}
/*
@@ -1063,17 +1076,23 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
* we'll reallocate that later anyway
*/
pctl->functions = devm_kzalloc(&pdev->dev,
- pctl->desc->npins * sizeof(*pctl->functions),
- GFP_KERNEL);
+ pctl->ngroups * sizeof(*pctl->functions),
+ GFP_KERNEL);
if (!pctl->functions)
return -ENOMEM;
/* Count functions and their associated groups */
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_desc_function *func = pin->functions;
+ struct sunxi_desc_function *func;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ for (func = pin->functions; func->name; func++) {
+ if (func->variant && !(pctl->variant & func->variant))
+ continue;
- while (func->name) {
/* Create interrupt mapping while we're at it */
if (!strcmp(func->name, "irq")) {
int irqnum = func->irqnum + func->irqbank * IRQ_PER_BANK;
@@ -1081,22 +1100,32 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
}
sunxi_pinctrl_add_function(pctl, func->name);
- func++;
}
}
+ /* And now allocate and fill the array for real */
pctl->functions = krealloc(pctl->functions,
- pctl->nfunctions * sizeof(*pctl->functions),
- GFP_KERNEL);
+ pctl->nfunctions * sizeof(*pctl->functions),
+ GFP_KERNEL);
+ if (!pctl->functions) {
+ kfree(pctl->functions);
+ return -ENOMEM;
+ }
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_desc_function *func = pin->functions;
+ struct sunxi_desc_function *func;
- while (func->name) {
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ for (func = pin->functions; func->name; func++) {
struct sunxi_pinctrl_function *func_item;
const char **func_grp;
+ if (func->variant && !(pctl->variant & func->variant))
+ continue;
+
func_item = sunxi_pinctrl_find_function_by_name(pctl,
func->name);
if (!func_item)
@@ -1116,7 +1145,6 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
func_grp++;
*func_grp = pin->pin.name;
- func++;
}
}
@@ -1208,15 +1236,16 @@ static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl,
return 0;
}
-int sunxi_pinctrl_init(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc)
+int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long variant)
{
struct device_node *node = pdev->dev.of_node;
struct pinctrl_desc *pctrl_desc;
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct resource *res;
- int i, ret, last_pin;
+ int i, ret, last_pin, pin_idx;
struct clk *clk;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
@@ -1233,6 +1262,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
+ pctl->variant = variant;
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
@@ -1253,8 +1283,14 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
if (!pins)
return -ENOMEM;
- for (i = 0; i < pctl->desc->npins; i++)
- pins[i] = pctl->desc->pins[i].pin;
+ for (i = 0, pin_idx = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ pins[pin_idx++] = pin->pin;
+ }
pctrl_desc = devm_kzalloc(&pdev->dev,
sizeof(*pctrl_desc),
@@ -1265,7 +1301,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctrl_desc->name = dev_name(&pdev->dev);
pctrl_desc->owner = THIS_MODULE;
pctrl_desc->pins = pins;
- pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->npins = pctl->ngroups;
pctrl_desc->confops = &sunxi_pconf_ops;
pctrl_desc->pctlops = &sunxi_pctrl_ops;
pctrl_desc->pmxops = &sunxi_pmx_ops;
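
The changes above all rely on one filtering idiom: a pin (or one of its functions) may carry a variant bitmask, and any entry whose mask does not intersect the controller's variant is skipped, so pctl->ngroups ends up counting only the pins that actually exist on the running SoC. A minimal, self-contained sketch of that test follows; every name in it is invented for illustration.

#include <stdio.h>

#define EXAMPLE_VARIANT_A	(1UL << 0)
#define EXAMPLE_VARIANT_B	(1UL << 1)

struct example_pin {
	const char *name;
	unsigned long variant;	/* 0 means "present on every variant" */
};

static const struct example_pin example_pins[] = {
	{ "PA0", 0 },
	{ "PA1", EXAMPLE_VARIANT_A },
	{ "PA2", EXAMPLE_VARIANT_B },
};

static int example_count_pins(unsigned long variant)
{
	int i, n = 0;

	for (i = 0; i < 3; i++) {
		/* Same test as the driver: skip pins absent from this variant */
		if (example_pins[i].variant &&
		    !(variant & example_pins[i].variant))
			continue;
		n++;
	}

	return n;
}

int main(void)
{
	printf("%d\n", example_count_pins(EXAMPLE_VARIANT_A));	/* prints 2 */
	return 0;
}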
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index f78a44a03189..e1aedd260b2e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -82,7 +82,14 @@
#define SUN4I_FUNC_INPUT 0
#define SUN4I_FUNC_IRQ 6
+#define PINCTRL_SUN5I_A10S BIT(1)
+#define PINCTRL_SUN5I_A13 BIT(2)
+#define PINCTRL_SUN5I_GR8 BIT(3)
+#define PINCTRL_SUN6I_A31 BIT(4)
+#define PINCTRL_SUN6I_A31S BIT(5)
+
struct sunxi_desc_function {
+ unsigned long variant;
const char *name;
u8 muxval;
u8 irqbank;
@@ -91,6 +98,7 @@ struct sunxi_desc_function {
struct sunxi_desc_pin {
struct pinctrl_pin_desc pin;
+ unsigned long variant;
struct sunxi_desc_function *functions;
};
@@ -128,6 +136,7 @@ struct sunxi_pinctrl {
unsigned *irq_array;
spinlock_t lock;
struct pinctrl_dev *pctl_dev;
+ unsigned long variant;
};
#define SUNXI_PIN(_pin, ...) \
@@ -137,12 +146,27 @@ struct sunxi_pinctrl {
__VA_ARGS__, { } }, \
}
+#define SUNXI_PIN_VARIANT(_pin, _variant, ...) \
+ { \
+ .pin = _pin, \
+ .variant = _variant, \
+ .functions = (struct sunxi_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
#define SUNXI_FUNCTION(_val, _name) \
{ \
.name = _name, \
.muxval = _val, \
}
+#define SUNXI_FUNCTION_VARIANT(_val, _name, _variant) \
+ { \
+ .name = _name, \
+ .muxval = _val, \
+ .variant = _variant, \
+ }
+
#define SUNXI_FUNCTION_IRQ(_val, _irq) \
{ \
.name = "irq", \
@@ -290,7 +314,11 @@ static inline u32 sunxi_irq_status_offset(u16 irq)
return irq_num * IRQ_STATUS_IRQ_BITS;
}
-int sunxi_pinctrl_init(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc);
+int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long variant);
+
+#define sunxi_pinctrl_init(_dev, _desc) \
+ sunxi_pinctrl_init_with_variant(_dev, _desc, 0)
#endif /* __PINCTRL_SUNXI_H */
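
Taken together, the new variant macros and sunxi_pinctrl_init_with_variant() let a single pin table serve several closely related SoCs. Below is a hypothetical sketch of how a front-end driver might use them; the merged table, its contents, and the sun6i_a31_a31s_* names are invented for illustration only.

#include <linux/platform_device.h>
#include "pinctrl-sunxi.h"

static const struct sunxi_desc_pin sun6i_a31_a31s_pins[] = {
	/* Pin present on both variants, with one A31-only function */
	SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION_VARIANT(0x2, "gmac", PINCTRL_SUN6I_A31)),
	/* Whole pin only present on the A31 variant */
	SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 28), PINCTRL_SUN6I_A31,
			  SUNXI_FUNCTION(0x0, "gpio_in"),
			  SUNXI_FUNCTION(0x1, "gpio_out")),
};

static const struct sunxi_pinctrl_desc sun6i_a31_a31s_pinctrl_data = {
	.pins = sun6i_a31_a31s_pins,
	.npins = ARRAY_SIZE(sun6i_a31_a31s_pins),
	.irq_banks = 4,
};

static int sun6i_a31s_pinctrl_probe(struct platform_device *pdev)
{
	return sunxi_pinctrl_init_with_variant(pdev,
					       &sun6i_a31_a31s_pinctrl_data,
					       PINCTRL_SUN6I_A31S);
}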
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
new file mode 100644
index 000000000000..815a88673d38
--- /dev/null
+++ b/drivers/pinctrl/ti/Kconfig
@@ -0,0 +1,10 @@
+config PINCTRL_TI_IODELAY
+ tristate "TI IODelay Module pinconf driver"
+ depends on OF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select REGMAP_MMIO
+ help
+ Say Y here to support Texas Instruments' IO delay pinconf driver.
+ The IO delay module is used on the DRA7 SoC family.
diff --git a/drivers/pinctrl/ti/Makefile b/drivers/pinctrl/ti/Makefile
new file mode 100644
index 000000000000..913744e8b8fa
--- /dev/null
+++ b/drivers/pinctrl/ti/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PINCTRL_TI_IODELAY) += pinctrl-ti-iodelay.o
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
new file mode 100644
index 000000000000..717e3404900c
--- /dev/null
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -0,0 +1,937 @@
+/*
+ * Support for configuration of IO Delay module found on Texas Instruments SoCs
+ * such as DRA7
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../devicetree.h"
+
+#define DRIVER_NAME "ti-iodelay"
+
+/**
+ * struct ti_iodelay_reg_data - Describes the registers for the iodelay instance
+ * @signature_mask: CONFIG_REG mask for the signature bits (see TRM)
+ * @signature_value: CONFIG_REG signature value to be written (see TRM)
+ * @lock_mask: CONFIG_REG mask for the lock bits (see TRM)
+ * @lock_val: CONFIG_REG lock value for the lock bits (see TRM)
+ * @unlock_val: CONFIG_REG unlock value for the lock bits (see TRM)
+ * @binary_data_coarse_mask: CONFIG_REG coarse mask (see TRM)
+ * @binary_data_fine_mask: CONFIG_REG fine mask (see TRM)
+ * @reg_refclk_offset: Refclk register offset
+ * @refclk_period_mask: Refclk mask
+ * @reg_coarse_offset: Coarse register configuration offset
+ * @coarse_delay_count_mask: Coarse delay count mask
+ * @coarse_ref_count_mask: Coarse ref count mask
+ * @reg_fine_offset: Fine register configuration offset
+ * @fine_delay_count_mask: Fine delay count mask
+ * @fine_ref_count_mask: Fine ref count mask
+ * @reg_global_lock_offset: Global iodelay module lock register offset
+ * @global_lock_mask: Lock mask
+ * @global_unlock_val: Unlock value
+ * @global_lock_val: Lock value
+ * @reg_start_offset: Offset to iodelay registers after the CONFIG_REG_0 to 8
+ * @reg_nr_per_pin: Number of iodelay registers for each pin
+ * @regmap_config: Regmap configuration for the IODelay region
+ */
+struct ti_iodelay_reg_data {
+ u32 signature_mask;
+ u32 signature_value;
+ u32 lock_mask;
+ u32 lock_val;
+ u32 unlock_val;
+ u32 binary_data_coarse_mask;
+ u32 binary_data_fine_mask;
+
+ u32 reg_refclk_offset;
+ u32 refclk_period_mask;
+
+ u32 reg_coarse_offset;
+ u32 coarse_delay_count_mask;
+ u32 coarse_ref_count_mask;
+
+ u32 reg_fine_offset;
+ u32 fine_delay_count_mask;
+ u32 fine_ref_count_mask;
+
+ u32 reg_global_lock_offset;
+ u32 global_lock_mask;
+ u32 global_unlock_val;
+ u32 global_lock_val;
+
+ u32 reg_start_offset;
+ u32 reg_nr_per_pin;
+
+ struct regmap_config *regmap_config;
+};
+
+/**
+ * struct ti_iodelay_reg_values - Computed io_reg configuration values (see TRM)
+ * @coarse_ref_count: Coarse reference count
+ * @coarse_delay_count: Coarse delay count
+ * @fine_ref_count: Fine reference count
+ * @fine_delay_count: Fine Delay count
+ * @ref_clk_period: Reference Clock period
+ * @cdpe: Coarse delay parameter
+ * @fdpe: Fine delay parameter
+ */
+struct ti_iodelay_reg_values {
+ u16 coarse_ref_count;
+ u16 coarse_delay_count;
+
+ u16 fine_ref_count;
+ u16 fine_delay_count;
+
+ u16 ref_clk_period;
+
+ u32 cdpe;
+ u32 fdpe;
+};
+
+/**
+ * struct ti_iodelay_cfg - Description of each configuration parameters
+ * @offset: Configuration register offset
+ * @a_delay: Agnostic Delay (in ps)
+ * @g_delay: Gnostic Delay (in ps)
+ */
+struct ti_iodelay_cfg {
+ u16 offset;
+ u16 a_delay;
+ u16 g_delay;
+};
+
+/**
+ * struct ti_iodelay_pingroup - Structure that describes one group
+ * @cfg: configuration array for the pin (from dt)
+ * @ncfg: number of configuration values allocated
+ * @config: pinconf "Config" - currently a dummy value
+ */
+struct ti_iodelay_pingroup {
+ struct ti_iodelay_cfg *cfg;
+ int ncfg;
+ unsigned long config;
+};
+
+/**
+ * struct ti_iodelay_device - Represents information for an iodelay instance
+ * @dev: Device pointer
+ * @phys_base: Physical address base of the iodelay device
+ * @reg_base: Virtual address base of the iodelay device
+ * @regmap: Regmap for this iodelay instance
+ * @pctl: Pinctrl device
+ * @desc: pinctrl descriptor for pctl
+ * @pa: pinctrl pin wise description
+ * @reg_data: Register definition data for the IODelay instance
+ * @reg_init_conf_values: Initial configuration values.
+ */
+struct ti_iodelay_device {
+ struct device *dev;
+ unsigned long phys_base;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pa;
+
+ const struct ti_iodelay_reg_data *reg_data;
+ struct ti_iodelay_reg_values reg_init_conf_values;
+};
+
+/**
+ * ti_iodelay_extract() - extract bits for a field
+ * @val: Register value
+ * @mask: Mask
+ *
+ * Return: extracted value which is appropriately shifted
+ */
+static inline u32 ti_iodelay_extract(u32 val, u32 mask)
+{
+ return (val & mask) >> __ffs(mask);
+}
+
+/**
+ * ti_iodelay_compute_dpe() - Compute equation for delay parameter
+ * @period: Period to use
+ * @ref: Reference Count
+ * @delay: Delay count
+ * @delay_m: Delay multiplier
+ *
+ * Return: Computed delay parameter
+ */
+static inline u32 ti_iodelay_compute_dpe(u16 period, u16 ref, u16 delay,
+ u16 delay_m)
+{
+ u64 m, d;
+
+ /* Handle overflow conditions */
+ m = 10 * (u64)period * (u64)ref;
+ d = 2 * (u64)delay * (u64)delay_m;
+
+ /* Truncate result back to 32 bits */
+ return div64_u64(m, d);
+}
+
+/**
+ * ti_iodelay_pinconf_set() - Configure the pin configuration
+ * @iod: iodelay device
+ * @cfg: Configuration
+ *
+ * Update the configuration register as per the TRM and lock it once done.
+ * *IMPORTANT NOTE* The SoC TRM does recommend doing iodelay programming only
+ * while in isolation. But then, isolation also implies that every pin
+ * on the SoC (including DDR) will be isolated out. The only benefit would be
+ * a glitchless configuration; however, the intent of this driver is purely
+ * to support a "glitchy" configuration where applicable.
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
+ struct ti_iodelay_cfg *cfg)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+ struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values;
+ struct device *dev = iod->dev;
+ u32 g_delay_coarse, g_delay_fine;
+ u32 a_delay_coarse, a_delay_fine;
+ u32 c_elements, f_elements;
+ u32 total_delay;
+ u32 reg_mask, reg_val, tmp_val;
+ int r;
+
+ /* NOTE: Truncation is expected in all divisions below */
+ g_delay_coarse = cfg->g_delay / 920;
+ g_delay_fine = ((cfg->g_delay % 920) * 10) / 60;
+
+ a_delay_coarse = cfg->a_delay / ival->cdpe;
+ a_delay_fine = ((cfg->a_delay % ival->cdpe) * 10) / ival->fdpe;
+
+ c_elements = g_delay_coarse + a_delay_coarse;
+ f_elements = (g_delay_fine + a_delay_fine) / 10;
+
+ if (f_elements > 22) {
+ total_delay = c_elements * ival->cdpe + f_elements * ival->fdpe;
+ c_elements = total_delay / ival->cdpe;
+ f_elements = (total_delay % ival->cdpe) / ival->fdpe;
+ }
+
+ reg_mask = reg->signature_mask;
+ reg_val = reg->signature_value << __ffs(reg->signature_mask);
+
+ reg_mask |= reg->binary_data_coarse_mask;
+ tmp_val = c_elements << __ffs(reg->binary_data_coarse_mask);
+ if (tmp_val & ~reg->binary_data_coarse_mask) {
+ dev_err(dev, "Masking overflow of coarse elements %08x\n",
+ tmp_val);
+ tmp_val &= reg->binary_data_coarse_mask;
+ }
+ reg_val |= tmp_val;
+
+ reg_mask |= reg->binary_data_fine_mask;
+ tmp_val = f_elements << __ffs(reg->binary_data_fine_mask);
+ if (tmp_val & ~reg->binary_data_fine_mask) {
+ dev_err(dev, "Masking overflow of fine elements %08x\n",
+ tmp_val);
+ tmp_val &= reg->binary_data_fine_mask;
+ }
+ reg_val |= tmp_val;
+
+ /*
+ * NOTE: we leave the iodelay values unlocked - this is to work around
+ * situations such as those found with mmc mode change.
+ * However, this leaves the padconf register open to unwarranted changes
+ * that impact the iodelay configuration. Use with care!
+ */
+ reg_mask |= reg->lock_mask;
+ reg_val |= reg->unlock_val << __ffs(reg->lock_mask);
+ r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val);
+
+ dev_info(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n",
+ cfg->offset, cfg->a_delay, cfg->g_delay, c_elements,
+ f_elements, reg_val);
+
+ return r;
+}
+
+/**
+ * ti_iodelay_pinconf_init_dev() - Initialize IODelay device
+ * @iod: iodelay device
+ *
+ * Unlocks the iodelay region, computes the common parameters
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+ struct device *dev = iod->dev;
+ struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values;
+ u32 val;
+ int r;
+
+ /* unlock the iodelay region */
+ r = regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_unlock_val);
+ if (r)
+ return r;
+
+ /* Read back the recalibration values set up by the bootloader */
+ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val);
+ if (r)
+ return r;
+ ival->ref_clk_period = ti_iodelay_extract(val, reg->refclk_period_mask);
+ dev_dbg(dev, "refclk_period=0x%04x\n", ival->ref_clk_period);
+
+ r = regmap_read(iod->regmap, reg->reg_coarse_offset, &val);
+ if (r)
+ return r;
+ ival->coarse_ref_count =
+ ti_iodelay_extract(val, reg->coarse_ref_count_mask);
+ ival->coarse_delay_count =
+ ti_iodelay_extract(val, reg->coarse_delay_count_mask);
+ if (!ival->coarse_delay_count) {
+ dev_err(dev, "Invalid Coarse delay count (0) (reg=0x%08x)\n",
+ val);
+ return -EINVAL;
+ }
+ ival->cdpe = ti_iodelay_compute_dpe(ival->ref_clk_period,
+ ival->coarse_ref_count,
+ ival->coarse_delay_count, 88);
+ if (!ival->cdpe) {
+ dev_err(dev, "Invalid cdpe computed params = %d %d %d\n",
+ ival->ref_clk_period, ival->coarse_ref_count,
+ ival->coarse_delay_count);
+ return -EINVAL;
+ }
+ dev_dbg(iod->dev, "coarse: ref=0x%04x delay=0x%04x cdpe=0x%08x\n",
+ ival->coarse_ref_count, ival->coarse_delay_count, ival->cdpe);
+
+ r = regmap_read(iod->regmap, reg->reg_fine_offset, &val);
+ if (r)
+ return r;
+ ival->fine_ref_count =
+ ti_iodelay_extract(val, reg->fine_ref_count_mask);
+ ival->fine_delay_count =
+ ti_iodelay_extract(val, reg->fine_delay_count_mask);
+ if (!ival->fine_delay_count) {
+ dev_err(dev, "Invalid Fine delay count (0) (reg=0x%08x)\n",
+ val);
+ return -EINVAL;
+ }
+ ival->fdpe = ti_iodelay_compute_dpe(ival->ref_clk_period,
+ ival->fine_ref_count,
+ ival->fine_delay_count, 264);
+ if (!ival->fdpe) {
+ dev_err(dev, "Invalid fdpe(0) computed params = %d %d %d\n",
+ ival->ref_clk_period, ival->fine_ref_count,
+ ival->fine_delay_count);
+ return -EINVAL;
+ }
+ dev_dbg(iod->dev, "fine: ref=0x%04x delay=0x%04x fdpe=0x%08x\n",
+ ival->fine_ref_count, ival->fine_delay_count, ival->fdpe);
+
+ return 0;
+}
+
+/**
+ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
+ * @iod: IODelay device
+ *
+ * Deinitialize the IODelay device (basically just lock the region back up).
+ */
+static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+
+ /* lock the iodelay region back again */
+ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_lock_val);
+}
+
+/**
+ * ti_iodelay_get_pingroup() - Find the group mapped by a group selector
+ * @iod: iodelay device
+ * @selector: Group Selector
+ *
+ * Return: Corresponding group representing group selector
+ */
+static struct ti_iodelay_pingroup *
+ti_iodelay_get_pingroup(struct ti_iodelay_device *iod, unsigned int selector)
+{
+ struct group_desc *g;
+
+ g = pinctrl_generic_get_group(iod->pctl, selector);
+ if (!g) {
+ dev_err(iod->dev, "%s could not find pingroup %i\n", __func__,
+ selector);
+
+ return NULL;
+ }
+
+ return g->data;
+}
+
+/**
+ * ti_iodelay_offset_to_pin() - get a pin index based on the register offset
+ * @iod: iodelay driver instance
+ * @offset: register offset from the base
+ */
+static int ti_iodelay_offset_to_pin(struct ti_iodelay_device *iod,
+ unsigned int offset)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ unsigned int index;
+
+ if (offset > r->regmap_config->max_register) {
+ dev_err(iod->dev, "mux offset out of range: 0x%x (0x%x)\n",
+ offset, r->regmap_config->max_register);
+ return -EINVAL;
+ }
+
+ index = (offset - r->reg_start_offset) / r->regmap_config->reg_stride;
+ index /= r->reg_nr_per_pin;
+
+ return index;
+}
+
+/**
+ * ti_iodelay_node_iterator() - Iterate iodelay node
+ * @pctldev: Pin controller driver
+ * @np: Device node
+ * @pinctrl_spec: Parsed arguments from device tree
+ * @pins: Array of pins in the pin group
+ * @pin_index: Pin index in the pin array
+ * @data: Pin controller driver specific data
+ *
+ */
+static int ti_iodelay_node_iterator(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ const struct of_phandle_args *pinctrl_spec,
+ int *pins, int pin_index, void *data)
+{
+ struct ti_iodelay_device *iod;
+ struct ti_iodelay_cfg *cfg = data;
+ const struct ti_iodelay_reg_data *r;
+ struct pinctrl_pin_desc *pd;
+ int pin;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ if (!iod)
+ return -EINVAL;
+
+ r = iod->reg_data;
+
+ if (pinctrl_spec->args_count < r->reg_nr_per_pin) {
+ dev_err(iod->dev, "invalid args_count for spec: %i\n",
+ pinctrl_spec->args_count);
+
+ return -EINVAL;
+ }
+
+ /* Index plus two value cells */
+ cfg[pin_index].offset = pinctrl_spec->args[0];
+ cfg[pin_index].a_delay = pinctrl_spec->args[1] & 0xffff;
+ cfg[pin_index].g_delay = pinctrl_spec->args[2] & 0xffff;
+
+ pin = ti_iodelay_offset_to_pin(iod, cfg[pin_index].offset);
+ if (pin < 0) {
+ dev_err(iod->dev, "could not add functions for %s %ux\n",
+ np->name, cfg[pin_index].offset);
+ return -ENODEV;
+ }
+ pins[pin_index] = pin;
+
+ pd = &iod->pa[pin];
+ pd->drv_data = &cfg[pin_index];
+
+ dev_dbg(iod->dev, "%s offset=%x a_delay = %d g_delay = %d\n",
+ np->name, cfg[pin_index].offset, cfg[pin_index].a_delay,
+ cfg[pin_index].g_delay);
+
+ return 0;
+}
+
+/**
+ * ti_iodelay_dt_node_to_map() - Map a device tree node to appropriate group
+ * @pctldev: pinctrl device representing IODelay device
+ * @np: Node Pointer (device tree)
+ * @map: Pinctrl Map returned back to pinctrl framework
+ * @num_maps: Number of maps (1)
+ *
+ * Maps the device tree description into a group of configuration parameters
+ * for iodelay block entry.
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct ti_iodelay_device *iod;
+ struct ti_iodelay_cfg *cfg;
+ struct ti_iodelay_pingroup *g;
+ const char *name = "pinctrl-pin-array";
+ int rows, *pins, error = -EINVAL, i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ if (!iod)
+ return -EINVAL;
+
+ rows = pinctrl_count_index_with_args(np, name);
+ if (rows == -EINVAL)
+ return rows;
+
+ *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
+ if (!*map)
+ return -ENOMEM;
+ *num_maps = 0;
+
+ g = devm_kzalloc(iod->dev, sizeof(*g), GFP_KERNEL);
+ if (!g) {
+ error = -ENOMEM;
+ goto free_map;
+ }
+
+ pins = devm_kzalloc(iod->dev, sizeof(*pins) * rows, GFP_KERNEL);
+ if (!pins)
+ goto free_group;
+
+ cfg = devm_kzalloc(iod->dev, sizeof(*cfg) * rows, GFP_KERNEL);
+ if (!cfg) {
+ error = -ENOMEM;
+ goto free_pins;
+ }
+
+ for (i = 0; i < rows; i++) {
+ struct of_phandle_args pinctrl_spec;
+
+ error = pinctrl_parse_index_with_args(np, name, i,
+ &pinctrl_spec);
+ if (error)
+ goto free_data;
+
+ error = ti_iodelay_node_iterator(pctldev, np, &pinctrl_spec,
+ pins, i, cfg);
+ if (error)
+ goto free_data;
+ }
+
+ g->cfg = cfg;
+ g->ncfg = i;
+ g->config = PIN_CONFIG_END;
+
+ error = pinctrl_generic_add_group(iod->pctl, np->name, pins, i, g);
+ if (error < 0)
+ goto free_data;
+
+ (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)->data.configs.group_or_pin = np->name;
+ (*map)->data.configs.configs = &g->config;
+ (*map)->data.configs.num_configs = 1;
+ *num_maps = 1;
+
+ return 0;
+
+free_data:
+ devm_kfree(iod->dev, cfg);
+free_pins:
+ devm_kfree(iod->dev, pins);
+free_group:
+ devm_kfree(iod->dev, g);
+free_map:
+ devm_kfree(iod->dev, *map);
+
+ return error;
+}
+
+/**
+ * ti_iodelay_pinconf_group_get() - Get the group configuration
+ * @pctldev: pinctrl device representing IODelay device
+ * @selector: Group selector
+ * @config: Configuration returned
+ *
+ * Return: The configuration if the group is valid, else returns -EINVAL
+ */
+static int ti_iodelay_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *config)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+
+ if (!group)
+ return -EINVAL;
+
+ *config = group->config;
+ return 0;
+}
+
+/**
+ * ti_iodelay_pinconf_group_set() - Configure the groups of pins
+ * @pctldev: pinctrl device representing IODelay device
+ * @selector: Group selector
+ * @configs: Configurations
+ * @num_configs: Number of configurations
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+ int i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+
+ if (num_configs != 1) {
+ dev_err(dev, "Unsupported number of configurations %d\n",
+ num_configs);
+ return -EINVAL;
+ }
+
+ if (*configs != PIN_CONFIG_END) {
+ dev_err(dev, "Unsupported configuration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < group->ncfg; i++) {
+ if (ti_iodelay_pinconf_set(iod, &group->cfg[i]))
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * ti_iodelay_pin_to_offset() - get pin register offset based on the pin index
+ * @iod: iodelay driver instance
+ * @selector: Pin index
+ */
+static unsigned int ti_iodelay_pin_to_offset(struct ti_iodelay_device *iod,
+ unsigned int selector)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ unsigned int offset;
+
+ offset = selector * r->regmap_config->reg_stride;
+ offset *= r->reg_nr_per_pin;
+ offset += r->reg_start_offset;
+
+ return offset;
+}
+
+static void ti_iodelay_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct ti_iodelay_device *iod;
+ struct pinctrl_pin_desc *pd;
+ struct ti_iodelay_cfg *cfg;
+ const struct ti_iodelay_reg_data *r;
+ unsigned long offset;
+ u32 in, oen, out;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ r = iod->reg_data;
+
+ offset = ti_iodelay_pin_to_offset(iod, pin);
+ pd = &iod->pa[pin];
+ cfg = pd->drv_data;
+
+ regmap_read(iod->regmap, offset, &in);
+ regmap_read(iod->regmap, offset + r->regmap_config->reg_stride, &oen);
+ regmap_read(iod->regmap, offset + r->regmap_config->reg_stride * 2,
+ &out);
+
+ seq_printf(s, "%lx a: %i g: %i (%08x %08x %08x) %s ",
+ iod->phys_base + offset,
+ cfg ? cfg->a_delay : -1,
+ cfg ? cfg->g_delay : -1,
+ in, oen, out, DRIVER_NAME);
+}
+
+/**
+ * ti_iodelay_pinconf_group_dbg_show() - show the group information
+ * @pctldev: pinctrl device representing IODelay device
+ * @s: Sequence file
+ * @selector: Group selector
+ *
+ * Provide the configuration information of the selected group
+ */
+static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int selector)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+ int i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+ if (!group)
+ return;
+
+ for (i = 0; i < group->ncfg; i++) {
+ struct ti_iodelay_cfg *cfg;
+ u32 reg = 0;
+
+ cfg = &group->cfg[i];
+ regmap_read(iod->regmap, cfg->offset, &reg);
+ seq_printf(s, "\n\t0x%08x = 0x%08x (%3d, %3d)",
+ cfg->offset, reg, cfg->a_delay,
+ cfg->g_delay);
+ }
+}
+#endif
+
+static struct pinctrl_ops ti_iodelay_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+ .pin_dbg_show = ti_iodelay_pin_dbg_show,
+#endif
+ .dt_node_to_map = ti_iodelay_dt_node_to_map,
+};
+
+static struct pinconf_ops ti_iodelay_pinctrl_pinconf_ops = {
+ .pin_config_group_get = ti_iodelay_pinconf_group_get,
+ .pin_config_group_set = ti_iodelay_pinconf_group_set,
+#ifdef CONFIG_DEBUG_FS
+ .pin_config_group_dbg_show = ti_iodelay_pinconf_group_dbg_show,
+#endif
+};
+
+/**
+ * ti_iodelay_alloc_pins() - Allocate structures needed for pins for iodelay
+ * @dev: Device pointer
+ * @iod: iodelay device
+ * @base_phy: Base Physical Address
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_alloc_pins(struct device *dev,
+ struct ti_iodelay_device *iod, u32 base_phy)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ struct pinctrl_pin_desc *pin;
+ u32 phy_reg;
+ int nr_pins, i;
+
+ nr_pins = ti_iodelay_offset_to_pin(iod, r->regmap_config->max_register);
+ dev_dbg(dev, "Allocating %i pins\n", nr_pins);
+
+ iod->pa = devm_kzalloc(dev, sizeof(*iod->pa) * nr_pins, GFP_KERNEL);
+ if (!iod->pa)
+ return -ENOMEM;
+
+ iod->desc.pins = iod->pa;
+ iod->desc.npins = nr_pins;
+
+ phy_reg = r->reg_start_offset + base_phy;
+
+ for (i = 0; i < nr_pins; i++, phy_reg += 4) {
+ pin = &iod->pa[i];
+ pin->number = i;
+ }
+
+ return 0;
+}
+
+static struct regmap_config dra7_iodelay_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd1c,
+};
+
+static struct ti_iodelay_reg_data dra7_iodelay_data = {
+ .signature_mask = 0x0003f000,
+ .signature_value = 0x29,
+ .lock_mask = 0x00000400,
+ .lock_val = 1,
+ .unlock_val = 0,
+ .binary_data_coarse_mask = 0x000003e0,
+ .binary_data_fine_mask = 0x0000001f,
+
+ .reg_refclk_offset = 0x14,
+ .refclk_period_mask = 0xffff,
+
+ .reg_coarse_offset = 0x18,
+ .coarse_delay_count_mask = 0xffff0000,
+ .coarse_ref_count_mask = 0x0000ffff,
+
+ .reg_fine_offset = 0x1C,
+ .fine_delay_count_mask = 0xffff0000,
+ .fine_ref_count_mask = 0x0000ffff,
+
+ .reg_global_lock_offset = 0x2c,
+ .global_lock_mask = 0x0000ffff,
+ .global_unlock_val = 0x0000aaaa,
+ .global_lock_val = 0x0000aaab,
+
+ .reg_start_offset = 0x30,
+ .reg_nr_per_pin = 3,
+ .regmap_config = &dra7_iodelay_regmap_config,
+};
+
+static const struct of_device_id ti_iodelay_of_match[] = {
+ {.compatible = "ti,dra7-iodelay", .data = &dra7_iodelay_data},
+ { /* Hopefully no more.. */ },
+};
+MODULE_DEVICE_TABLE(of, ti_iodelay_of_match);
+
+/**
+ * ti_iodelay_probe() - Standard probe
+ * @pdev: platform device
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = of_node_get(dev->of_node);
+ const struct of_device_id *match;
+ struct resource *res;
+ struct ti_iodelay_device *iod;
+ int ret = 0;
+
+ if (!np) {
+ ret = -EINVAL;
+ dev_err(dev, "No OF node\n");
+ goto exit_out;
+ }
+
+ match = of_match_device(ti_iodelay_of_match, dev);
+ if (!match) {
+ ret = -EINVAL;
+ dev_err(dev, "No DATA match\n");
+ goto exit_out;
+ }
+
+ iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL);
+ if (!iod) {
+ ret = -ENOMEM;
+ goto exit_out;
+ }
+ iod->dev = dev;
+ iod->reg_data = match->data;
+
+ /* So far we can assume there is only one bank of registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing MEM resource\n");
+ ret = -ENODEV;
+ goto exit_out;
+ }
+
+ iod->phys_base = res->start;
+ iod->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iod->reg_base)) {
+ ret = PTR_ERR(iod->reg_base);
+ goto exit_out;
+ }
+
+ iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base,
+ iod->reg_data->regmap_config);
+ if (IS_ERR(iod->regmap)) {
+ dev_err(dev, "Regmap MMIO init failed.\n");
+ ret = PTR_ERR(iod->regmap);
+ goto exit_out;
+ }
+
+ if (ti_iodelay_pinconf_init_dev(iod))
+ goto exit_out;
+
+ ret = ti_iodelay_alloc_pins(dev, iod, res->start);
+ if (ret)
+ goto exit_out;
+
+ iod->desc.pctlops = &ti_iodelay_pinctrl_ops;
+ /* no pinmux ops - we are pinconf */
+ iod->desc.confops = &ti_iodelay_pinctrl_pinconf_ops;
+ iod->desc.name = dev_name(dev);
+ iod->desc.owner = THIS_MODULE;
+
+ ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl);
+ if (ret) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ goto exit_out;
+ }
+
+ platform_set_drvdata(pdev, iod);
+
+exit_out:
+ of_node_put(np);
+ return ret;
+}
+
+/**
+ * ti_iodelay_remove() - standard remove
+ * @pdev: platform device
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_remove(struct platform_device *pdev)
+{
+ struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
+
+ if (!iod)
+ return 0;
+
+ if (iod->pctl)
+ pinctrl_unregister(iod->pctl);
+
+ ti_iodelay_pinconf_deinit_dev(iod);
+
+ /* Expect other allocations to be freed by devm */
+
+ return 0;
+}
+
+static struct platform_driver ti_iodelay_driver = {
+ .probe = ti_iodelay_probe,
+ .remove = ti_iodelay_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = ti_iodelay_of_match,
+ },
+};
+module_platform_driver(ti_iodelay_driver);
+
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("Pinconf driver for TI's IO Delay module");
+MODULE_LICENSE("GPL v2");
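
To make the delay arithmetic above concrete, here is a standalone user-space sketch of the formula implemented by ti_iodelay_compute_dpe() and of the coarse-element split done in ti_iodelay_pinconf_set(); the register values plugged in are invented, and only the equations mirror the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors ti_iodelay_compute_dpe(): 10 * period * ref / (2 * delay * delay_m) */
static unsigned int example_compute_dpe(uint16_t period, uint16_t ref,
					uint16_t delay, uint16_t delay_m)
{
	uint64_t m = 10ULL * period * ref;
	uint64_t d = 2ULL * delay * delay_m;

	return (unsigned int)(m / d);
}

int main(void)
{
	/* Made-up values standing in for the refclk/coarse register reads */
	unsigned int ref_clk_period = 0x1000, coarse_ref = 0x10, coarse_delay = 0x50;
	unsigned int cdpe = example_compute_dpe(ref_clk_period, coarse_ref,
						coarse_delay, 88);

	/* An "agnostic" delay request is then split into coarse elements */
	unsigned int a_delay = 1200;	/* ps */
	unsigned int a_delay_coarse = a_delay / cdpe;

	printf("cdpe=%u, %u ps -> %u coarse elements\n",
	       cdpe, a_delay, a_delay_coarse);
	return 0;
}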
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 9b2ee717bccc..546f23c9040c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -297,7 +297,7 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
const struct pin_desc *desc,
- enum pin_config_param param, u16 arg)
+ enum pin_config_param param, u32 arg)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
enum uniphier_pin_pull_dir pull_dir =
@@ -468,7 +468,7 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
enum pin_config_param param =
pinconf_to_config_param(configs[i]);
- u16 arg = pinconf_to_config_argument(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd9794683..96686336e3a3 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 270ca2a47a8c..c207e60b734f 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -428,7 +428,7 @@ static int wmt_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
u32 bank = WMT_BANK_FROM_PIN(pin);
u32 bit = WMT_BIT_FROM_PIN(pin);
u32 reg_pull_en = data->banks[bank].reg_pull_en;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 59aa8e302bc3..49a594855f98 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -816,13 +816,6 @@ config INTEL_SCU_IPC_UTIL
low level access for debug work and updating the firmware. Say
N unless you will be doing this on an Intel MID platform.
-config GPIO_INTEL_PMIC
- bool "Intel PMIC GPIO support"
- depends on INTEL_SCU_IPC && GPIOLIB
- ---help---
- Say Y here to support GPIO via the SCU IPC interface
- on Intel MID platforms.
-
config INTEL_MID_POWER_BUTTON
tristate "power button driver for Intel MID platforms"
depends on INTEL_SCU_IPC && INPUT
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index d4111f0f8a78..b2f52a7690af 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
-obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741acb3c9..f46ece2ce3c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 8:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de870ff8..361770568ad0 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
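Passing IRQF_ONESHOT here is required, not cosmetic: with a NULL primary handler, request_threaded_irq() installs the core's default hard handler, and genirq rejects thread-only interrupts that are not one-shot because the line must stay masked until the thread has run. A minimal sketch of the pattern (names hypothetical):

/* Sketch of a thread-only interrupt request; names are hypothetical. */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr_thread(int irq, void *dev_id)
{
	/* slow work (bus transactions, etc.) runs here in process context */
	return IRQ_HANDLED;
}

static int demo_request_irq(struct device *dev, int irq, void *priv)
{
	/* NULL hard handler => IRQF_ONESHOT is mandatory */
	return devm_request_threaded_irq(dev, irq, NULL, demo_isr_thread,
					 IRQF_ONESHOT, "demo", priv);
}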
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
deleted file mode 100644
index 91ae58510d92..000000000000
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/* Moorestown PMIC GPIO (access through IPC) driver
- * Copyright (c) 2008 - 2009, Intel Corporation.
- *
- * Author: Alek Du <alek.du@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Moorestown platform PMIC chip
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
-#include <asm/intel_scu_ipc.h>
-#include <linux/device.h>
-#include <linux/intel_pmic_gpio.h>
-#include <linux/platform_device.h>
-
-#define DRIVER_NAME "pmic_gpio"
-
-/* register offset that IPC driver should use
- * 8 GPIO + 8 GPOSW (6 controllable) + 8GPO
- */
-enum pmic_gpio_register {
- GPIO0 = 0xE0,
- GPIO7 = 0xE7,
- GPIOINT = 0xE8,
- GPOSWCTL0 = 0xEC,
- GPOSWCTL5 = 0xF1,
- GPO = 0xF4,
-};
-
-/* bits definition for GPIO & GPOSW */
-#define GPIO_DRV 0x01
-#define GPIO_DIR 0x02
-#define GPIO_DIN 0x04
-#define GPIO_DOU 0x08
-#define GPIO_INTCTL 0x30
-#define GPIO_DBC 0xc0
-
-#define GPOSW_DRV 0x01
-#define GPOSW_DOU 0x08
-#define GPOSW_RDRV 0x30
-
-#define GPIO_UPDATE_TYPE 0x80000000
-
-#define NUM_GPIO 24
-
-struct pmic_gpio {
- struct mutex buslock;
- struct gpio_chip chip;
- void *gpiointr;
- int irq;
- unsigned irq_base;
- unsigned int update_type;
- u32 trigger_type;
-};
-
-static void pmic_program_irqtype(int gpio, int type)
-{
- if (type & IRQ_TYPE_EDGE_RISING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- if (offset >= 8) {
- pr_err("only pin 0-7 support input\n");
- return -1;/* we only have 8 GPIO can use as input */
- }
- return intel_scu_ipc_update_register(GPIO0 + offset,
- GPIO_DIR, GPIO_DIR);
-}
-
-static int pmic_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- int rc = 0;
-
- if (offset < 8)/* it is GPIO */
- rc = intel_scu_ipc_update_register(GPIO0 + offset,
- GPIO_DRV | (value ? GPIO_DOU : 0),
- GPIO_DRV | GPIO_DOU | GPIO_DIR);
- else if (offset < 16)/* it is GPOSW */
- rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
- GPOSW_DRV | (value ? GPOSW_DOU : 0),
- GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
- else if (offset > 15 && offset < 24)/* it is GPO */
- rc = intel_scu_ipc_update_register(GPO,
- value ? 1 << (offset - 16) : 0,
- 1 << (offset - 16));
- else {
- pr_err("invalid PMIC GPIO pin %d!\n", offset);
- WARN_ON(1);
- }
-
- return rc;
-}
-
-static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- u8 r;
- int ret;
-
- /* we only have 8 GPIO pins we can use as input */
- if (offset >= 8)
- return -EOPNOTSUPP;
- ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r);
- if (ret < 0)
- return ret;
- return r & GPIO_DIN;
-}
-
-static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- if (offset < 8)/* it is GPIO */
- intel_scu_ipc_update_register(GPIO0 + offset,
- GPIO_DRV | (value ? GPIO_DOU : 0),
- GPIO_DRV | GPIO_DOU);
- else if (offset < 16)/* it is GPOSW */
- intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
- GPOSW_DRV | (value ? GPOSW_DOU : 0),
- GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
- else if (offset > 15 && offset < 24) /* it is GPO */
- intel_scu_ipc_update_register(GPO,
- value ? 1 << (offset - 16) : 0,
- 1 << (offset - 16));
-}
-
-/*
- * This is called from genirq with pg->buslock locked and
- * irq_desc->lock held. We can not access the scu bus here, so we
- * store the change and update in the bus_sync_unlock() function below
- */
-static int pmic_irq_type(struct irq_data *data, unsigned type)
-{
- struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
- u32 gpio = data->irq - pg->irq_base;
-
- if (gpio >= pg->chip.ngpio)
- return -EINVAL;
-
- pg->trigger_type = type;
- pg->update_type = gpio | GPIO_UPDATE_TYPE;
- return 0;
-}
-
-static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pmic_gpio *pg = gpiochip_get_data(chip);
-
- return pg->irq_base + offset;
-}
-
-static void pmic_bus_lock(struct irq_data *data)
-{
- struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&pg->buslock);
-}
-
-static void pmic_bus_sync_unlock(struct irq_data *data)
-{
- struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
-
- if (pg->update_type) {
- unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
-
- pmic_program_irqtype(gpio, pg->trigger_type);
- pg->update_type = 0;
- }
- mutex_unlock(&pg->buslock);
-}
-
-/* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(struct irq_data *data) { }
-
-static void pmic_irq_mask(struct irq_data *data) { }
-
-static struct irq_chip pmic_irqchip = {
- .name = "PMIC-GPIO",
- .irq_mask = pmic_irq_mask,
- .irq_unmask = pmic_irq_unmask,
- .irq_set_type = pmic_irq_type,
- .irq_bus_lock = pmic_bus_lock,
- .irq_bus_sync_unlock = pmic_bus_sync_unlock,
-};
-
-static irqreturn_t pmic_irq_handler(int irq, void *data)
-{
- struct pmic_gpio *pg = data;
- u8 intsts = *((u8 *)pg->gpiointr + 4);
- int gpio;
- irqreturn_t ret = IRQ_NONE;
-
- for (gpio = 0; gpio < 8; gpio++) {
- if (intsts & (1 << gpio)) {
- pr_debug("pmic pin %d triggered\n", gpio);
- generic_handle_irq(pg->irq_base + gpio);
- ret = IRQ_HANDLED;
- }
- }
- return ret;
-}
-
-static int platform_pmic_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int irq = platform_get_irq(pdev, 0);
- struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;
-
- struct pmic_gpio *pg;
- int retval;
- int i;
-
- if (irq < 0) {
- dev_dbg(dev, "no IRQ line\n");
- return -EINVAL;
- }
-
- if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
- dev_dbg(dev, "incorrect or missing platform data\n");
- return -EINVAL;
- }
-
- pg = kzalloc(sizeof(*pg), GFP_KERNEL);
- if (!pg)
- return -ENOMEM;
-
- dev_set_drvdata(dev, pg);
-
- pg->irq = irq;
- /* setting up SRAM mapping for GPIOINT register */
- pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
- if (!pg->gpiointr) {
- pr_err("Can not map GPIOINT\n");
- retval = -EINVAL;
- goto err2;
- }
- pg->irq_base = pdata->irq_base;
- pg->chip.label = "intel_pmic";
- pg->chip.direction_input = pmic_gpio_direction_input;
- pg->chip.direction_output = pmic_gpio_direction_output;
- pg->chip.get = pmic_gpio_get;
- pg->chip.set = pmic_gpio_set;
- pg->chip.to_irq = pmic_gpio_to_irq;
- pg->chip.base = pdata->gpio_base;
- pg->chip.ngpio = NUM_GPIO;
- pg->chip.can_sleep = 1;
- pg->chip.parent = dev;
-
- mutex_init(&pg->buslock);
-
- pg->chip.parent = dev;
- retval = gpiochip_add_data(&pg->chip, pg);
- if (retval) {
- pr_err("Can not add pmic gpio chip\n");
- goto err;
- }
-
- retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
- if (retval) {
- pr_warn("Interrupt request failed\n");
- goto fail_request_irq;
- }
-
- for (i = 0; i < 8; i++) {
- irq_set_chip_and_handler_name(i + pg->irq_base,
- &pmic_irqchip,
- handle_simple_irq,
- "demux");
- irq_set_chip_data(i + pg->irq_base, pg);
- }
- return 0;
-
-fail_request_irq:
- gpiochip_remove(&pg->chip);
-err:
- iounmap(pg->gpiointr);
-err2:
- kfree(pg);
- return retval;
-}
-
-/* at the same time, register a platform driver
- * this supports the sfi 0.81 fw */
-static struct platform_driver platform_pmic_gpio_driver = {
- .driver = {
- .name = DRIVER_NAME,
- },
- .probe = platform_pmic_gpio_probe,
-};
-
-static int __init platform_pmic_gpio_init(void)
-{
- return platform_driver_register(&platform_pmic_gpio_driver);
-}
-subsys_initcall(platform_pmic_gpio_init);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a219c0..25f15df5c2d7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
return 0;
fail_platform_mux_register:
- for (i--; i > 0 ; i--)
+ while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
fail_alloc:
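The old unwind loop began with i-- but stopped at i > 0, so the mux device registered at index 0 leaked on failure; while (--i >= 0) walks back over every index that was successfully registered, including 0. The generic shape of the idiom (names hypothetical):

/* Sketch of the register-then-unwind idiom; names are hypothetical. */
static int demo_register_all(struct demo_priv *priv, int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = demo_register_one(priv, i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* undoes indices i-1 down to 0 */
		demo_unregister_one(priv, i);
	return err;
}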
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83a7271..25b176996cb7 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
- struct acpi_device *adev, *ts_adev;
+ struct acpi_device *adev, *ts_adev = NULL;
acpi_handle handle;
acpi_status status;
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
{
s3_wmi_send_lid_state();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
static struct platform_driver s3_wmi_driver = {
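Replacing the #ifdef CONFIG_PM guard with __maybe_unused keeps the resume callback compiled and type-checked in every configuration; SIMPLE_DEV_PM_OPS() only references it when CONFIG_PM_SLEEP is set, and the attribute silences the otherwise inevitable "defined but not used" warning. A minimal sketch of the pattern (names hypothetical):

/* Sketch of the __maybe_unused PM-callback pattern; names hypothetical. */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused demo_resume(struct device *dev)
{
	/* restore device state here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, NULL, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= &demo_pm_ops,
	},
};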
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index abeb77217a21..b8cacccf18c8 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -32,7 +32,7 @@ config POWER_RESET_AT91_RESET
config POWER_RESET_AT91_SAMA5D2_SHDWC
tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_AT91
default SOC_SAMA5
help
This driver supports the alternate shutdown controller for some Atmel
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index a85dd4d233af..c6c3beea72f9 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -14,9 +14,12 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <soc/at91/at91sam9_ddrsdr.h>
+
#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
#define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
@@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = {
static void __iomem *at91_shdwc_base;
static struct clk *sclk;
+static void __iomem *mpddrc_base;
static void __init at91_wakeup_status(void)
{
@@ -73,6 +77,29 @@ static void at91_poweroff(void)
writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
}
+static void at91_lpddr_poweroff(void)
+{
+ asm volatile(
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ /* Power down SDRAM0 */
+ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ /* Shutdown CPU */
+ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ " b .\n\t"
+ :
+ : "r" (mpddrc_base),
+ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+ "r" (at91_shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ : "r0");
+}
+
static int at91_poweroff_get_wakeup_mode(struct device_node *np)
{
const char *pm;
@@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
static int __init at91_poweroff_probe(struct platform_device *pdev)
{
struct resource *res;
+ struct device_node *np;
+ u32 ddr_type;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev)
pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+ if (!np)
+ return 0;
+
+ mpddrc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!mpddrc_base)
+ return 0;
+
+ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+ (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+ pm_power_off = at91_lpddr_poweroff;
+ else
+ iounmap(mpddrc_base);
+
return 0;
}
static int __exit at91_poweroff_remove(struct platform_device *pdev)
{
- if (pm_power_off == at91_poweroff)
+ if (pm_power_off == at91_poweroff ||
+ pm_power_off == at91_lpddr_poweroff)
pm_power_off = NULL;
clk_disable_unprepare(sclk);
@@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id at91_ramc_of_match[] = {
+ { .compatible = "atmel,sama5d3-ddramc", },
+ { /* sentinel */ }
+};
+
static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9260-shdwc", },
{ .compatible = "atmel,at91sam9rl-shdwc", },
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 568580cf0655..b99769f8ab15 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -134,6 +134,15 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
+static int samx7_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
+ at91_rstc_base);
+
+ return NOTIFY_DONE;
+}
+
static void __init at91_reset_status(struct platform_device *pdev)
{
u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
@@ -173,6 +182,7 @@ static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
{ .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
+ { .compatible = "atmel,samx7-rstc", .data = samx7_restart },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
@@ -238,20 +248,12 @@ static int __exit at91_reset_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id at91_reset_plat_match[] = {
- { "at91-sam9260-reset", (unsigned long)at91sam9260_restart },
- { "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
-
static struct platform_driver at91_reset_driver = {
.remove = __exit_p(at91_reset_remove),
.driver = {
.name = "at91-reset",
.of_match_table = at91_reset_of_match,
},
- .id_table = at91_reset_plat_match,
};
module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 8a5ac9706c9c..90b0b5a70ce5 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -22,9 +22,12 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <soc/at91/at91sam9_ddrsdr.h>
+
#define SLOW_CLOCK_FREQ 32768
#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
@@ -75,6 +78,7 @@ struct shdwc {
*/
static struct shdwc *at91_shdwc;
static struct clk *sclk;
+static void __iomem *mpddrc_base;
static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
@@ -108,6 +112,29 @@ static void at91_poweroff(void)
at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
}
+static void at91_lpddr_poweroff(void)
+{
+ asm volatile(
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ /* Power down SDRAM0 */
+ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ /* Shutdown CPU */
+ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ " b .\n\t"
+ :
+ : "r" (mpddrc_base),
+ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+ "r" (at91_shdwc->at91_shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ : "r0");
+}
+
static u32 at91_shdwc_debouncer_value(struct platform_device *pdev,
u32 in_period_us)
{
@@ -212,6 +239,8 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
{
struct resource *res;
const struct of_device_id *match;
+ struct device_node *np;
+ u32 ddr_type;
int ret;
if (!pdev->dev.of_node)
@@ -249,6 +278,23 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+ if (!np)
+ return 0;
+
+ mpddrc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!mpddrc_base)
+ return 0;
+
+ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+ (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+ pm_power_off = at91_lpddr_poweroff;
+ else
+ iounmap(mpddrc_base);
+
return 0;
}
@@ -256,7 +302,8 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
- if (pm_power_off == at91_poweroff)
+ if (pm_power_off == at91_poweroff ||
+ pm_power_off == at91_lpddr_poweroff)
pm_power_off = NULL;
/* Reset values to disable wake-up features */
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 76806a0be820..da54ac88f068 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -164,6 +164,12 @@ config BATTERY_SBS
Say Y to include support for SBS battery driver for SBS-compliant
gas gauges.
+config CHARGER_SBS
+ tristate "SBS Compliant charger"
+ depends on I2C
+ help
+ Say Y to include support for SBS compliant battery chargers.
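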
+
config BATTERY_BQ27XXX
tristate "BQ27xxx battery driver"
help
@@ -214,6 +220,18 @@ config BATTERY_DA9150
This driver can also be built as a module. If so, the module will be
called da9150-fg.
+config CHARGER_AXP20X
+ tristate "X-Powers AXP20X and AXP22X AC power supply driver"
+ depends on MFD_AXP20X
+ depends on AXP20X_ADC
+ depends on IIO
+ help
+ Say Y here to enable support for X-Powers AXP20X and AXP22X PMICs' AC
+ power supply.
+
+ This driver can also be built as a module. If so, the module will be
+ called axp20x_ac_power.
+
config AXP288_CHARGER
tristate "X-Powers AXP288 Charger"
depends on MFD_AXP20X && EXTCON_AXP288
@@ -292,13 +310,6 @@ config BATTERY_JZ4740
This driver can be build as a module. If so, the module will be
called jz4740-battery.
-config BATTERY_INTEL_MID
- tristate "Battery driver for Intel MID platforms"
- depends on INTEL_SCU_IPC && SPI
- help
- Say Y here to enable the battery driver on Intel MID
- platforms.
-
config BATTERY_RX51
tristate "Nokia RX-51 (N900) battery driver"
depends on TWL4030_MADC
@@ -370,6 +381,16 @@ config CHARGER_MAX14577
Say Y to enable support for the battery charger control sysfs and
platform data of MAX14577/77836 MUICs.
+config CHARGER_DETECTOR_MAX14656
+ tristate "Maxim MAX14656 USB charger detector"
+ depends on I2C
+ depends on OF
+ help
+ Say Y to enable support for the Maxim MAX14656 USB charger detector.
+ The device is compliant with the USB Battery Charging Specification
+ Revision 1.2 and can be found e.g. in Kindle 4/5th generation
+ readers and certain LG devices.
+
config CHARGER_MAX77693
tristate "Maxim MAX77693 battery charger driver"
depends on MFD_MAX77693
@@ -395,6 +416,7 @@ config CHARGER_QCOM_SMBB
depends on MFD_SPMI_PMIC || COMPILE_TEST
depends on OF
depends on EXTCON
+ depends on REGULATOR
help
Say Y to include support for the Switch-Mode Battery Charger and
Boost (SMBB) hardware found in Qualcomm PM8941 PMICs. The charger
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 36c599d9a495..3789a2c06fdf 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TEST_POWER) += test_power.o
obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
+obj-$(CONFIG_CHARGER_AXP20X) += axp20x_ac_power.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
+obj-$(CONFIG_CHARGER_SBS) += sbs-charger.o
obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
@@ -47,7 +49,6 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
-obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
+obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 6ffdc18f2599..f7a35ebfbab2 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -76,8 +76,8 @@ struct ab8500_btemp_ranges {
* @dev: Pointer to the structure device
* @node: List of AB8500 BTEMPs, hence prepared for reentrance
* @curr_source: What current source we use, in uA
- * @bat_temp: Dispatched battery temperature in degree Celcius
- * @prev_bat_temp Last measured battery temperature in degree Celcius
+ * @bat_temp: Dispatched battery temperature in degree Celsius
+ * @prev_bat_temp Last measured battery temperature in degree Celsius
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
* @fg: Pointer to the struct fg
@@ -123,10 +123,7 @@ static LIST_HEAD(ab8500_btemp_list);
*/
struct ab8500_btemp *ab8500_btemp_get(void)
{
- struct ab8500_btemp *btemp;
- btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
-
- return btemp;
+ return list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
}
EXPORT_SYMBOL(ab8500_btemp_get);
@@ -464,13 +461,13 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
* @tbl_size: size of the resistance to temperature table
* @res: resistance to calculate the temperature from
*
- * This function returns the battery temperature in degrees Celcius
+ * This function returns the battery temperature in degrees Celsius
* based on the NTC resistance.
*/
static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
const struct abx500_res_to_temp *tbl, int tbl_size, int res)
{
- int i, temp;
+ int i;
/*
* Calculate the formula for the straight line
* Simple interpolation if we are within
@@ -488,9 +485,8 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
i++;
}
- temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
(res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
- return temp;
}
/**
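For reference, ab8500_btemp_res_to_temp() is plain linear interpolation between the two table entries that bracket the measured resistance: with made-up entries tbl[i] = {resist = 300, temp = 20} and tbl[i + 1] = {resist = 200, temp = 30}, a reading of res = 250 gives 20 + (30 - 20) * (250 - 300) / (200 - 300) = 25 degrees. The refactor above only drops the temporary variable; the arithmetic is unchanged.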
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
new file mode 100644
index 000000000000..38f4e87cf24d
--- /dev/null
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -0,0 +1,253 @@
+/*
+ * AXP20X and AXP22X PMICs' ACIN power supply driver
+ *
+ * Copyright (C) 2016 Free Electrons
+ * Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iio/consumer.h>
+
+#define AXP20X_PWR_STATUS_ACIN_PRESENT BIT(7)
+#define AXP20X_PWR_STATUS_ACIN_AVAIL BIT(6)
+
+#define DRVNAME "axp20x-ac-power-supply"
+
+struct axp20x_ac_power {
+ struct regmap *regmap;
+ struct power_supply *supply;
+ struct iio_channel *acin_v;
+ struct iio_channel *acin_i;
+};
+
+static irqreturn_t axp20x_ac_power_irq(int irq, void *devid)
+{
+ struct axp20x_ac_power *power = devid;
+
+ power_supply_changed(power->supply);
+
+ return IRQ_HANDLED;
+}
+
+static int axp20x_ac_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct axp20x_ac_power *power = power_supply_get_drvdata(psy);
+ int ret, reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & AXP20X_PWR_STATUS_ACIN_PRESENT) {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ }
+
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return 0;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_PRESENT);
+ return 0;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL);
+ return 0;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = iio_read_channel_processed(power->acin_v, &val->intval);
+ if (ret)
+ return ret;
+
+ /* IIO framework gives mV but Power Supply framework gives uV */
+ val->intval *= 1000;
+
+ return 0;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = iio_read_channel_processed(power->acin_i, &val->intval);
+ if (ret)
+ return ret;
+
+ /* IIO framework gives mA but Power Supply framework gives uA */
+ val->intval *= 1000;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static enum power_supply_property axp20x_ac_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static enum power_supply_property axp22x_ac_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc axp20x_ac_power_desc = {
+ .name = "axp20x-ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = axp20x_ac_power_properties,
+ .num_properties = ARRAY_SIZE(axp20x_ac_power_properties),
+ .get_property = axp20x_ac_power_get_property,
+};
+
+static const struct power_supply_desc axp22x_ac_power_desc = {
+ .name = "axp22x-ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = axp22x_ac_power_properties,
+ .num_properties = ARRAY_SIZE(axp22x_ac_power_properties),
+ .get_property = axp20x_ac_power_get_property,
+};
+
+struct axp_data {
+ const struct power_supply_desc *power_desc;
+ bool acin_adc;
+};
+
+static const struct axp_data axp20x_data = {
+ .power_desc = &axp20x_ac_power_desc,
+ .acin_adc = true,
+};
+
+static const struct axp_data axp22x_data = {
+ .power_desc = &axp22x_ac_power_desc,
+ .acin_adc = false,
+};
+
+static int axp20x_ac_power_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct power_supply_config psy_cfg = {};
+ struct axp20x_ac_power *power;
+ struct axp_data *axp_data;
+ static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
+ NULL };
+ int i, irq, ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+
+ axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+
+ if (axp_data->acin_adc) {
+ power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
+ if (IS_ERR(power->acin_v)) {
+ if (PTR_ERR(power->acin_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->acin_v);
+ }
+
+ power->acin_i = devm_iio_channel_get(&pdev->dev, "acin_i");
+ if (IS_ERR(power->acin_i)) {
+ if (PTR_ERR(power->acin_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->acin_i);
+ }
+ }
+
+ power->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+
+ platform_set_drvdata(pdev, power);
+
+ psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.drv_data = power;
+
+ power->supply = devm_power_supply_register(&pdev->dev,
+ axp_data->power_desc,
+ &psy_cfg);
+ if (IS_ERR(power->supply))
+ return PTR_ERR(power->supply);
+
+ /* Request irqs after registering, as irqs may trigger immediately */
+ for (i = 0; irq_names[i]; i++) {
+ irq = platform_get_irq_byname(pdev, irq_names[i]);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
+ irq_names[i], irq);
+ continue;
+ }
+ irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, irq,
+ axp20x_ac_power_irq, 0,
+ DRVNAME, power);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ irq_names[i], ret);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id axp20x_ac_power_match[] = {
+ {
+ .compatible = "x-powers,axp202-ac-power-supply",
+ .data = (void *)&axp20x_data,
+ }, {
+ .compatible = "x-powers,axp221-ac-power-supply",
+ .data = (void *)&axp22x_data,
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
+
+static struct platform_driver axp20x_ac_power_driver = {
+ .probe = axp20x_ac_power_probe,
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = axp20x_ac_power_match,
+ },
+};
+
+module_platform_driver(axp20x_ac_power_driver);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20X and AXP22X PMICs' AC power supply driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 6af6feb7058d..2397c482656e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -17,10 +17,12 @@
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/iio/consumer.h>
#define DRVNAME "axp20x-usb-power-supply"
@@ -30,6 +32,8 @@
#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
+#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
+#define AXP20X_VBUS_VHOLD_OFFSET 3
#define AXP20X_VBUS_CLIMIT_MASK 3
#define AXP20X_VBUC_CLIMIT_900mA 0
#define AXP20X_VBUC_CLIMIT_500mA 1
@@ -45,6 +49,9 @@ struct axp20x_usb_power {
struct device_node *np;
struct regmap *regmap;
struct power_supply *supply;
+ enum axp20x_variants axp20x_id;
+ struct iio_channel *vbus_v;
+ struct iio_channel *vbus_i;
};
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
@@ -72,6 +79,20 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = AXP20X_VBUS_VHOLD_uV(v);
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
+ ret = iio_read_channel_processed(power->vbus_v,
+ &val->intval);
+ if (ret)
+ return ret;
+
+ /*
+ * IIO framework gives mV but Power Supply framework
+ * gives uV.
+ */
+ val->intval *= 1000;
+ return 0;
+ }
+
ret = axp20x_read_variable_width(power->regmap,
AXP20X_VBUS_V_ADC_H, 12);
if (ret < 0)
@@ -86,12 +107,10 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
switch (v & AXP20X_VBUS_CLIMIT_MASK) {
case AXP20X_VBUC_CLIMIT_100mA:
- if (of_device_is_compatible(power->np,
- "x-powers,axp202-usb-power-supply")) {
- val->intval = 100000;
- } else {
+ if (power->axp20x_id == AXP221_ID)
val->intval = -1; /* No 100mA limit */
- }
+ else
+ val->intval = 100000;
break;
case AXP20X_VBUC_CLIMIT_500mA:
val->intval = 500000;
@@ -105,6 +124,20 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
}
return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
+ ret = iio_read_channel_processed(power->vbus_i,
+ &val->intval);
+ if (ret)
+ return ret;
+
+ /*
+ * IIO framework gives mA but Power Supply framework
+ * gives uA.
+ */
+ val->intval *= 1000;
+ return 0;
+ }
+
ret = axp20x_read_variable_width(power->regmap,
AXP20X_VBUS_I_ADC_H, 12);
if (ret < 0)
@@ -130,8 +163,7 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_HEALTH_GOOD;
- if (of_device_is_compatible(power->np,
- "x-powers,axp202-usb-power-supply")) {
+ if (power->axp20x_id == AXP202_ID) {
ret = regmap_read(power->regmap,
AXP20X_USB_OTG_STATUS, &v);
if (ret)
@@ -155,6 +187,81 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
return 0;
}
+static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ switch (intval) {
+ case 4000000:
+ case 4100000:
+ case 4200000:
+ case 4300000:
+ case 4400000:
+ case 4500000:
+ case 4600000:
+ case 4700000:
+ val = (intval - 4000000) / 100000;
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_VHOLD_MASK,
+ val << AXP20X_VBUS_VHOLD_OFFSET);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ switch (intval) {
+ case 100000:
+ if (power->axp20x_id == AXP221_ID)
+ return -EINVAL;
+ case 500000:
+ case 900000:
+ val = (900000 - intval) / 400000;
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK, val);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_usb_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return axp20x_usb_power_set_voltage_min(power, val->intval);
+
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return axp20x_usb_power_set_current_max(power, val->intval);
+
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ psp == POWER_SUPPLY_PROP_CURRENT_MAX;
+}
+
static enum power_supply_property axp20x_usb_power_properties[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
@@ -178,7 +285,9 @@ static const struct power_supply_desc axp20x_usb_power_desc = {
.type = POWER_SUPPLY_TYPE_USB,
.properties = axp20x_usb_power_properties,
.num_properties = ARRAY_SIZE(axp20x_usb_power_properties),
+ .property_is_writeable = axp20x_usb_power_prop_writeable,
.get_property = axp20x_usb_power_get_property,
+ .set_property = axp20x_usb_power_set_property,
};
static const struct power_supply_desc axp22x_usb_power_desc = {
@@ -186,9 +295,41 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.type = POWER_SUPPLY_TYPE_USB,
.properties = axp22x_usb_power_properties,
.num_properties = ARRAY_SIZE(axp22x_usb_power_properties),
+ .property_is_writeable = axp20x_usb_power_prop_writeable,
.get_property = axp20x_usb_power_get_property,
+ .set_property = axp20x_usb_power_set_property,
};
+static int configure_iio_channels(struct platform_device *pdev,
+ struct axp20x_usb_power *power)
+{
+ power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(power->vbus_v)) {
+ if (PTR_ERR(power->vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->vbus_v);
+ }
+
+ power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i");
+ if (IS_ERR(power->vbus_i)) {
+ if (PTR_ERR(power->vbus_i) == -ENODEV)
+ return -EPROBE_DEFER;
+ return PTR_ERR(power->vbus_i);
+ }
+
+ return 0;
+}
+
+static int configure_adc_registers(struct axp20x_usb_power *power)
+{
+ /* Enable vbus voltage and current measurement */
+ return regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
+ AXP20X_ADC_EN1_VBUS_CURR |
+ AXP20X_ADC_EN1_VBUS_VOLT,
+ AXP20X_ADC_EN1_VBUS_CURR |
+ AXP20X_ADC_EN1_VBUS_VOLT);
+}
+
static int axp20x_usb_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -214,11 +355,13 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
+ power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
+ &pdev->dev);
+
power->np = pdev->dev.of_node;
power->regmap = axp20x->regmap;
- if (of_device_is_compatible(power->np,
- "x-powers,axp202-usb-power-supply")) {
+ if (power->axp20x_id == AXP202_ID) {
/* Enable vbus valid checking */
ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
AXP20X_VBUS_MON_VBUS_VALID,
@@ -226,17 +369,18 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Enable vbus voltage and current measurement */
- ret = regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
- AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT,
- AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT);
+ if (IS_ENABLED(CONFIG_AXP20X_ADC))
+ ret = configure_iio_channels(pdev, power);
+ else
+ ret = configure_adc_registers(power);
+
if (ret)
return ret;
usb_power_desc = &axp20x_usb_power_desc;
irq_names = axp20x_irq_names;
- } else if (of_device_is_compatible(power->np,
- "x-powers,axp221-usb-power-supply")) {
+ } else if (power->axp20x_id == AXP221_ID ||
+ power->axp20x_id == AXP223_ID) {
usb_power_desc = &axp22x_usb_power_desc;
irq_names = axp22x_irq_names;
} else {
@@ -273,9 +417,16 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
}
static const struct of_device_id axp20x_usb_power_match[] = {
- { .compatible = "x-powers,axp202-usb-power-supply" },
- { .compatible = "x-powers,axp221-usb-power-supply" },
- { }
+ {
+ .compatible = "x-powers,axp202-usb-power-supply",
+ .data = (void *)AXP202_ID,
+ }, {
+ .compatible = "x-powers,axp221-usb-power-supply",
+ .data = (void *)AXP221_ID,
+ }, {
+ .compatible = "x-powers,axp223-usb-power-supply",
+ .data = (void *)AXP223_ID,
+ }, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
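Stashing the PMIC variant in the of_device_id .data field and reading it back with of_device_get_match_data() is what lets the repeated of_device_is_compatible() string checks collapse into simple enum comparisons. A minimal sketch of the pattern (enum and names hypothetical):

/* Sketch: variant selection via of_device_id.data; names hypothetical. */
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

enum demo_variant { DEMO_A, DEMO_B };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-a", .data = (void *)DEMO_A },
	{ .compatible = "vendor,demo-b", .data = (void *)DEMO_B },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	enum demo_variant variant =
		(enum demo_variant)(uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probing variant %d\n", variant);
	return 0;
}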
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 75b8e0c7402b..6be2fe27bb07 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -90,20 +90,6 @@
#define CHRG_VLTFC_0C 0xA5 /* 0 DegC */
#define CHRG_VHTFC_45C 0x1F /* 45 DegC */
-#define BAT_IRQ_CFG_CHRG_DONE (1 << 2)
-#define BAT_IRQ_CFG_CHRG_START (1 << 3)
-#define BAT_IRQ_CFG_BAT_SAFE_EXIT (1 << 4)
-#define BAT_IRQ_CFG_BAT_SAFE_ENTER (1 << 5)
-#define BAT_IRQ_CFG_BAT_DISCON (1 << 6)
-#define BAT_IRQ_CFG_BAT_CONN (1 << 7)
-#define BAT_IRQ_CFG_BAT_MASK 0xFC
-
-#define TEMP_IRQ_CFG_QCBTU (1 << 4)
-#define TEMP_IRQ_CFG_CBTU (1 << 5)
-#define TEMP_IRQ_CFG_QCBTO (1 << 6)
-#define TEMP_IRQ_CFG_CBTO (1 << 7)
-#define TEMP_IRQ_CFG_MASK 0xF0
-
#define FG_CNTL_OCV_ADJ_EN (1 << 3)
#define CV_4100MV 4100 /* 4100mV */
@@ -127,6 +113,10 @@
#define ILIM_3000MA 3000 /* 3000mA */
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
+#define USB_HOST_EXTCON_DEV_NAME "INT3496:00"
+
+static const unsigned int cable_ids[] =
+ { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
enum {
VBUS_OV_IRQ = 0,
@@ -143,7 +133,6 @@ enum {
struct axp288_chrg_info {
struct platform_device *pdev;
- struct axp20x_chrg_pdata *pdata;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[CHRG_INTR_END];
@@ -163,20 +152,16 @@ struct axp288_chrg_info {
struct extcon_dev *edev;
bool connected;
enum power_supply_type chg_type;
- struct notifier_block nb;
+ struct notifier_block nb[ARRAY_SIZE(cable_ids)];
struct work_struct work;
} cable;
- int health;
int inlmt;
int cc;
int cv;
int max_cc;
int max_cv;
- bool online;
- bool present;
- bool enable_charger;
- bool is_charger_enabled;
+ int is_charger_enabled;
};
static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -305,6 +290,9 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
{
int ret;
+ if ((int)enable == info->is_charger_enabled)
+ return 0;
+
if (enable)
ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -430,8 +418,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
ret = axp288_charger_is_present(info);
if (ret < 0)
goto psy_get_prop_fail;
- info->present = ret;
- val->intval = info->present;
+ val->intval = ret;
break;
case POWER_SUPPLY_PROP_ONLINE:
/* Check for OTG case first */
@@ -442,8 +429,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
ret = axp288_charger_is_online(info);
if (ret < 0)
goto psy_get_prop_fail;
- info->online = ret;
- val->intval = info->online;
+ val->intval = ret;
break;
case POWER_SUPPLY_PROP_HEALTH:
val->intval = axp288_get_charger_health(info);
@@ -576,20 +562,20 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
struct axp288_chrg_info *info =
container_of(work, struct axp288_chrg_info, cable.work);
int ret, current_limit;
- bool changed = false;
struct extcon_dev *edev = info->cable.edev;
bool old_connected = info->cable.connected;
+ enum power_supply_type old_chg_type = info->cable.chg_type;
/* Determine cable/charger type */
- if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
+ if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
- } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@@ -601,22 +587,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
}
/* Cable status changed */
- if (old_connected != info->cable.connected)
- changed = true;
-
- if (!changed)
+ if (old_connected == info->cable.connected &&
+ old_chg_type == info->cable.chg_type)
return;
mutex_lock(&info->lock);
- if (info->is_charger_enabled && !info->cable.connected) {
- info->enable_charger = false;
- ret = axp288_charger_enable_charger(info, info->enable_charger);
- if (ret < 0)
- dev_err(&info->pdev->dev,
- "cannot disable charger (%d)", ret);
+ if (info->cable.connected) {
+ axp288_charger_enable_charger(info, false);
- } else if (!info->is_charger_enabled && info->cable.connected) {
switch (info->cable.chg_type) {
case POWER_SUPPLY_TYPE_USB:
current_limit = ILIM_500MA;
@@ -635,36 +614,49 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
/* Set vbus current limit first, then enable charger */
ret = axp288_charger_set_vbus_inlmt(info, current_limit);
- if (ret < 0) {
+ if (ret == 0)
+ axp288_charger_enable_charger(info, true);
+ else
dev_err(&info->pdev->dev,
"error setting current limit (%d)", ret);
- } else {
- info->enable_charger = (current_limit > 0);
- ret = axp288_charger_enable_charger(info,
- info->enable_charger);
- if (ret < 0)
- dev_err(&info->pdev->dev,
- "cannot enable charger (%d)", ret);
- }
+ } else {
+ axp288_charger_enable_charger(info, false);
}
- if (changed)
- info->health = axp288_get_charger_health(info);
-
mutex_unlock(&info->lock);
- if (changed)
- power_supply_changed(info->psy_usb);
+ power_supply_changed(info->psy_usb);
}
-static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
- unsigned long event, void *param)
+/*
+ * We need 3 copies of this, because there is no way to find out for which
+ * cable id we are being called from the passed in arguments; and we must
+ * have a separate nb for each extcon_register_notifier call.
+ */
+static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb);
+ container_of(nb, struct axp288_chrg_info, cable.nb[0]);
+ schedule_work(&info->cable.work);
+ return NOTIFY_OK;
+}
+static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct axp288_chrg_info *info =
+ container_of(nb, struct axp288_chrg_info, cable.nb[1]);
schedule_work(&info->cable.work);
+ return NOTIFY_OK;
+}
+static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct axp288_chrg_info *info =
+ container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+ schedule_work(&info->cable.work);
return NOTIFY_OK;
}
@@ -672,7 +664,17 @@ static void axp288_charger_otg_evt_worker(struct work_struct *work)
{
struct axp288_chrg_info *info =
container_of(work, struct axp288_chrg_info, otg.work);
- int ret;
+ struct extcon_dev *edev = info->otg.cable;
+ int ret, usb_host = extcon_get_state(edev, EXTCON_USB_HOST);
+
+ dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
+ usb_host ? "attached" : "detached");
+
+ /*
+ * Set usb_id_short flag to avoid running charger detection logic
+ * in case usb host.
+ */
+ info->otg.id_short = usb_host;
/* Disable VBUS path before enabling the 5V boost */
ret = axp288_charger_vbus_path_select(info, !info->otg.id_short);
@@ -685,135 +687,109 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
{
struct axp288_chrg_info *info =
container_of(nb, struct axp288_chrg_info, otg.id_nb);
- struct extcon_dev *edev = info->otg.cable;
- int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
- dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
- usb_host ? "attached" : "detached");
-
- /*
- * Set usb_id_short flag to avoid running charger detection logic
- * in case usb host.
- */
- info->otg.id_short = usb_host;
schedule_work(&info->otg.work);
return NOTIFY_OK;
}
-static void charger_init_hw_regs(struct axp288_chrg_info *info)
+static int charger_init_hw_regs(struct axp288_chrg_info *info)
{
int ret, cc, cv;
unsigned int val;
/* Program temperature thresholds */
ret = regmap_write(info->regmap, AXP20X_V_LTF_CHRG, CHRG_VLTFC_0C);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
AXP20X_V_LTF_CHRG, ret);
+ return ret;
+ }
ret = regmap_write(info->regmap, AXP20X_V_HTF_CHRG, CHRG_VHTFC_45C);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
AXP20X_V_HTF_CHRG, ret);
+ return ret;
+ }
/* Do not turn-off charger o/p after charge cycle ends */
ret = regmap_update_bits(info->regmap,
AXP20X_CHRG_CTRL2,
- CNTL2_CHG_OUT_TURNON, 1);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+ CNTL2_CHG_OUT_TURNON, CNTL2_CHG_OUT_TURNON);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
AXP20X_CHRG_CTRL2, ret);
-
- /* Enable interrupts */
- ret = regmap_update_bits(info->regmap,
- AXP20X_IRQ2_EN,
- BAT_IRQ_CFG_BAT_MASK, 1);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
- AXP20X_IRQ2_EN, ret);
-
- ret = regmap_update_bits(info->regmap, AXP20X_IRQ3_EN,
- TEMP_IRQ_CFG_MASK, 1);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
- AXP20X_IRQ3_EN, ret);
+ return ret;
+ }
/* Setup ending condition for charging to be 10% of I(chrg) */
ret = regmap_update_bits(info->regmap,
AXP20X_CHRG_CTRL1,
CHRG_CCCV_ITERM_20P, 0);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
AXP20X_CHRG_CTRL1, ret);
+ return ret;
+ }
/* Disable OCV-SOC curve calibration */
ret = regmap_update_bits(info->regmap,
AXP20X_CC_CTRL,
FG_CNTL_OCV_ADJ_EN, 0);
- if (ret < 0)
- dev_warn(&info->pdev->dev, "register(%x) write error(%d)\n",
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
AXP20X_CC_CTRL, ret);
-
- /* Init charging current and voltage */
- info->max_cc = info->pdata->max_cc;
- info->max_cv = info->pdata->max_cv;
+ return ret;
+ }
/* Read current charge voltage and current limit */
ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
if (ret < 0) {
- /* Assume default if cannot read */
- info->cc = info->pdata->def_cc;
- info->cv = info->pdata->def_cv;
- } else {
- /* Determine charge voltage */
- cv = (val & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS;
- switch (cv) {
- case CHRG_CCCV_CV_4100MV:
- info->cv = CV_4100MV;
- break;
- case CHRG_CCCV_CV_4150MV:
- info->cv = CV_4150MV;
- break;
- case CHRG_CCCV_CV_4200MV:
- info->cv = CV_4200MV;
- break;
- case CHRG_CCCV_CV_4350MV:
- info->cv = CV_4350MV;
- break;
- default:
- info->cv = INT_MAX;
- break;
- }
+ dev_err(&info->pdev->dev, "register(%x) read error(%d)\n",
+ AXP20X_CHRG_CTRL1, ret);
+ return ret;
+ }
- /* Determine charge current limit */
- cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
- cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
- info->cc = cc;
+ /* Determine charge voltage */
+ cv = (val & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS;
+ switch (cv) {
+ case CHRG_CCCV_CV_4100MV:
+ info->cv = CV_4100MV;
+ break;
+ case CHRG_CCCV_CV_4150MV:
+ info->cv = CV_4150MV;
+ break;
+ case CHRG_CCCV_CV_4200MV:
+ info->cv = CV_4200MV;
+ break;
+ case CHRG_CCCV_CV_4350MV:
+ info->cv = CV_4350MV;
+ break;
+ }
- /* Program default charging voltage and current */
- cc = min(info->pdata->def_cc, info->max_cc);
- cv = min(info->pdata->def_cv, info->max_cv);
+ /* Determine charge current limit */
+ cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
+ cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
+ info->cc = cc;
- ret = axp288_charger_set_cc(info, cc);
- if (ret < 0)
- dev_warn(&info->pdev->dev,
- "error(%d) in setting CC\n", ret);
+ /*
+ * Do not allow the user to configure higher settings than those
+ * set by the firmware
+ */
+ info->max_cv = info->cv;
+ info->max_cc = info->cc;
- ret = axp288_charger_set_cv(info, cv);
- if (ret < 0)
- dev_warn(&info->pdev->dev,
- "error(%d) in setting CV\n", ret);
- }
+ return 0;
}
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
struct axp288_chrg_info *info;
+ struct device *dev = &pdev->dev;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config charger_cfg = {};
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -821,15 +797,8 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->pdev = pdev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->pdata = pdev->dev.platform_data;
-
- if (!info->pdata) {
- /* Try ACPI provided pdata via device properties */
- if (!device_property_present(&pdev->dev,
- "axp288_charger_data\n"))
- dev_err(&pdev->dev, "failed to get platform data\n");
- return -ENODEV;
- }
+ info->cable.chg_type = -1;
+ info->is_charger_enabled = -1;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
if (info->cable.edev == NULL) {
@@ -838,63 +807,55 @@ static int axp288_charger_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- /* Register for extcon notification */
- INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
- info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
- ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
- &info->cable.nb);
- if (ret) {
- dev_err(&info->pdev->dev,
- "failed to register extcon notifier for SDP %d\n", ret);
- return ret;
- }
-
- ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
- &info->cable.nb);
- if (ret) {
- dev_err(&info->pdev->dev,
- "failed to register extcon notifier for CDP %d\n", ret);
- extcon_unregister_notifier(info->cable.edev,
- EXTCON_CHG_USB_SDP, &info->cable.nb);
- return ret;
- }
-
- ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
- &info->cable.nb);
- if (ret) {
- dev_err(&info->pdev->dev,
- "failed to register extcon notifier for DCP %d\n", ret);
- extcon_unregister_notifier(info->cable.edev,
- EXTCON_CHG_USB_SDP, &info->cable.nb);
- extcon_unregister_notifier(info->cable.edev,
- EXTCON_CHG_USB_CDP, &info->cable.nb);
- return ret;
+ info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_DEV_NAME);
+ if (info->otg.cable == NULL) {
+ dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
+ return -EPROBE_DEFER;
}
platform_set_drvdata(pdev, info);
mutex_init(&info->lock);
+ ret = charger_init_hw_regs(info);
+ if (ret)
+ return ret;
+
/* Register with power supply class */
charger_cfg.drv_data = info;
- info->psy_usb = power_supply_register(&pdev->dev, &axp288_charger_desc,
- &charger_cfg);
+ info->psy_usb = devm_power_supply_register(dev, &axp288_charger_desc,
+ &charger_cfg);
if (IS_ERR(info->psy_usb)) {
- dev_err(&pdev->dev, "failed to register power supply charger\n");
ret = PTR_ERR(info->psy_usb);
- goto psy_reg_failed;
+ dev_err(dev, "failed to register power supply: %d\n", ret);
+ return ret;
+ }
+
+ /* Register for extcon notification */
+ INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
+ info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
+ info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
+ info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
+ for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
+ ret = devm_extcon_register_notifier(dev, info->cable.edev,
+ cable_ids[i], &info->cable.nb[i]);
+ if (ret) {
+ dev_err(dev, "failed to register extcon notifier for %u: %d\n",
+ cable_ids[i], ret);
+ return ret;
+ }
}
+ schedule_work(&info->cable.work);
/* Register for OTG notification */
INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
- ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
- &info->otg.id_nb);
- if (ret)
- dev_warn(&pdev->dev, "failed to register otg notifier\n");
-
- if (info->otg.cable)
- info->otg.id_short = extcon_get_cable_state_(
- info->otg.cable, EXTCON_USB_HOST);
+ ret = devm_extcon_register_notifier(&pdev->dev, info->otg.cable,
+ EXTCON_USB_HOST, &info->otg.id_nb);
+ if (ret) {
+ dev_err(dev, "failed to register EXTCON_USB_HOST notifier\n");
+ return ret;
+ }
+ schedule_work(&info->otg.work);
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
@@ -903,8 +864,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
"failed to get virtual interrupt=%d\n", pirq);
- ret = info->irq[i];
- goto intr_reg_failed;
+ return info->irq[i];
}
ret = devm_request_threaded_irq(&info->pdev->dev, info->irq[i],
NULL, axp288_charger_irq_thread_handler,
@@ -912,51 +872,22 @@ static int axp288_charger_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request interrupt=%d\n",
info->irq[i]);
- goto intr_reg_failed;
+ return ret;
}
}
- charger_init_hw_regs(info);
-
return 0;
-
-intr_reg_failed:
- if (info->otg.cable)
- extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
- &info->otg.id_nb);
- power_supply_unregister(info->psy_usb);
-psy_reg_failed:
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
- &info->cable.nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
- &info->cable.nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
- &info->cable.nb);
- return ret;
}
-static int axp288_charger_remove(struct platform_device *pdev)
-{
- struct axp288_chrg_info *info = dev_get_drvdata(&pdev->dev);
-
- if (info->otg.cable)
- extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
- &info->otg.id_nb);
-
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
- &info->cable.nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
- &info->cable.nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
- &info->cable.nb);
- power_supply_unregister(info->psy_usb);
-
- return 0;
-}
+static const struct platform_device_id axp288_charger_id_table[] = {
+ { .name = "axp288_charger" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, axp288_charger_id_table);
static struct platform_driver axp288_charger_driver = {
.probe = axp288_charger_probe,
- .remove = axp288_charger_remove,
+ .id_table = axp288_charger_id_table,
.driver = {
.name = "axp288_charger",
},
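
The probe above now wires three per-cable notifier blocks (SDP, CDP, DCP) to a single shared worker and lets devm unwind them automatically. The notifier callbacks themselves are outside this hunk; a hypothetical sketch of what such a per-cable handler typically looks like, assuming it only records the cable state and defers everything else to the worker (names and structure below are illustrative, not the driver's actual code):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for the driver's cable bookkeeping. */
struct demo_cable {
	struct work_struct work;
	struct notifier_block nb[3];
	bool connected[3];
};

/* One handler per cable id; index 0 shown. extcon passes the new cable
 * state in 'event', so the handler only caches it and kicks the worker. */
static int demo_handle_cable0_evt(struct notifier_block *nb,
				  unsigned long event, void *param)
{
	struct demo_cable *cable = container_of(nb, struct demo_cable, nb[0]);

	cable->connected[0] = !!event;
	schedule_work(&cable->work);	/* heavy lifting happens in the worker */
	return NOTIFY_OK;
}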
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 539eb41504bb..a8dcabc32721 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -29,6 +29,7 @@
#include <linux/iio/consumer.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <asm/unaligned.h>
#define CHRG_STAT_BAT_SAFE_MODE (1 << 3)
#define CHRG_STAT_BAT_VALID (1 << 4)
@@ -49,23 +50,6 @@
#define CHRG_CCCV_CV_4350MV 0x3 /* 4.35V */
#define CHRG_CCCV_CHG_EN (1 << 7)
-#define CV_4100 4100 /* 4100mV */
-#define CV_4150 4150 /* 4150mV */
-#define CV_4200 4200 /* 4200mV */
-#define CV_4350 4350 /* 4350mV */
-
-#define TEMP_IRQ_CFG_QWBTU (1 << 0)
-#define TEMP_IRQ_CFG_WBTU (1 << 1)
-#define TEMP_IRQ_CFG_QWBTO (1 << 2)
-#define TEMP_IRQ_CFG_WBTO (1 << 3)
-#define TEMP_IRQ_CFG_MASK 0xf
-
-#define FG_IRQ_CFG_LOWBATT_WL2 (1 << 0)
-#define FG_IRQ_CFG_LOWBATT_WL1 (1 << 1)
-#define FG_IRQ_CFG_LOWBATT_MASK 0x3
-#define LOWBAT_IRQ_STAT_LOWBATT_WL2 (1 << 0)
-#define LOWBAT_IRQ_STAT_LOWBATT_WL1 (1 << 1)
-
#define FG_CNTL_OCV_ADJ_STAT (1 << 2)
#define FG_CNTL_OCV_ADJ_EN (1 << 3)
#define FG_CNTL_CAP_ADJ_STAT (1 << 4)
@@ -73,17 +57,15 @@
#define FG_CNTL_CC_EN (1 << 6)
#define FG_CNTL_GAUGE_EN (1 << 7)
+#define FG_15BIT_WORD_VALID (1 << 15)
+#define FG_15BIT_VAL_MASK 0x7fff
+
#define FG_REP_CAP_VALID (1 << 7)
#define FG_REP_CAP_VAL_MASK 0x7F
#define FG_DES_CAP1_VALID (1 << 7)
-#define FG_DES_CAP1_VAL_MASK 0x7F
-#define FG_DES_CAP0_VAL_MASK 0xFF
#define FG_DES_CAP_RES_LSB 1456 /* 1.456mAhr */
-#define FG_CC_MTR1_VALID (1 << 7)
-#define FG_CC_MTR1_VAL_MASK 0x7F
-#define FG_CC_MTR0_VAL_MASK 0xFF
#define FG_DES_CC_RES_LSB 1456 /* 1.456mAhr */
#define FG_OCV_CAP_VALID (1 << 7)
@@ -104,9 +86,7 @@
/* 1.1mV per LSB expressed in uV */
#define VOLTAGE_FROM_ADC(a) ((a * 11) / 10)
-/* properties converted to tenths of degrees, uV, uA, uW */
-#define PROP_TEMP(a) ((a) * 10)
-#define UNPROP_TEMP(a) ((a) / 10)
+/* properties converted to uV, uA */
#define PROP_VOLT(a) ((a) * 1000)
#define PROP_CURR(a) ((a) * 1000)
@@ -122,13 +102,13 @@ enum {
struct axp288_fg_info {
struct platform_device *pdev;
- struct axp20x_fg_pdata *pdata;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[AXP288_FG_INTR_NUM];
struct power_supply *bat;
struct mutex lock;
int status;
+ int max_volt;
struct delayed_work status_monitor;
struct dentry *debug_file;
};
@@ -138,22 +118,14 @@ static enum power_supply_property fuel_gauge_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_TEMP_MAX,
- POWER_SUPPLY_PROP_TEMP_MIN,
- POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
- POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_MODEL_NAME,
};
static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
@@ -169,8 +141,10 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
break;
}
- if (ret < 0)
+ if (ret < 0) {
dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+ return ret;
+ }
return val;
}
@@ -187,6 +161,44 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
return ret;
}
+static int fuel_gauge_read_15bit_word(struct axp288_fg_info *info, int reg)
+{
+ unsigned char buf[2];
+ int ret;
+
+ ret = regmap_bulk_read(info->regmap, reg, buf, 2);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ ret = get_unaligned_be16(buf);
+ if (!(ret & FG_15BIT_WORD_VALID)) {
+ dev_err(&info->pdev->dev, "Error reg 0x%02x contents not valid\n",
+ reg);
+ return -ENXIO;
+ }
+
+ return ret & FG_15BIT_VAL_MASK;
+}
+
+static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
+{
+ unsigned char buf[2];
+ int ret;
+
+ ret = regmap_bulk_read(info->regmap, reg, buf, 2);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ /* 12-bit data values have upper 8 bits in buf[0], lower 4 in buf[1] */
+ return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
+}
+
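+
The two helpers above replace the old per-byte reads: both halves of a register pair are pulled in one regmap_bulk_read() and reassembled, with bit 15 acting as a validity flag for the 15-bit words and an 8+4 split for the 12-bit words. A stand-alone sketch of the same bit handling, using made-up register contents and the 1456 uAh/LSB scaling shown earlier (the buffer values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define FG_15BIT_WORD_VALID	(1 << 15)
#define FG_15BIT_VAL_MASK	0x7fff
#define FG_DES_CAP_RES_LSB	1456	/* 1.456 mAh, i.e. 1456 uAh per LSB */

/* Decode a 15-bit word from two big-endian bytes; returns -1 if the
 * valid bit is not set (the driver returns -ENXIO in that case). */
static int decode_15bit(const uint8_t buf[2])
{
	int word = (buf[0] << 8) | buf[1];

	if (!(word & FG_15BIT_WORD_VALID))
		return -1;
	return word & FG_15BIT_VAL_MASK;
}

/* Decode a 12-bit word: upper 8 bits in buf[0], lower 4 bits in the
 * high nibble of buf[1]. */
static int decode_12bit(const uint8_t buf[2])
{
	return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
}

int main(void)
{
	uint8_t cap[2] = { 0x8a, 0x28 };	/* valid bit set, value 0x0a28 */
	uint8_t ocv[2] = { 0xab, 0xc0 };	/* 12-bit value 0xabc */
	int raw = decode_15bit(cap);

	/* 0x0a28 = 2600 LSBs -> 2600 * 1456 uAh = 3785600 uAh (~3786 mAh) */
	printf("capacity: %d uAh\n", raw * FG_DES_CAP_RES_LSB);
	printf("ocv raw:  0x%03x\n", decode_12bit(ocv));
	return 0;
}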
static int pmic_read_adc_val(const char *name, int *raw_val,
struct axp288_fg_info *info)
{
@@ -247,24 +259,15 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
seq_printf(s, " FG_RDC0[%02x] : %02x\n",
AXP288_FG_RDC0_REG,
fuel_gauge_reg_readb(info, AXP288_FG_RDC0_REG));
- seq_printf(s, " FG_OCVH[%02x] : %02x\n",
+ seq_printf(s, " FG_OCV[%02x] : %04x\n",
AXP288_FG_OCVH_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_OCVH_REG));
- seq_printf(s, " FG_OCVL[%02x] : %02x\n",
- AXP288_FG_OCVL_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_OCVL_REG));
- seq_printf(s, "FG_DES_CAP1[%02x] : %02x\n",
+ fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG));
+ seq_printf(s, " FG_DES_CAP[%02x] : %04x\n",
AXP288_FG_DES_CAP1_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG));
- seq_printf(s, "FG_DES_CAP0[%02x] : %02x\n",
- AXP288_FG_DES_CAP0_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP0_REG));
- seq_printf(s, " FG_CC_MTR1[%02x] : %02x\n",
+ fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG));
+ seq_printf(s, " FG_CC_MTR[%02x] : %04x\n",
AXP288_FG_CC_MTR1_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR1_REG));
- seq_printf(s, " FG_CC_MTR0[%02x] : %02x\n",
- AXP288_FG_CC_MTR0_REG,
- fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR0_REG));
+ fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG));
seq_printf(s, " FG_OCV_CAP[%02x] : %02x\n",
AXP288_FG_OCV_CAP_REG,
fuel_gauge_reg_readb(info, AXP288_FG_OCV_CAP_REG));
@@ -417,143 +420,27 @@ current_read_fail:
return ret;
}
-static int temp_to_adc(struct axp288_fg_info *info, int tval)
-{
- int rntc = 0, i, ret, adc_val;
- int rmin, rmax, tmin, tmax;
- int tcsz = info->pdata->tcsz;
-
-	/* get the Rntc resistance value for this temp */
- if (tval > info->pdata->thermistor_curve[0][1]) {
- rntc = info->pdata->thermistor_curve[0][0];
- } else if (tval <= info->pdata->thermistor_curve[tcsz-1][1]) {
- rntc = info->pdata->thermistor_curve[tcsz-1][0];
- } else {
- for (i = 1; i < tcsz; i++) {
- if (tval > info->pdata->thermistor_curve[i][1]) {
- rmin = info->pdata->thermistor_curve[i-1][0];
- rmax = info->pdata->thermistor_curve[i][0];
- tmin = info->pdata->thermistor_curve[i-1][1];
- tmax = info->pdata->thermistor_curve[i][1];
- rntc = rmin + ((rmax - rmin) *
- (tval - tmin) / (tmax - tmin));
- break;
- }
- }
- }
-
- /* we need the current to calculate the proper adc voltage */
- ret = fuel_gauge_reg_readb(info, AXP20X_ADC_RATE);
- if (ret < 0) {
- dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
- ret = 0x30;
- }
-
- /*
- * temperature is proportional to NTS thermistor resistance
- * ADC_RATE[5-4] determines current, 00=20uA,01=40uA,10=60uA,11=80uA
- * [12-bit ADC VAL] = R_NTC(Ω) * current / 800
- */
- adc_val = rntc * (20 + (20 * ((ret >> 4) & 0x3))) / 800;
-
- return adc_val;
-}
-
-static int adc_to_temp(struct axp288_fg_info *info, int adc_val)
-{
- int ret, r, i, tval = 0;
- int rmin, rmax, tmin, tmax;
- int tcsz = info->pdata->tcsz;
-
- ret = fuel_gauge_reg_readb(info, AXP20X_ADC_RATE);
- if (ret < 0) {
- dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
- ret = 0x30;
- }
-
- /*
- * temperature is proportional to NTS thermistor resistance
- * ADC_RATE[5-4] determines current, 00=20uA,01=40uA,10=60uA,11=80uA
- * R_NTC(Ω) = [12-bit ADC VAL] * 800 / current
- */
- r = adc_val * 800 / (20 + (20 * ((ret >> 4) & 0x3)));
-
- if (r < info->pdata->thermistor_curve[0][0]) {
- tval = info->pdata->thermistor_curve[0][1];
- } else if (r >= info->pdata->thermistor_curve[tcsz-1][0]) {
- tval = info->pdata->thermistor_curve[tcsz-1][1];
- } else {
- for (i = 1; i < tcsz; i++) {
- if (r < info->pdata->thermistor_curve[i][0]) {
- rmin = info->pdata->thermistor_curve[i-1][0];
- rmax = info->pdata->thermistor_curve[i][0];
- tmin = info->pdata->thermistor_curve[i-1][1];
- tmax = info->pdata->thermistor_curve[i][1];
- tval = tmin + ((tmax - tmin) *
- (r - rmin) / (rmax - rmin));
- break;
- }
- }
- }
-
- return tval;
-}
-
-static int fuel_gauge_get_btemp(struct axp288_fg_info *info, int *btemp)
-{
- int ret, raw_val = 0;
-
- ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
- if (ret < 0)
- goto temp_read_fail;
-
- *btemp = adc_to_temp(info, raw_val);
-
-temp_read_fail:
- return ret;
-}
-
static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
{
- int ret, value;
-
- /* 12-bit data value, upper 8 in OCVH, lower 4 in OCVL */
- ret = fuel_gauge_reg_readb(info, AXP288_FG_OCVH_REG);
- if (ret < 0)
- goto vocv_read_fail;
- value = ret << 4;
+ int ret;
- ret = fuel_gauge_reg_readb(info, AXP288_FG_OCVL_REG);
- if (ret < 0)
- goto vocv_read_fail;
- value |= (ret & 0xf);
+ ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
+ if (ret >= 0)
+ *vocv = VOLTAGE_FROM_ADC(ret);
- *vocv = VOLTAGE_FROM_ADC(value);
-vocv_read_fail:
return ret;
}
static int fuel_gauge_battery_health(struct axp288_fg_info *info)
{
- int temp, vocv;
- int ret, health = POWER_SUPPLY_HEALTH_UNKNOWN;
-
- ret = fuel_gauge_get_btemp(info, &temp);
- if (ret < 0)
- goto health_read_fail;
+ int ret, vocv, health = POWER_SUPPLY_HEALTH_UNKNOWN;
ret = fuel_gauge_get_vocv(info, &vocv);
if (ret < 0)
goto health_read_fail;
- if (vocv > info->pdata->max_volt)
+ if (vocv > info->max_volt)
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- else if (temp > info->pdata->max_temp)
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- else if (temp < info->pdata->min_temp)
- health = POWER_SUPPLY_HEALTH_COLD;
- else if (vocv < info->pdata->min_volt)
- health = POWER_SUPPLY_HEALTH_DEAD;
else
health = POWER_SUPPLY_HEALTH_GOOD;
@@ -561,28 +448,6 @@ health_read_fail:
return health;
}
-static int fuel_gauge_set_high_btemp_alert(struct axp288_fg_info *info)
-{
- int ret, adc_val;
-
- /* program temperature threshold as 1/16 ADC value */
- adc_val = temp_to_adc(info, info->pdata->max_temp);
- ret = fuel_gauge_reg_writeb(info, AXP20X_V_HTF_DISCHRG, adc_val >> 4);
-
- return ret;
-}
-
-static int fuel_gauge_set_low_btemp_alert(struct axp288_fg_info *info)
-{
- int ret, adc_val;
-
- /* program temperature threshold as 1/16 ADC value */
- adc_val = temp_to_adc(info, info->pdata->min_temp);
- ret = fuel_gauge_reg_writeb(info, AXP20X_V_LTF_DISCHRG, adc_val >> 4);
-
- return ret;
-}
-
static int fuel_gauge_get_property(struct power_supply *ps,
enum power_supply_property prop,
union power_supply_propval *val)
@@ -643,58 +508,25 @@ static int fuel_gauge_get_property(struct power_supply *ps,
goto fuel_gauge_read_err;
val->intval = (ret & 0x0f);
break;
- case POWER_SUPPLY_PROP_TEMP:
- ret = fuel_gauge_get_btemp(info, &value);
- if (ret < 0)
- goto fuel_gauge_read_err;
- val->intval = PROP_TEMP(value);
- break;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- val->intval = PROP_TEMP(info->pdata->max_temp);
- break;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- val->intval = PROP_TEMP(info->pdata->min_temp);
- break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
- ret = fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR1_REG);
+ ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
if (ret < 0)
goto fuel_gauge_read_err;
- value = (ret & FG_CC_MTR1_VAL_MASK) << 8;
- ret = fuel_gauge_reg_readb(info, AXP288_FG_CC_MTR0_REG);
- if (ret < 0)
- goto fuel_gauge_read_err;
- value |= (ret & FG_CC_MTR0_VAL_MASK);
- val->intval = value * FG_DES_CAP_RES_LSB;
+ val->intval = ret * FG_DES_CAP_RES_LSB;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+ ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
if (ret < 0)
goto fuel_gauge_read_err;
- value = (ret & FG_DES_CAP1_VAL_MASK) << 8;
- ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP0_REG);
- if (ret < 0)
- goto fuel_gauge_read_err;
- value |= (ret & FG_DES_CAP0_VAL_MASK);
- val->intval = value * FG_DES_CAP_RES_LSB;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = PROP_CURR(info->pdata->design_cap);
+ val->intval = ret * FG_DES_CAP_RES_LSB;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = PROP_VOLT(info->pdata->max_volt);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = PROP_VOLT(info->pdata->min_volt);
- break;
- case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = info->pdata->battid;
+ val->intval = PROP_VOLT(info->max_volt);
break;
default:
mutex_unlock(&info->lock);
@@ -718,35 +550,6 @@ static int fuel_gauge_set_property(struct power_supply *ps,
mutex_lock(&info->lock);
switch (prop) {
- case POWER_SUPPLY_PROP_STATUS:
- info->status = val->intval;
- break;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- if ((val->intval < PD_DEF_MIN_TEMP) ||
- (val->intval > PD_DEF_MAX_TEMP)) {
- ret = -EINVAL;
- break;
- }
- info->pdata->min_temp = UNPROP_TEMP(val->intval);
- ret = fuel_gauge_set_low_btemp_alert(info);
- if (ret < 0)
- dev_err(&info->pdev->dev,
- "temp alert min set fail:%d\n", ret);
- break;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- if ((val->intval < PD_DEF_MIN_TEMP) ||
- (val->intval > PD_DEF_MAX_TEMP)) {
- ret = -EINVAL;
- break;
- }
- info->pdata->max_temp = UNPROP_TEMP(val->intval);
- ret = fuel_gauge_set_high_btemp_alert(info);
- if (ret < 0)
- dev_err(&info->pdev->dev,
- "temp alert max set fail:%d\n", ret);
- break;
case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
if ((val->intval < 0) || (val->intval > 15)) {
ret = -EINVAL;
@@ -774,11 +577,6 @@ static int fuel_gauge_property_is_writeable(struct power_supply *psy,
int ret;
switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- case POWER_SUPPLY_PROP_TEMP_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- case POWER_SUPPLY_PROP_TEMP_MAX:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
ret = 1;
break;
@@ -863,158 +661,6 @@ static const struct power_supply_desc fuel_gauge_desc = {
.external_power_changed = fuel_gauge_external_power_changed,
};
-static int fuel_gauge_set_lowbatt_thresholds(struct axp288_fg_info *info)
-{
- int ret;
- u8 reg_val;
-
- ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
- if (ret < 0) {
- dev_err(&info->pdev->dev, "%s:read err:%d\n", __func__, ret);
- return ret;
- }
- ret = (ret & FG_REP_CAP_VAL_MASK);
-
- if (ret > FG_LOW_CAP_WARN_THR)
- reg_val = FG_LOW_CAP_WARN_THR;
- else if (ret > FG_LOW_CAP_CRIT_THR)
- reg_val = FG_LOW_CAP_CRIT_THR;
- else
- reg_val = FG_LOW_CAP_SHDN_THR;
-
- reg_val |= FG_LOW_CAP_THR1_VAL;
- ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, reg_val);
- if (ret < 0)
- dev_err(&info->pdev->dev, "%s:write err:%d\n", __func__, ret);
-
- return ret;
-}
-
-static int fuel_gauge_program_vbatt_full(struct axp288_fg_info *info)
-{
- int ret;
- u8 val;
-
- ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
- if (ret < 0)
- goto fg_prog_ocv_fail;
- else
- val = (ret & ~CHRG_CCCV_CV_MASK);
-
- switch (info->pdata->max_volt) {
- case CV_4100:
- val |= (CHRG_CCCV_CV_4100MV << CHRG_CCCV_CV_BIT_POS);
- break;
- case CV_4150:
- val |= (CHRG_CCCV_CV_4150MV << CHRG_CCCV_CV_BIT_POS);
- break;
- case CV_4200:
- val |= (CHRG_CCCV_CV_4200MV << CHRG_CCCV_CV_BIT_POS);
- break;
- case CV_4350:
- val |= (CHRG_CCCV_CV_4350MV << CHRG_CCCV_CV_BIT_POS);
- break;
- default:
- val |= (CHRG_CCCV_CV_4200MV << CHRG_CCCV_CV_BIT_POS);
- break;
- }
-
- ret = fuel_gauge_reg_writeb(info, AXP20X_CHRG_CTRL1, val);
-fg_prog_ocv_fail:
- return ret;
-}
-
-static int fuel_gauge_program_design_cap(struct axp288_fg_info *info)
-{
- int ret;
-
- ret = fuel_gauge_reg_writeb(info,
- AXP288_FG_DES_CAP1_REG, info->pdata->cap1);
- if (ret < 0)
- goto fg_prog_descap_fail;
-
- ret = fuel_gauge_reg_writeb(info,
- AXP288_FG_DES_CAP0_REG, info->pdata->cap0);
-
-fg_prog_descap_fail:
- return ret;
-}
-
-static int fuel_gauge_program_ocv_curve(struct axp288_fg_info *info)
-{
- int ret = 0, i;
-
- for (i = 0; i < OCV_CURVE_SIZE; i++) {
- ret = fuel_gauge_reg_writeb(info,
- AXP288_FG_OCV_CURVE_REG + i, info->pdata->ocv_curve[i]);
- if (ret < 0)
- goto fg_prog_ocv_fail;
- }
-
-fg_prog_ocv_fail:
- return ret;
-}
-
-static int fuel_gauge_program_rdc_vals(struct axp288_fg_info *info)
-{
- int ret;
-
- ret = fuel_gauge_reg_writeb(info,
- AXP288_FG_RDC1_REG, info->pdata->rdc1);
- if (ret < 0)
- goto fg_prog_ocv_fail;
-
- ret = fuel_gauge_reg_writeb(info,
- AXP288_FG_RDC0_REG, info->pdata->rdc0);
-
-fg_prog_ocv_fail:
- return ret;
-}
-
-static void fuel_gauge_init_config_regs(struct axp288_fg_info *info)
-{
- int ret;
-
- /*
- * check if the config data is already
- * programmed and if so just return.
- */
-
- ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
- if (ret < 0) {
- dev_warn(&info->pdev->dev, "CAP1 reg read err!!\n");
- } else if (!(ret & FG_DES_CAP1_VALID)) {
- dev_info(&info->pdev->dev, "FG data needs to be initialized\n");
- } else {
- dev_info(&info->pdev->dev, "FG data is already initialized\n");
- return;
- }
-
- ret = fuel_gauge_program_vbatt_full(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "set vbatt full fail:%d\n", ret);
-
- ret = fuel_gauge_program_design_cap(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "set design cap fail:%d\n", ret);
-
- ret = fuel_gauge_program_rdc_vals(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "set rdc fail:%d\n", ret);
-
- ret = fuel_gauge_program_ocv_curve(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "set ocv curve fail:%d\n", ret);
-
- ret = fuel_gauge_set_lowbatt_thresholds(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "lowbatt thr set fail:%d\n", ret);
-
- ret = fuel_gauge_reg_writeb(info, AXP20X_CC_CTRL, 0xef);
- if (ret < 0)
- dev_err(&info->pdev->dev, "gauge cntl set fail:%d\n", ret);
-}
-
static void fuel_gauge_init_irq(struct axp288_fg_info *info)
{
int ret, i, pirq;
@@ -1052,29 +698,6 @@ intr_failed:
}
}
-static void fuel_gauge_init_hw_regs(struct axp288_fg_info *info)
-{
- int ret;
- unsigned int val;
-
- ret = fuel_gauge_set_high_btemp_alert(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "high batt temp set fail:%d\n", ret);
-
- ret = fuel_gauge_set_low_btemp_alert(info);
- if (ret < 0)
- dev_err(&info->pdev->dev, "low batt temp set fail:%d\n", ret);
-
- /* enable interrupts */
- val = fuel_gauge_reg_readb(info, AXP20X_IRQ3_EN);
- val |= TEMP_IRQ_CFG_MASK;
- fuel_gauge_reg_writeb(info, AXP20X_IRQ3_EN, val);
-
- val = fuel_gauge_reg_readb(info, AXP20X_IRQ4_EN);
- val |= FG_IRQ_CFG_LOWBATT_MASK;
- val = fuel_gauge_reg_writeb(info, AXP20X_IRQ4_EN, val);
-}
-
static int axp288_fuel_gauge_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -1090,15 +713,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->status = POWER_SUPPLY_STATUS_UNKNOWN;
- info->pdata = pdev->dev.platform_data;
- if (!info->pdata)
- return -ENODEV;
platform_set_drvdata(pdev, info);
mutex_init(&info->lock);
INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
+ ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & FG_DES_CAP1_VALID)) {
+ dev_err(&pdev->dev, "axp288 not configured by firmware\n");
+ return -ENODEV;
+ }
+
+ ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
+ if (ret < 0)
+ return ret;
+ switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
+ case CHRG_CCCV_CV_4100MV:
+ info->max_volt = 4100;
+ break;
+ case CHRG_CCCV_CV_4150MV:
+ info->max_volt = 4150;
+ break;
+ case CHRG_CCCV_CV_4200MV:
+ info->max_volt = 4200;
+ break;
+ case CHRG_CCCV_CV_4350MV:
+ info->max_volt = 4350;
+ break;
+ }
+
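+
max_volt is now derived from the charge-voltage field that the firmware already programmed into AXP20X_CHRG_CTRL1, instead of coming from platform data. A stand-alone sketch of that decode follows; CHRG_CCCV_CV_MASK and CHRG_CCCV_CV_BIT_POS are defined elsewhere in the driver, so the field position and mask below are assumed placeholders, and of the four encodings only 0x3 = 4.35 V is visible in this patch:

#include <stdio.h>
#include <stdint.h>

/* Two-bit CV encodings; 0x3 (4.35 V) is shown in the driver header above,
 * the other three are assumed to follow the same pattern. */
#define CHRG_CCCV_CV_4100MV	0x0
#define CHRG_CCCV_CV_4150MV	0x1
#define CHRG_CCCV_CV_4200MV	0x2
#define CHRG_CCCV_CV_4350MV	0x3

/* Placeholder field position/mask: the real CHRG_CCCV_CV_BIT_POS and
 * CHRG_CCCV_CV_MASK are defined elsewhere in the driver and may differ. */
#define CV_BIT_POS	5
#define CV_MASK		(0x3 << CV_BIT_POS)

static int cv_field_to_mv(uint8_t chrg_ctrl1)
{
	switch ((chrg_ctrl1 & CV_MASK) >> CV_BIT_POS) {
	case CHRG_CCCV_CV_4100MV: return 4100;
	case CHRG_CCCV_CV_4150MV: return 4150;
	case CHRG_CCCV_CV_4200MV: return 4200;
	case CHRG_CCCV_CV_4350MV: return 4350;
	}
	return -1;	/* unreachable for a two-bit field */
}

int main(void)
{
	/* Example register value with the CV field set to 4.35 V. */
	uint8_t reg = CHRG_CCCV_CV_4350MV << CV_BIT_POS;

	printf("max_volt = %d mV\n", cv_field_to_mv(reg));
	return 0;
}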
psy_cfg.drv_data = info;
info->bat = power_supply_register(&pdev->dev, &fuel_gauge_desc, &psy_cfg);
if (IS_ERR(info->bat)) {
@@ -1108,12 +755,10 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
}
fuel_gauge_create_debugfs(info);
- fuel_gauge_init_config_regs(info);
fuel_gauge_init_irq(info);
- fuel_gauge_init_hw_regs(info);
schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
- return ret;
+ return 0;
}
static const struct platform_device_id axp288_fg_id_table[] = {
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b79dd4..c4770a94cc8e 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1569,6 +1569,11 @@ static int bq2415x_probe(struct i2c_client *client,
acpi_id =
acpi_match_device(client->dev.driver->acpi_match_table,
&client->dev);
+ if (!acpi_id) {
+ dev_err(&client->dev, "failed to match device name\n");
+ ret = -ENODEV;
+ goto error_1;
+ }
name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
}
if (!name) {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index e9584330aeed..a4f08492abeb 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -144,10 +144,7 @@
* so the first read after a fault returns the latched value and subsequent
* reads return the current value. In order to return the fault status
* to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine. Each routine has its own
- * flag indicating whether it should use the value stored by the last run
- * of the interrupt handler or do an actual reg read. That way each routine
- * can report back whatever fault may have occurred.
+ * it in the appropriate health/status routine.
*/
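
Per the comment above, REG_F latches the first fault: the first read after a fault returns the latched value and the next read returns the live one. The reworked IRQ thread later in this patch therefore reads the register up to twice and caches the result, rather than keeping per-routine validity flags. A stand-alone model of that read-twice step, with the I2C access stubbed out (in the driver the read is bq24190_read(bdi, BQ24190_REG_F, &val)):

#include <stdio.h>
#include <stdint.h>

static int read_fault_reg(uint8_t *val);

/*
 * Read the fault register up to twice: if the first read returns a
 * non-zero (possibly latched) value, read again so the cached copy
 * reflects the current fault state.
 */
static int refresh_fault_cache(uint8_t *cache)
{
	uint8_t f_reg;
	int i = 0, ret;

	do {
		ret = read_fault_reg(&f_reg);
		if (ret < 0)
			return ret;
	} while (f_reg && ++i < 2);

	*cache = f_reg;
	return 0;
}

/* Trivial stub: pretend the latch held 0x10 and the live value is 0. */
static int read_fault_reg(uint8_t *val)
{
	static int calls;

	*val = (calls++ == 0) ? 0x10 : 0x00;
	return 0;
}

int main(void)
{
	uint8_t cached = 0xff;

	refresh_fault_cache(&cached);
	printf("cached fault reg: 0x%02x\n", cached);	/* prints 0x00 */
	return 0;
}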
struct bq24190_dev_info {
struct i2c_client *client;
@@ -159,10 +156,6 @@ struct bq24190_dev_info {
unsigned int gpio_int;
unsigned int irq;
struct mutex f_reg_lock;
- bool first_time;
- bool charger_health_valid;
- bool battery_health_valid;
- bool battery_status_valid;
u8 f_reg;
u8 ss_reg;
u8 watchdog;
@@ -199,7 +192,7 @@ static const int bq24190_cvc_vreg_values[] = {
4400000
};
-/* REG06[1:0] (TREG) in tenths of degrees Celcius */
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
static const int bq24190_ictrc_treg_values[] = {
600, 800, 1000, 1200
};
@@ -636,21 +629,11 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->charger_health_valid) {
- v = bdi->f_reg;
- bdi->charger_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
/*
@@ -937,18 +920,8 @@ static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
int status, ret;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_status_valid) {
- chrg_fault = bdi->f_reg;
- bdi->battery_status_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
- if (ret < 0)
- return ret;
- }
+ chrg_fault = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
@@ -996,21 +969,11 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_health_valid) {
- v = bdi->f_reg;
- bdi->battery_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
@@ -1197,9 +1160,12 @@ static const struct power_supply_desc bq24190_battery_desc = {
static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
{
struct bq24190_dev_info *bdi = data;
- bool alert_userspace = false;
+ const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
+ const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
+ | BQ24190_REG_F_NTC_FAULT_MASK;
+ bool alert_charger = false, alert_battery = false;
u8 ss_reg = 0, f_reg = 0;
- int ret;
+ int i, ret;
pm_runtime_get_sync(bdi->dev);
@@ -1209,6 +1175,32 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
goto out;
}
+ i = 0;
+ do {
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+ } while (f_reg && ++i < 2);
+
+ if (f_reg != bdi->f_reg) {
+ dev_info(bdi->dev,
+ "Fault: boost %d, charge %d, battery %d, ntc %d\n",
+ !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+
+ mutex_lock(&bdi->f_reg_lock);
+ if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
+ alert_battery = true;
+ if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f))
+ alert_charger = true;
+ bdi->f_reg = f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
+ }
+
if (ss_reg != bdi->ss_reg) {
/*
* The device is in host mode so when PG_STAT goes from 1->0
@@ -1225,47 +1217,17 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
ret);
}
+ if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss))
+ alert_battery = true;
+ if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss))
+ alert_charger = true;
bdi->ss_reg = ss_reg;
- alert_userspace = true;
- }
-
- mutex_lock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
- if (ret < 0) {
- mutex_unlock(&bdi->f_reg_lock);
- dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
- goto out;
- }
-
- if (f_reg != bdi->f_reg) {
- bdi->f_reg = f_reg;
- bdi->charger_health_valid = true;
- bdi->battery_health_valid = true;
- bdi->battery_status_valid = true;
-
- alert_userspace = true;
}
- mutex_unlock(&bdi->f_reg_lock);
-
- /*
- * Sometimes bq24190 gives a steady trickle of interrupts even
- * though the watchdog timer is turned off and neither the STATUS
- * nor FAULT registers have changed. Weed out these spurious
- * interrupts so userspace isn't alerted for no reason.
- * In addition, the chip always generates an interrupt after
- * register reset so we should ignore that one (the very first
- * interrupt received).
- */
- if (alert_userspace) {
- if (!bdi->first_time) {
- power_supply_changed(bdi->charger);
- power_supply_changed(bdi->battery);
- } else {
- bdi->first_time = false;
- }
- }
+ if (alert_charger)
+ power_supply_changed(bdi->charger);
+ if (alert_battery)
+ power_supply_changed(bdi->battery);
out:
pm_runtime_put_sync(bdi->dev);
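
Rather than a single alert_userspace flag, the handler now masks the old and new register values into battery-visible and charger-visible halves and compares each half on its own, so only the supply whose bits actually changed gets a power_supply_changed() event. A small stand-alone sketch of that comparison (the mask value below is illustrative; the driver uses BQ24190_REG_F_BAT_FAULT_MASK | BQ24190_REG_F_NTC_FAULT_MASK for the fault register):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATTERY_MASK	0x0e	/* illustrative battery-owned bits */

struct alerts {
	bool battery;
	bool charger;
};

/* Compare cached vs. freshly read register, masking out the bits that
 * belong to the other supply, and report which supplies changed. */
static struct alerts classify_change(uint8_t old_reg, uint8_t new_reg)
{
	struct alerts a = { false, false };

	if ((old_reg & BATTERY_MASK) != (new_reg & BATTERY_MASK))
		a.battery = true;
	if ((old_reg & ~BATTERY_MASK) != (new_reg & ~BATTERY_MASK))
		a.charger = true;
	return a;
}

int main(void)
{
	/* A charger-only change: bit 4 flips, battery bits untouched. */
	struct alerts a = classify_change(0x00, 0x10);

	printf("battery=%d charger=%d\n", a.battery, a.charger);
	return 0;
}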
@@ -1300,6 +1262,10 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
goto out;
ret = bq24190_set_mode_host(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
out:
pm_runtime_put_sync(bdi->dev);
return ret;
@@ -1375,10 +1341,8 @@ static int bq24190_probe(struct i2c_client *client,
bdi->model = id->driver_data;
strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
mutex_init(&bdi->f_reg_lock);
- bdi->first_time = true;
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
i2c_set_clientdata(client, bdi);
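
Seeding ss_reg with BQ24190_REG_SS_VBUS_STAT_MASK, which the comment marks as an impossible state, guarantees that the first real status read differs from the cached value and produces an initial notification, which is what the removed first_time flag used to arrange. The same trick in miniature, with an arbitrary sentinel value:

#include <stdio.h>
#include <stdint.h>

#define IMPOSSIBLE_STATUS	0xc0	/* a value the hardware never reports */

static uint8_t cached_status = IMPOSSIBLE_STATUS;

/* Returns 1 if userspace should be notified about a status change. */
static int status_changed(uint8_t fresh)
{
	int changed = (fresh != cached_status);

	cached_status = fresh;
	return changed;
}

int main(void)
{
	/* First real reading always differs from the sentinel... */
	printf("first read:  notify=%d\n", status_changed(0x04));
	/* ...later identical readings do not. */
	printf("second read: notify=%d\n", status_changed(0x04));
	return 0;
}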
@@ -1392,22 +1356,13 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
- bq24190_irq_handler_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "bq24190-charger", bdi);
- if (ret < 0) {
- dev_err(dev, "Can't set up irq handler\n");
- goto out1;
- }
-
pm_runtime_enable(dev);
pm_runtime_resume(dev);
ret = bq24190_hw_init(bdi);
if (ret < 0) {
dev_err(dev, "Hardware init failed\n");
- goto out2;
+ goto out1;
}
charger_cfg.drv_data = bdi;
@@ -1418,7 +1373,7 @@ static int bq24190_probe(struct i2c_client *client,
if (IS_ERR(bdi->charger)) {
dev_err(dev, "Can't register charger\n");
ret = PTR_ERR(bdi->charger);
- goto out2;
+ goto out1;
}
battery_cfg.drv_data = bdi;
@@ -1427,27 +1382,39 @@ static int bq24190_probe(struct i2c_client *client,
if (IS_ERR(bdi->battery)) {
dev_err(dev, "Can't register battery\n");
ret = PTR_ERR(bdi->battery);
- goto out3;
+ goto out2;
}
ret = bq24190_sysfs_create_group(bdi);
if (ret) {
dev_err(dev, "Can't create sysfs entries\n");
+ goto out3;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
goto out4;
}
return 0;
out4:
- power_supply_unregister(bdi->battery);
+ bq24190_sysfs_remove_group(bdi);
+
out3:
- power_supply_unregister(bdi->charger);
+ power_supply_unregister(bdi->battery);
+
out2:
- pm_runtime_disable(dev);
+ power_supply_unregister(bdi->charger);
+
out1:
+ pm_runtime_disable(dev);
if (bdi->gpio_int)
gpio_free(bdi->gpio_int);
-
return ret;
}
@@ -1488,12 +1455,13 @@ static int bq24190_pm_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
pm_runtime_get_sync(bdi->dev);
bq24190_register_reset(bdi);
+ bq24190_set_mode_host(bdi);
+ bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
pm_runtime_put_sync(bdi->dev);
/* Things may have changed while suspended so alert upper layer */
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index eb7783b42e0a..eb0145380def 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -50,6 +50,8 @@ struct bq24735 {
struct bq24735_platform *pdata;
struct mutex lock;
struct gpio_desc *status_gpio;
+ struct delayed_work poll;
+ u32 poll_interval;
bool charging;
};
@@ -105,26 +107,6 @@ static int bq24735_update_word(struct i2c_client *client, u8 reg,
return bq24735_write_word(client, reg, tmp);
}
-static inline int bq24735_enable_charging(struct bq24735 *charger)
-{
- if (charger->pdata->ext_control)
- return 0;
-
- return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
- BQ24735_CHG_OPT_CHARGE_DISABLE,
- ~BQ24735_CHG_OPT_CHARGE_DISABLE);
-}
-
-static inline int bq24735_disable_charging(struct bq24735 *charger)
-{
- if (charger->pdata->ext_control)
- return 0;
-
- return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
- BQ24735_CHG_OPT_CHARGE_DISABLE,
- BQ24735_CHG_OPT_CHARGE_DISABLE);
-}
-
static int bq24735_config_charger(struct bq24735 *charger)
{
struct bq24735_platform *pdata = charger->pdata;
@@ -176,6 +158,31 @@ static int bq24735_config_charger(struct bq24735 *charger)
return 0;
}
+static inline int bq24735_enable_charging(struct bq24735 *charger)
+{
+ int ret;
+
+ if (charger->pdata->ext_control)
+ return 0;
+
+ ret = bq24735_config_charger(charger);
+ if (ret)
+ return ret;
+
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE, 0);
+}
+
+static inline int bq24735_disable_charging(struct bq24735 *charger)
+{
+ if (charger->pdata->ext_control)
+ return 0;
+
+ return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
+ BQ24735_CHG_OPT_CHARGE_DISABLE,
+ BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
static bool bq24735_charger_is_present(struct bq24735 *charger)
{
if (charger->status_gpio) {
@@ -185,7 +192,7 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
if (ac < 0) {
- dev_err(&charger->client->dev,
+ dev_dbg(&charger->client->dev,
"Failed to read charger options : %d\n",
ac);
return false;
@@ -210,11 +217,8 @@ static int bq24735_charger_is_charging(struct bq24735 *charger)
return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
}
-static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+static void bq24735_update(struct bq24735 *charger)
{
- struct power_supply *psy = devid;
- struct bq24735 *charger = to_bq24735(psy);
-
mutex_lock(&charger->lock);
if (charger->charging && bq24735_charger_is_present(charger))
@@ -224,11 +228,29 @@ static irqreturn_t bq24735_charger_isr(int irq, void *devid)
mutex_unlock(&charger->lock);
- power_supply_changed(psy);
+ power_supply_changed(charger->charger);
+}
+
+static irqreturn_t bq24735_charger_isr(int irq, void *devid)
+{
+ struct power_supply *psy = devid;
+ struct bq24735 *charger = to_bq24735(psy);
+
+ bq24735_update(charger);
return IRQ_HANDLED;
}
+static void bq24735_poll(struct work_struct *work)
+{
+ struct bq24735 *charger = container_of(work, struct bq24735, poll.work);
+
+ bq24735_update(charger);
+
+ schedule_delayed_work(&charger->poll,
+ msecs_to_jiffies(charger->poll_interval));
+}
+
static int bq24735_charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -276,7 +298,6 @@ static int bq24735_charger_set_property(struct power_supply *psy,
mutex_unlock(&charger->lock);
if (ret)
return ret;
- bq24735_config_charger(charger);
break;
case POWER_SUPPLY_STATUS_DISCHARGING:
case POWER_SUPPLY_STATUS_NOT_CHARGING:
@@ -395,7 +416,7 @@ static int bq24735_charger_probe(struct i2c_client *client,
return ret;
}
- if (!charger->status_gpio || bq24735_charger_is_present(charger)) {
+ if (bq24735_charger_is_present(charger)) {
ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
if (ret < 0) {
dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
@@ -416,16 +437,7 @@ static int bq24735_charger_probe(struct i2c_client *client,
"device id mismatch. 0x000b != 0x%04x\n", ret);
return -ENODEV;
}
- }
-
- ret = bq24735_config_charger(charger);
- if (ret < 0) {
- dev_err(&client->dev, "failed in configuring charger");
- return ret;
- }
- /* check for AC adapter presence */
- if (bq24735_charger_is_present(charger)) {
ret = bq24735_enable_charging(charger);
if (ret < 0) {
dev_err(&client->dev, "Failed to enable charging\n");
@@ -456,11 +468,32 @@ static int bq24735_charger_probe(struct i2c_client *client,
client->irq, ret);
return ret;
}
+ } else {
+ ret = device_property_read_u32(&client->dev, "poll-interval",
+ &charger->poll_interval);
+ if (ret)
+ return 0;
+ if (!charger->poll_interval)
+ return 0;
+
+ INIT_DELAYED_WORK(&charger->poll, bq24735_poll);
+ schedule_delayed_work(&charger->poll,
+ msecs_to_jiffies(charger->poll_interval));
}
return 0;
}
+static int bq24735_charger_remove(struct i2c_client *client)
+{
+ struct bq24735 *charger = i2c_get_clientdata(client);
+
+ if (charger->poll_interval)
+ cancel_delayed_work_sync(&charger->poll);
+
+ return 0;
+}
+
static const struct i2c_device_id bq24735_charger_id[] = {
{ "bq24735-charger", 0 },
{}
@@ -479,6 +512,7 @@ static struct i2c_driver bq24735_charger_driver = {
.of_match_table = bq24735_match_ids,
},
.probe = bq24735_charger_probe,
+ .remove = bq24735_charger_remove,
.id_table = bq24735_charger_id,
};
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 08c36b8e04bd..398801a21b86 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -22,8 +22,14 @@
* http://www.ti.com/product/bq27010
* http://www.ti.com/product/bq27210
* http://www.ti.com/product/bq27500
+ * http://www.ti.com/product/bq27510-g1
+ * http://www.ti.com/product/bq27510-g2
* http://www.ti.com/product/bq27510-g3
* http://www.ti.com/product/bq27520-g4
+ * http://www.ti.com/product/bq27520-g1
+ * http://www.ti.com/product/bq27520-g2
+ * http://www.ti.com/product/bq27520-g3
+ * http://www.ti.com/product/bq27520-g4
* http://www.ti.com/product/bq27530-g1
* http://www.ti.com/product/bq27531-g1
* http://www.ti.com/product/bq27541-g1
@@ -145,7 +151,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x76,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
- [BQ27500] = {
+ [BQ2750X] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
[BQ27XXX_REG_INT_TEMP] = 0x28,
@@ -164,7 +170,83 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
- [BQ27510] = {
+ [BQ2751X] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = 0x2e,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
+ [BQ27500] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27510G1] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27510G2] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27510G3] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
[BQ27XXX_REG_INT_TEMP] = 0x28,
@@ -183,6 +265,82 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x2e,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
+ [BQ27520G1] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27520G2] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x36,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27520G3] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x36,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27520G4] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
[BQ27530] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -303,6 +461,42 @@ static enum power_supply_property bq27010_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq2750x_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq2751x_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -312,6 +506,69 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g1_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g2_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27510g3_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
@@ -321,7 +578,27 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-static enum power_supply_property bq27510_battery_props[] = {
+static enum power_supply_property bq27520g1_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g2_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -330,11 +607,51 @@ static enum power_supply_property bq27510_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g3_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_property bq27520g4_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
};
@@ -421,8 +738,16 @@ static struct {
} bq27xxx_battery_props[] = {
BQ27XXX_PROP(BQ27000, bq27000_battery_props),
BQ27XXX_PROP(BQ27010, bq27010_battery_props),
+ BQ27XXX_PROP(BQ2750X, bq2750x_battery_props),
+ BQ27XXX_PROP(BQ2751X, bq2751x_battery_props),
BQ27XXX_PROP(BQ27500, bq27500_battery_props),
- BQ27XXX_PROP(BQ27510, bq27510_battery_props),
+ BQ27XXX_PROP(BQ27510G1, bq27510g1_battery_props),
+ BQ27XXX_PROP(BQ27510G2, bq27510g2_battery_props),
+ BQ27XXX_PROP(BQ27510G3, bq27510g3_battery_props),
+ BQ27XXX_PROP(BQ27520G1, bq27520g1_battery_props),
+ BQ27XXX_PROP(BQ27520G2, bq27520g2_battery_props),
+ BQ27XXX_PROP(BQ27520G3, bq27520g3_battery_props),
+ BQ27XXX_PROP(BQ27520G4, bq27520g4_battery_props),
BQ27XXX_PROP(BQ27530, bq27530_battery_props),
BQ27XXX_PROP(BQ27541, bq27541_battery_props),
BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -674,13 +999,26 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
{
- if (di->chip == BQ27500 || di->chip == BQ27510 ||
- di->chip == BQ27541 || di->chip == BQ27545)
+ switch (di->chip) {
+ case BQ2750X:
+ case BQ2751X:
+ case BQ27500:
+ case BQ27510G1:
+ case BQ27510G2:
+ case BQ27510G3:
+ case BQ27520G1:
+ case BQ27520G2:
+ case BQ27520G3:
+ case BQ27520G4:
+ case BQ27541:
+ case BQ27545:
return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
- if (di->chip == BQ27530 || di->chip == BQ27421)
+ case BQ27530:
+ case BQ27421:
return flags & BQ27XXX_FLAG_OT;
-
- return false;
+ default:
+ return false;
+ }
}
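
The expanded tables above are what let one driver core serve all the gauge variants: the chip enum picks a row, the logical register index picks a column, and INVALID_REG_ADDR marks registers a given part does not implement. A reduced stand-alone model of that lookup (chip names and addresses below are examples, not the driver's tables):

#include <stdio.h>
#include <stdint.h>

#define INVALID_REG_ADDR	0xff

enum demo_chip { DEMO_A, DEMO_B, DEMO_CHIP_MAX };
enum demo_reg  { REG_VOLT, REG_SOC, REG_CYCT, REG_MAX };

/* One row per chip, one column per logical register. */
static const uint8_t demo_regs[DEMO_CHIP_MAX][REG_MAX] = {
	[DEMO_A] = { [REG_VOLT] = 0x08, [REG_SOC] = 0x2c, [REG_CYCT] = 0x2a },
	[DEMO_B] = { [REG_VOLT] = 0x08, [REG_SOC] = 0x20,
		     [REG_CYCT] = INVALID_REG_ADDR },
};

/* Resolve a logical register for a chip; a negative result means the
 * chip simply does not have that register. */
static int reg_addr(enum demo_chip chip, enum demo_reg reg)
{
	uint8_t addr = demo_regs[chip][reg];

	return (addr == INVALID_REG_ADDR) ? -1 : addr;
}

int main(void)
{
	printf("DEMO_A cycle count reg: 0x%02x\n", reg_addr(DEMO_A, REG_CYCT));
	printf("DEMO_B cycle count reg: %d (unsupported)\n",
	       reg_addr(DEMO_B, REG_CYCT));
	return 0;
}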
/*
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 5c5c3a6f9923..c68fbc3fe50a 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -148,9 +148,17 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27200", BQ27000 },
{ "bq27210", BQ27010 },
- { "bq27500", BQ27500 },
- { "bq27510", BQ27510 },
- { "bq27520", BQ27510 },
+ { "bq27500", BQ2750X },
+ { "bq27510", BQ2751X },
+ { "bq27520", BQ2751X },
+ { "bq27500-1", BQ27500 },
+ { "bq27510g1", BQ27510G1 },
+ { "bq27510g2", BQ27510G2 },
+ { "bq27510g3", BQ27510G3 },
+ { "bq27520g1", BQ27520G1 },
+ { "bq27520g2", BQ27520G2 },
+ { "bq27520g3", BQ27520G3 },
+ { "bq27520g4", BQ27520G4 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27530 },
{ "bq27541", BQ27541 },
@@ -173,6 +181,14 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27500" },
{ .compatible = "ti,bq27510" },
{ .compatible = "ti,bq27520" },
+ { .compatible = "ti,bq27500-1" },
+ { .compatible = "ti,bq27510g1" },
+ { .compatible = "ti,bq27510g2" },
+ { .compatible = "ti,bq27510g3" },
+ { .compatible = "ti,bq27520g1" },
+ { .compatible = "ti,bq27520g2" },
+ { .compatible = "ti,bq27520g3" },
+ { .compatible = "ti,bq27520g4" },
{ .compatible = "ti,bq27530" },
{ .compatible = "ti,bq27531" },
{ .compatible = "ti,bq27541" },
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index c5869b1941ac..001731e88718 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -14,7 +14,7 @@
*/
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio.h> /* For legacy platform data */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -23,7 +23,7 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/power/gpio-charger.h>
@@ -34,6 +34,8 @@ struct gpio_charger {
struct power_supply *charger;
struct power_supply_desc charger_desc;
+ struct gpio_desc *gpiod;
+ bool legacy_gpio_requested;
};
static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -58,7 +60,8 @@ static int gpio_charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = !!gpio_get_value_cansleep(pdata->gpio);
+ val->intval = gpiod_get_value_cansleep(gpio_charger->gpiod);
+ /* This xor is only ever used with legacy pdata GPIO */
val->intval ^= pdata->gpio_active_low;
break;
default:
@@ -78,7 +81,6 @@ struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct gpio_charger_platform_data *pdata;
const char *chargetype;
- enum of_gpio_flags flags;
int ret;
if (!np)
@@ -89,16 +91,6 @@ struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
pdata->name = np->name;
-
- pdata->gpio = of_get_gpio_flags(np, 0, &flags);
- if (pdata->gpio < 0) {
- if (pdata->gpio != -EPROBE_DEFER)
- dev_err(dev, "could not get charger gpio\n");
- return ERR_PTR(pdata->gpio);
- }
-
- pdata->gpio_active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
-
pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
ret = of_property_read_string(np, "charger-type", &chargetype);
if (ret >= 0) {
@@ -144,11 +136,6 @@ static int gpio_charger_probe(struct platform_device *pdev)
}
}
- if (!gpio_is_valid(pdata->gpio)) {
- dev_err(&pdev->dev, "Invalid gpio pin\n");
- return -EINVAL;
- }
-
gpio_charger = devm_kzalloc(&pdev->dev, sizeof(*gpio_charger),
GFP_KERNEL);
if (!gpio_charger) {
@@ -156,6 +143,45 @@ static int gpio_charger_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /*
+ * This will fetch a GPIO descriptor from device tree, ACPI or
+ * boardfile descriptor tables. It's good to try this first.
+ */
+ gpio_charger->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+
+ /*
+ * If this fails and we're not using device tree, try the
+ * legacy platform data method.
+ */
+ if (IS_ERR(gpio_charger->gpiod) && !pdev->dev.of_node) {
+ /* Non-DT: use legacy GPIO numbers */
+ if (!gpio_is_valid(pdata->gpio)) {
+ dev_err(&pdev->dev, "Invalid gpio pin in pdata\n");
+ return -EINVAL;
+ }
+ ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request gpio pin: %d\n",
+ ret);
+ return ret;
+ }
+ gpio_charger->legacy_gpio_requested = true;
+ ret = gpio_direction_input(pdata->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set gpio to input: %d\n",
+ ret);
+ goto err_gpio_free;
+ }
+ /* Then convert this to gpiod for now */
+ gpio_charger->gpiod = gpio_to_desc(pdata->gpio);
+ } else if (IS_ERR(gpio_charger->gpiod)) {
+ /* Just try again if this happens */
+ if (PTR_ERR(gpio_charger->gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "error getting GPIO descriptor\n");
+ return PTR_ERR(gpio_charger->gpiod);
+ }
+
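+
With the descriptor API the active-low polarity is handled by gpiolib itself, so the XOR with pdata->gpio_active_low in get_property only matters on the legacy platform-data path, as the new comment there notes. A tiny stand-alone illustration of that normalization:

#include <stdio.h>

/*
 * Normalize a raw GPIO level to an "online" value. active_low is only
 * non-zero on the legacy platform-data path; descriptor-based GPIOs
 * already return the logical level, so the XOR is a no-op for them.
 */
static int charger_online(int raw_level, int active_low)
{
	return !!raw_level ^ !!active_low;
}

int main(void)
{
	/* Active-low status pin: a low raw level means the charger is online. */
	printf("raw=0 active_low=1 -> online=%d\n", charger_online(0, 1));
	printf("raw=1 active_low=1 -> online=%d\n", charger_online(1, 1));
	return 0;
}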
charger_desc = &gpio_charger->charger_desc;
charger_desc->name = pdata->name ? pdata->name : "gpio-charger";
@@ -169,17 +195,6 @@ static int gpio_charger_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = gpio_charger;
- ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
- if (ret) {
- dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
- goto err_free;
- }
- ret = gpio_direction_input(pdata->gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
- goto err_gpio_free;
- }
-
gpio_charger->pdata = pdata;
gpio_charger->charger = power_supply_register(&pdev->dev,
@@ -191,7 +206,7 @@ static int gpio_charger_probe(struct platform_device *pdev)
goto err_gpio_free;
}
- irq = gpio_to_irq(pdata->gpio);
+ irq = gpiod_to_irq(gpio_charger->gpiod);
if (irq > 0) {
ret = request_any_context_irq(irq, gpio_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -209,8 +224,8 @@ static int gpio_charger_probe(struct platform_device *pdev)
return 0;
err_gpio_free:
- gpio_free(pdata->gpio);
-err_free:
+ if (gpio_charger->legacy_gpio_requested)
+ gpio_free(pdata->gpio);
return ret;
}
@@ -223,7 +238,8 @@ static int gpio_charger_remove(struct platform_device *pdev)
power_supply_unregister(gpio_charger->charger);
- gpio_free(gpio_charger->pdata->gpio);
+ if (gpio_charger->legacy_gpio_requested)
+ gpio_free(gpio_charger->pdata->gpio);
return 0;
}
diff --git a/drivers/power/supply/intel_mid_battery.c b/drivers/power/supply/intel_mid_battery.c
deleted file mode 100644
index dc7feef1bea4..000000000000
--- a/drivers/power/supply/intel_mid_battery.c
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * intel_mid_battery.c - Intel MID PMIC Battery Driver
- *
- * Copyright (C) 2009 Intel Corporation
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Author: Nithish Mahalingam <nithish.mahalingam@intel.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/jiffies.h>
-#include <linux/param.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#define DRIVER_NAME "pmic_battery"
-
-/*********************************************************************
- * Generic defines
- *********************************************************************/
-
-static int debug;
-module_param(debug, int, 0444);
-MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
-
-#define PMIC_BATT_DRV_INFO_UPDATED 1
-#define PMIC_BATT_PRESENT 1
-#define PMIC_BATT_NOT_PRESENT 0
-#define PMIC_USB_PRESENT PMIC_BATT_PRESENT
-#define PMIC_USB_NOT_PRESENT PMIC_BATT_NOT_PRESENT
-
-/* pmic battery register related */
-#define PMIC_BATT_CHR_SCHRGINT_ADDR 0xD2
-#define PMIC_BATT_CHR_SBATOVP_MASK (1 << 1)
-#define PMIC_BATT_CHR_STEMP_MASK (1 << 2)
-#define PMIC_BATT_CHR_SCOMP_MASK (1 << 3)
-#define PMIC_BATT_CHR_SUSBDET_MASK (1 << 4)
-#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
-#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
-#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0x86
-
-#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
-#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
-
-/* pmic ipc related */
-#define PMIC_BATT_CHR_IPC_FCHRG_SUBID 0x4
-#define PMIC_BATT_CHR_IPC_TCHRG_SUBID 0x6
-
-/* types of battery charging */
-enum batt_charge_type {
- BATT_USBOTG_500MA_CHARGE,
- BATT_USBOTG_TRICKLE_CHARGE,
-};
-
-/* valid battery events */
-enum batt_event {
- BATT_EVENT_BATOVP_EXCPT,
- BATT_EVENT_USBOVP_EXCPT,
- BATT_EVENT_TEMP_EXCPT,
- BATT_EVENT_DCLMT_EXCPT,
- BATT_EVENT_EXCPT
-};
-
-
-/*********************************************************************
- * Battery properties
- *********************************************************************/
-
-/*
- * pmic battery info
- */
-struct pmic_power_module_info {
- bool is_dev_info_updated;
- struct device *dev;
- /* pmic battery data */
- unsigned long update_time; /* jiffies when data read */
- unsigned int usb_is_present;
- unsigned int batt_is_present;
- unsigned int batt_health;
- unsigned int usb_health;
- unsigned int batt_status;
- unsigned int batt_charge_now; /* in mAS */
- unsigned int batt_prev_charge_full; /* in mAS */
- unsigned int batt_charge_rate; /* in units per second */
-
- struct power_supply *usb;
- struct power_supply *batt;
- int irq; /* GPE_ID or IRQ# */
- struct workqueue_struct *monitor_wqueue;
- struct delayed_work monitor_battery;
- struct work_struct handler;
-};
-
-static unsigned int delay_time = 2000; /* in ms */
-
-/*
- * pmic ac properties
- */
-static enum power_supply_property pmic_usb_props[] = {
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_HEALTH,
-};
-
-/*
- * pmic battery properties
- */
-static enum power_supply_property pmic_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_FULL,
-};
-
-
-/*
- * Glue functions for talking to the IPC
- */
-
-struct battery_property {
- u32 capacity; /* Charger capacity */
- u8 crnt; /* Quick charge current value*/
- u8 volt; /* Fine adjustment of constant charge voltage */
- u8 prot; /* CHRGPROT register value */
- u8 prot2; /* CHRGPROT1 register value */
- u8 timer; /* Charging timer */
-};
-
-#define IPCMSG_BATTERY 0xEF
-
-/* Battery coulomb counter accumulator commands */
-#define IPC_CMD_CC_WR 0 /* Update coulomb counter value */
-#define IPC_CMD_CC_RD 1 /* Read coulomb counter value */
-#define IPC_CMD_BATTERY_PROPERTY 2 /* Read Battery property */
-
-/**
- * pmic_scu_ipc_battery_cc_read - read battery cc
- * @value: battery coulomb counter read
- *
- * Reads the battery couloumb counter value, returns 0 on success, or
- * an error code
- *
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
- */
-static int pmic_scu_ipc_battery_cc_read(u32 *value)
-{
- return intel_scu_ipc_command(IPCMSG_BATTERY, IPC_CMD_CC_RD,
- NULL, 0, value, 1);
-}
-
-/**
- * pmic_scu_ipc_battery_property_get - fetch properties
- * @prop: battery properties
- *
- * Retrieve the battery properties from the power management
- *
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
- */
-static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
-{
- u32 data[3];
- u8 *p = (u8 *)&data[1];
- int err = intel_scu_ipc_command(IPCMSG_BATTERY,
- IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
-
- prop->capacity = data[0];
- prop->crnt = *p++;
- prop->volt = *p++;
- prop->prot = *p++;
- prop->prot2 = *p++;
- prop->timer = *p++;
-
- return err;
-}
-
-/**
- * pmic_scu_ipc_set_charger - set charger
- * @charger: charger to select
- *
- * Switch the charging mode for the SCU
- */
-
-static int pmic_scu_ipc_set_charger(int charger)
-{
- return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
-}
-
-/**
- * pmic_battery_log_event - log battery events
- * @event: battery event to be logged
- * Context: can sleep
- *
- * There are multiple battery events which may be of interest to users;
- * this battery function logs the different battery events onto the
- * kernel log messages.
- */
-static void pmic_battery_log_event(enum batt_event event)
-{
- printk(KERN_WARNING "pmic-battery: ");
- switch (event) {
- case BATT_EVENT_BATOVP_EXCPT:
- printk(KERN_CONT "battery overvoltage condition\n");
- break;
- case BATT_EVENT_USBOVP_EXCPT:
- printk(KERN_CONT "usb charger overvoltage condition\n");
- break;
- case BATT_EVENT_TEMP_EXCPT:
- printk(KERN_CONT "high battery temperature condition\n");
- break;
- case BATT_EVENT_DCLMT_EXCPT:
- printk(KERN_CONT "over battery charge current condition\n");
- break;
- default:
- printk(KERN_CONT "charger/battery exception %d\n", event);
- break;
- }
-}
-
-/**
- * pmic_battery_read_status - read battery status information
- * @pbi: device info structure to update the read information
- * Context: can sleep
- *
- * PMIC power source information need to be updated based on the data read
- * from the PMIC battery registers.
- *
- */
-static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
-{
- unsigned int update_time_intrvl;
- unsigned int chrg_val;
- u32 ccval;
- u8 r8;
- struct battery_property batt_prop;
- int batt_present = 0;
- int usb_present = 0;
- int batt_exception = 0;
-
- /* make sure the last batt_status read happened delay_time before */
- if (pbi->update_time && time_before(jiffies, pbi->update_time +
- msecs_to_jiffies(delay_time)))
- return;
-
- update_time_intrvl = jiffies_to_msecs(jiffies - pbi->update_time);
- pbi->update_time = jiffies;
-
- /* read coulomb counter registers and schrgint register */
- if (pmic_scu_ipc_battery_cc_read(&ccval)) {
- dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
- __func__);
- return;
- }
-
- if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
- dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
- __func__);
- return;
- }
-
- /*
- * set pmic_power_module_info members based on pmic register values
- * read.
- */
-
- /* set batt_is_present */
- if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
- pbi->batt_is_present = PMIC_BATT_PRESENT;
- batt_present = 1;
- } else {
- pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
- pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
- }
-
- /* set batt_health */
- if (batt_present) {
- if (r8 & PMIC_BATT_CHR_SBATOVP_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
- batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
- batt_exception = 1;
- } else {
- pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
- if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- /* PMIC will change charging current automatically */
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- }
- }
- }
-
- /* set usb_is_present */
- if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
- pbi->usb_is_present = PMIC_USB_PRESENT;
- usb_present = 1;
- } else {
- pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
- pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
-
- if (usb_present) {
- if (r8 & PMIC_BATT_CHR_SUSBOVP_MASK) {
- pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
- } else {
- pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
- }
- }
-
- chrg_val = ccval & PMIC_BATT_ADC_ACCCHRGVAL_MASK;
-
- /* set batt_prev_charge_full to battery capacity the first time */
- if (!pbi->is_dev_info_updated) {
- if (pmic_scu_ipc_battery_property_get(&batt_prop)) {
- dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
- __func__);
- return;
- }
- pbi->batt_prev_charge_full = batt_prop.capacity;
- }
-
- /* set batt_status */
- if (batt_present && !batt_exception) {
- if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
- pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
- pbi->batt_prev_charge_full = chrg_val;
- } else if (ccval & PMIC_BATT_ADC_ACCCHRG_MASK) {
- pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
- } else {
- pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
- }
- }
-
- /* set batt_charge_rate */
- if (pbi->is_dev_info_updated && batt_present && !batt_exception) {
- if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
- if (pbi->batt_charge_now - chrg_val) {
- pbi->batt_charge_rate = ((pbi->batt_charge_now -
- chrg_val) * 1000 * 60) /
- update_time_intrvl;
- }
- } else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
- if (chrg_val - pbi->batt_charge_now) {
- pbi->batt_charge_rate = ((chrg_val -
- pbi->batt_charge_now) * 1000 * 60) /
- update_time_intrvl;
- }
- } else
- pbi->batt_charge_rate = 0;
- } else {
- pbi->batt_charge_rate = -1;
- }
-
- /* batt_charge_now */
- if (batt_present && !batt_exception)
- pbi->batt_charge_now = chrg_val;
- else
- pbi->batt_charge_now = -1;
-
- pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
-}
-
-/**
- * pmic_usb_get_property - usb power source get property
- * @psy: usb power supply context
- * @psp: usb power source property
- * @val: usb power source property value
- * Context: can sleep
- *
- * PMIC usb power source property needs to be provided to power_supply
- * subsytem for it to provide the information to users.
- */
-static int pmic_usb_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pmic_power_module_info *pbi = power_supply_get_drvdata(psy);
-
- /* update pmic_power_module_info members */
- pmic_battery_read_status(pbi);
-
- switch (psp) {
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = pbi->usb_is_present;
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- val->intval = pbi->usb_health;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static inline unsigned long mAStouAh(unsigned long v)
-{
- /* seconds to hours, mA to µA */
- return (v * 1000) / 3600;
-}
-
-/**
- * pmic_battery_get_property - battery power source get property
- * @psy: battery power supply context
- * @psp: battery power source property
- * @val: battery power source property value
- * Context: can sleep
- *
- * PMIC battery power source property needs to be provided to power_supply
- * subsytem for it to provide the information to users.
- */
-static int pmic_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pmic_power_module_info *pbi = power_supply_get_drvdata(psy);
-
- /* update pmic_power_module_info members */
- pmic_battery_read_status(pbi);
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = pbi->batt_status;
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- val->intval = pbi->batt_health;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = pbi->batt_is_present;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = mAStouAh(pbi->batt_charge_now);
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = mAStouAh(pbi->batt_prev_charge_full);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * pmic_battery_monitor - monitor battery status
- * @work: work structure
- * Context: can sleep
- *
- * PMIC battery status needs to be monitored for any change
- * and information needs to be frequently updated.
- */
-static void pmic_battery_monitor(struct work_struct *work)
-{
- struct pmic_power_module_info *pbi = container_of(work,
- struct pmic_power_module_info, monitor_battery.work);
-
- /* update pmic_power_module_info members */
- pmic_battery_read_status(pbi);
- queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 10);
-}
-
-/**
- * pmic_battery_set_charger - set battery charger
- * @pbi: device info structure
- * @chrg: charge mode to set battery charger in
- * Context: can sleep
- *
- * PMIC battery charger needs to be enabled based on the usb charge
- * capabilities connected to the platform.
- */
-static int pmic_battery_set_charger(struct pmic_power_module_info *pbi,
- enum batt_charge_type chrg)
-{
- int retval;
-
- /* set usblmt bits and chrgcntl register bits appropriately */
- switch (chrg) {
- case BATT_USBOTG_500MA_CHARGE:
- retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_FCHRG_SUBID);
- break;
- case BATT_USBOTG_TRICKLE_CHARGE:
- retval = pmic_scu_ipc_set_charger(PMIC_BATT_CHR_IPC_TCHRG_SUBID);
- break;
- default:
- dev_warn(pbi->dev, "%s(): out of range usb charger "
- "charge detected\n", __func__);
- return -EINVAL;
- }
-
- if (retval) {
- dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
- __func__);
- return retval;
- }
-
- return 0;
-}
-
-/**
- * pmic_battery_interrupt_handler - pmic battery interrupt handler
- * Context: interrupt context
- *
- * PMIC battery interrupt handler which will be called with either
- * battery full condition occurs or usb otg & battery connect
- * condition occurs.
- */
-static irqreturn_t pmic_battery_interrupt_handler(int id, void *dev)
-{
- struct pmic_power_module_info *pbi = dev;
-
- schedule_work(&pbi->handler);
-
- return IRQ_HANDLED;
-}
-
-/**
- * pmic_battery_handle_intrpt - pmic battery service interrupt
- * @work: work structure
- * Context: can sleep
- *
- * PMIC battery needs to either update the battery status as full
- * if it detects battery full condition caused the interrupt or needs
- * to enable battery charger if it detects usb and battery detect
- * caused the source of interrupt.
- */
-static void pmic_battery_handle_intrpt(struct work_struct *work)
-{
- struct pmic_power_module_info *pbi = container_of(work,
- struct pmic_power_module_info, handler);
- enum batt_charge_type chrg;
- u8 r8;
-
- if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
- dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
- __func__);
- return;
- }
- /* find the cause of the interrupt */
- if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
- pbi->batt_is_present = PMIC_BATT_PRESENT;
- } else {
- pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
- pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
- return;
- }
-
- if (r8 & PMIC_BATT_CHR_EXCPT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- pmic_battery_log_event(BATT_EVENT_EXCPT);
- return;
- } else {
- pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
- pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
- }
-
- if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
- u32 ccval;
- pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
-
- if (pmic_scu_ipc_battery_cc_read(&ccval)) {
- dev_warn(pbi->dev, "%s(): ipc config cmd "
- "failed\n", __func__);
- return;
- }
- pbi->batt_prev_charge_full = ccval &
- PMIC_BATT_ADC_ACCCHRGVAL_MASK;
- return;
- }
-
- if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
- pbi->usb_is_present = PMIC_USB_PRESENT;
- } else {
- pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
- pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
- return;
- }
-
- /* setup battery charging */
-
-#if 0
- /* check usb otg power capability and set charger accordingly */
- retval = langwell_udc_maxpower(&power);
- if (retval) {
- dev_warn(pbi->dev,
- "%s(): usb otg power query failed with error code %d\n",
- __func__, retval);
- return;
- }
-
- if (power >= 500)
- chrg = BATT_USBOTG_500MA_CHARGE;
- else
-#endif
- chrg = BATT_USBOTG_TRICKLE_CHARGE;
-
- /* enable battery charging */
- if (pmic_battery_set_charger(pbi, chrg)) {
- dev_warn(pbi->dev,
- "%s(): failed to set up battery charging\n", __func__);
- return;
- }
-
- dev_dbg(pbi->dev,
- "pmic-battery: %s() - setting up battery charger successful\n",
- __func__);
-}
-
-/*
- * Description of power supplies
- */
-static const struct power_supply_desc pmic_usb_desc = {
- .name = "pmic-usb",
- .type = POWER_SUPPLY_TYPE_USB,
- .properties = pmic_usb_props,
- .num_properties = ARRAY_SIZE(pmic_usb_props),
- .get_property = pmic_usb_get_property,
-};
-
-static const struct power_supply_desc pmic_batt_desc = {
- .name = "pmic-batt",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = pmic_battery_props,
- .num_properties = ARRAY_SIZE(pmic_battery_props),
- .get_property = pmic_battery_get_property,
-};
-
-/**
- * pmic_battery_probe - pmic battery initialize
- * @irq: pmic battery device irq
- * @dev: pmic battery device structure
- * Context: can sleep
- *
- * PMIC battery initializes its internal data structue and other
- * infrastructure components for it to work as expected.
- */
-static int probe(int irq, struct device *dev)
-{
- int retval = 0;
- struct pmic_power_module_info *pbi;
- struct power_supply_config psy_cfg = {};
-
- dev_dbg(dev, "pmic-battery: found pmic battery device\n");
-
- pbi = kzalloc(sizeof(*pbi), GFP_KERNEL);
- if (!pbi) {
- dev_err(dev, "%s(): memory allocation failed\n",
- __func__);
- return -ENOMEM;
- }
-
- pbi->dev = dev;
- pbi->irq = irq;
- dev_set_drvdata(dev, pbi);
- psy_cfg.drv_data = pbi;
-
- /* initialize all required framework before enabling interrupts */
- INIT_WORK(&pbi->handler, pmic_battery_handle_intrpt);
- INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
- pbi->monitor_wqueue = alloc_workqueue(dev_name(dev), WQ_MEM_RECLAIM, 0);
- if (!pbi->monitor_wqueue) {
- dev_err(dev, "%s(): wqueue init failed\n", __func__);
- retval = -ESRCH;
- goto wqueue_failed;
- }
-
- /* register interrupt */
- retval = request_irq(pbi->irq, pmic_battery_interrupt_handler,
- 0, DRIVER_NAME, pbi);
- if (retval) {
- dev_err(dev, "%s(): cannot get IRQ\n", __func__);
- goto requestirq_failed;
- }
-
- /* register pmic-batt with power supply subsystem */
- pbi->batt = power_supply_register(dev, &pmic_usb_desc, &psy_cfg);
- if (IS_ERR(pbi->batt)) {
- dev_err(dev,
- "%s(): failed to register pmic battery device with power supply subsystem\n",
- __func__);
- retval = PTR_ERR(pbi->batt);
- goto power_reg_failed;
- }
-
- dev_dbg(dev, "pmic-battery: %s() - pmic battery device "
- "registration with power supply subsystem successful\n",
- __func__);
-
- queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 1);
-
- /* register pmic-usb with power supply subsystem */
- pbi->usb = power_supply_register(dev, &pmic_batt_desc, &psy_cfg);
- if (IS_ERR(pbi->usb)) {
- dev_err(dev,
- "%s(): failed to register pmic usb device with power supply subsystem\n",
- __func__);
- retval = PTR_ERR(pbi->usb);
- goto power_reg_failed_1;
- }
-
- if (debug)
- printk(KERN_INFO "pmic-battery: %s() - pmic usb device "
- "registration with power supply subsystem successful\n",
- __func__);
-
- return retval;
-
-power_reg_failed_1:
- power_supply_unregister(pbi->batt);
-power_reg_failed:
- cancel_delayed_work_sync(&pbi->monitor_battery);
-requestirq_failed:
- destroy_workqueue(pbi->monitor_wqueue);
-wqueue_failed:
- kfree(pbi);
-
- return retval;
-}
-
-static int platform_pmic_battery_probe(struct platform_device *pdev)
-{
- return probe(pdev->id, &pdev->dev);
-}
-
-/**
- * pmic_battery_remove - pmic battery finalize
- * @dev: pmic battery device structure
- * Context: can sleep
- *
- * PMIC battery finalizes its internal data structue and other
- * infrastructure components that it initialized in
- * pmic_battery_probe.
- */
-
-static int platform_pmic_battery_remove(struct platform_device *pdev)
-{
- struct pmic_power_module_info *pbi = platform_get_drvdata(pdev);
-
- free_irq(pbi->irq, pbi);
- cancel_delayed_work_sync(&pbi->monitor_battery);
- destroy_workqueue(pbi->monitor_wqueue);
-
- power_supply_unregister(pbi->usb);
- power_supply_unregister(pbi->batt);
-
- cancel_work_sync(&pbi->handler);
- kfree(pbi);
- return 0;
-}
-
-static struct platform_driver platform_pmic_battery_driver = {
- .driver = {
- .name = DRIVER_NAME,
- },
- .probe = platform_pmic_battery_probe,
- .remove = platform_pmic_battery_remove,
-};
-
-module_platform_driver(platform_pmic_battery_driver);
-
-MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
new file mode 100644
index 000000000000..b91b1d2999dc
--- /dev/null
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -0,0 +1,327 @@
+/*
+ * Maxim MAX14656 / AL32 USB Charger Detector driver
+ *
+ * Copyright (C) 2014 LG Electronics, Inc
+ * Copyright (C) 2016 Alexander Kurz <akurz@blala.de>
+ *
+ * Components from Maxim AL32 Charger detection Driver for MX50 Yoshi Board
+ * Copyright (C) Amazon Technologies Inc. All rights reserved.
+ * Manish Lachwani (lachwani@lab126.com)
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+
+#define MAX14656_MANUFACTURER "Maxim Integrated"
+#define MAX14656_NAME "max14656"
+
+#define MAX14656_DEVICE_ID 0x00
+#define MAX14656_INTERRUPT_1 0x01
+#define MAX14656_INTERRUPT_2 0x02
+#define MAX14656_STATUS_1 0x03
+#define MAX14656_STATUS_2 0x04
+#define MAX14656_INTMASK_1 0x05
+#define MAX14656_INTMASK_2 0x06
+#define MAX14656_CONTROL_1 0x07
+#define MAX14656_CONTROL_2 0x08
+#define MAX14656_CONTROL_3 0x09
+
+#define DEVICE_VENDOR_MASK 0xf0
+#define DEVICE_REV_MASK 0x0f
+#define INT_EN_REG_MASK BIT(4)
+#define CHG_TYPE_INT_MASK BIT(0)
+#define STATUS1_VB_VALID_MASK BIT(4)
+#define STATUS1_CHG_TYPE_MASK 0xf
+#define INT1_DCD_TIMEOUT_MASK BIT(7)
+#define CONTROL1_DEFAULT 0x0d
+#define CONTROL1_INT_EN BIT(4)
+#define CONTROL1_INT_ACTIVE_HIGH BIT(5)
+#define CONTROL1_EDGE BIT(7)
+#define CONTROL2_DEFAULT 0x8e
+#define CONTROL2_ADC_EN BIT(0)
+#define CONTROL3_DEFAULT 0x8d
+
+enum max14656_chg_type {
+ MAX14656_NO_CHARGER = 0,
+ MAX14656_SDP_CHARGER,
+ MAX14656_CDP_CHARGER,
+ MAX14656_DCP_CHARGER,
+ MAX14656_APPLE_500MA_CHARGER,
+ MAX14656_APPLE_1A_CHARGER,
+ MAX14656_APPLE_2A_CHARGER,
+ MAX14656_SPECIAL_500MA_CHARGER,
+ MAX14656_APPLE_12W,
+ MAX14656_CHARGER_LAST
+};
+
+static const struct max14656_chg_type_props {
+ enum power_supply_type type;
+} chg_type_props[] = {
+ { POWER_SUPPLY_TYPE_UNKNOWN },
+ { POWER_SUPPLY_TYPE_USB },
+ { POWER_SUPPLY_TYPE_USB_CDP },
+ { POWER_SUPPLY_TYPE_USB_DCP },
+ { POWER_SUPPLY_TYPE_USB_DCP },
+ { POWER_SUPPLY_TYPE_USB_DCP },
+ { POWER_SUPPLY_TYPE_USB_DCP },
+ { POWER_SUPPLY_TYPE_USB_DCP },
+ { POWER_SUPPLY_TYPE_USB },
+};
+
+struct max14656_chip {
+ struct i2c_client *client;
+ struct power_supply *detect_psy;
+ struct power_supply_desc psy_desc;
+ struct delayed_work irq_work;
+
+ int irq;
+ int online;
+};
+
+static int max14656_read_reg(struct i2c_client *client, int reg, u8 *val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "i2c read fail: can't read from %02x: %d\n",
+ reg, ret);
+ return ret;
+ }
+ *val = ret;
+ return 0;
+}
+
+static int max14656_write_reg(struct i2c_client *client, int reg, u8 val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "i2c write fail: can't write %02x to %02x: %d\n",
+ val, reg, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int max14656_read_block_reg(struct i2c_client *client, u8 reg,
+ u8 length, u8 *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, length, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to block read reg 0x%x: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define REG_TOTAL_NUM 5
+static void max14656_irq_worker(struct work_struct *work)
+{
+ struct max14656_chip *chip =
+ container_of(work, struct max14656_chip, irq_work.work);
+
+ u8 buf[REG_TOTAL_NUM];
+ u8 chg_type;
+ int ret = 0;
+
+ ret = max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID,
+ REG_TOTAL_NUM, buf);
+
+ if ((buf[MAX14656_STATUS_1] & STATUS1_VB_VALID_MASK) &&
+ (buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK)) {
+ chg_type = buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK;
+ if (chg_type < MAX14656_CHARGER_LAST)
+ chip->psy_desc.type = chg_type_props[chg_type].type;
+ else
+ chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ chip->online = 1;
+ } else {
+ chip->online = 0;
+ chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ }
+
+ power_supply_changed(chip->detect_psy);
+}
+
+static irqreturn_t max14656_irq(int irq, void *dev_id)
+{
+ struct max14656_chip *chip = dev_id;
+
+ schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(100));
+
+ return IRQ_HANDLED;
+}
+
+static int max14656_hw_init(struct max14656_chip *chip)
+{
+ uint8_t val = 0;
+ uint8_t rev;
+ struct i2c_client *client = chip->client;
+
+ if (max14656_read_reg(client, MAX14656_DEVICE_ID, &val))
+ return -ENODEV;
+
+ if ((val & DEVICE_VENDOR_MASK) != 0x20) {
+ dev_err(&client->dev, "wrong vendor ID %d\n",
+ ((val & DEVICE_VENDOR_MASK) >> 4));
+ return -ENODEV;
+ }
+ rev = val & DEVICE_REV_MASK;
+
+ /* Turn on ADC_EN */
+ if (max14656_write_reg(client, MAX14656_CONTROL_2, CONTROL2_ADC_EN))
+ return -EINVAL;
+
+ /* turn on interrupts and low power mode */
+ if (max14656_write_reg(client, MAX14656_CONTROL_1,
+ CONTROL1_DEFAULT |
+ CONTROL1_INT_EN |
+ CONTROL1_INT_ACTIVE_HIGH |
+ CONTROL1_EDGE))
+ return -EINVAL;
+
+ if (max14656_write_reg(client, MAX14656_INTMASK_1, 0x3))
+ return -EINVAL;
+
+ if (max14656_write_reg(client, MAX14656_INTMASK_2, 0x1))
+ return -EINVAL;
+
+ dev_info(&client->dev, "detected revision %d\n", rev);
+ return 0;
+}
+
+static int max14656_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max14656_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->online;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = MAX14656_NAME;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = MAX14656_MANUFACTURER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property max14656_battery_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static int max14656_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct device *dev = &client->dev;
+ struct power_supply_config psy_cfg = {};
+ struct max14656_chip *chip;
+ int irq = client->irq;
+ int ret = 0;
+
+ if (irq <= 0) {
+ dev_err(dev, "invalid irq number: %d\n", irq);
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = chip;
+ chip->client = client;
+ chip->online = 0;
+ chip->psy_desc.name = MAX14656_NAME;
+ chip->psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ chip->psy_desc.properties = max14656_battery_props;
+ chip->psy_desc.num_properties = ARRAY_SIZE(max14656_battery_props);
+ chip->psy_desc.get_property = max14656_get_property;
+ chip->irq = irq;
+
+ ret = max14656_hw_init(chip);
+ if (ret)
+ return -ENODEV;
+
+ INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+
+ ret = devm_request_irq(dev, chip->irq, max14656_irq,
+ IRQF_TRIGGER_FALLING,
+ MAX14656_NAME, chip);
+ if (ret) {
+ dev_err(dev, "request_irq %d failed\n", chip->irq);
+ return -EINVAL;
+ }
+ enable_irq_wake(chip->irq);
+
+ chip->detect_psy = devm_power_supply_register(dev,
+ &chip->psy_desc, &psy_cfg);
+ if (IS_ERR(chip->detect_psy)) {
+ dev_err(dev, "power_supply_register failed\n");
+ return -EINVAL;
+ }
+
+ schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
+
+ return 0;
+}
+
+static const struct i2c_device_id max14656_id[] = {
+ { "max14656", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max14656_id);
+
+static const struct of_device_id max14656_match_table[] = {
+ { .compatible = "maxim,max14656", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max14656_match_table);
+
+static struct i2c_driver max14656_i2c_driver = {
+ .driver = {
+ .name = "max14656",
+ .of_match_table = max14656_match_table,
+ },
+ .probe = max14656_probe,
+ .id_table = max14656_id,
+};
+module_i2c_driver(max14656_i2c_driver);
+
+MODULE_DESCRIPTION("MAX14656 USB charger detector");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 290ddc12b040..fa861003fece 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -148,10 +148,8 @@ static int max8997_battery_probe(struct platform_device *pdev)
charger = devm_kzalloc(&pdev->dev, sizeof(struct charger_data),
GFP_KERNEL);
- if (charger == NULL) {
- dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ if (!charger)
return -ENOMEM;
- }
platform_set_drvdata(pdev, charger);
@@ -161,7 +159,7 @@ static int max8997_battery_probe(struct platform_device *pdev)
psy_cfg.drv_data = charger;
- charger->battery = power_supply_register(&pdev->dev,
+ charger->battery = devm_power_supply_register(&pdev->dev,
&max8997_battery_desc,
&psy_cfg);
if (IS_ERR(charger->battery)) {
@@ -172,14 +170,6 @@ static int max8997_battery_probe(struct platform_device *pdev)
return 0;
}
-static int max8997_battery_remove(struct platform_device *pdev)
-{
- struct charger_data *charger = platform_get_drvdata(pdev);
-
- power_supply_unregister(charger->battery);
- return 0;
-}
-
static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
{ }
@@ -191,7 +181,6 @@ static struct platform_driver max8997_battery_driver = {
.name = "max8997-battery",
},
.probe = max8997_battery_probe,
- .remove = max8997_battery_remove,
.id_table = max8997_battery_id,
};
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index d05597b4e40f..b3c1873ad84d 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -393,7 +393,6 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
struct pcf50633_mbc *mbc;
- int ret;
int i;
u8 mbcs1;
@@ -419,8 +418,7 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
&psy_cfg);
if (IS_ERR(mbc->adapter)) {
dev_err(mbc->pcf->dev, "failed to register adapter\n");
- ret = PTR_ERR(mbc->adapter);
- return ret;
+ return PTR_ERR(mbc->adapter);
}
mbc->usb = power_supply_register(&pdev->dev, &pcf50633_mbc_usb_desc,
@@ -428,8 +426,7 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
if (IS_ERR(mbc->usb)) {
dev_err(mbc->pcf->dev, "failed to register usb\n");
power_supply_unregister(mbc->adapter);
- ret = PTR_ERR(mbc->usb);
- return ret;
+ return PTR_ERR(mbc->usb);
}
mbc->ac = power_supply_register(&pdev->dev, &pcf50633_mbc_ac_desc,
@@ -438,12 +435,10 @@ static int pcf50633_mbc_probe(struct platform_device *pdev)
dev_err(mbc->pcf->dev, "failed to register ac\n");
power_supply_unregister(mbc->adapter);
power_supply_unregister(mbc->usb);
- ret = PTR_ERR(mbc->ac);
- return ret;
+ return PTR_ERR(mbc->ac);
}
- ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
- if (ret)
+ if (sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group))
dev_err(mbc->pcf->dev, "failed to create sysfs entries\n");
mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1);
diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index b5896ba2a602..f6a0d245731d 100644
--- a/drivers/power/supply/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
@@ -35,6 +35,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/extcon.h>
+#include <linux/regulator/driver.h>
#define SMBB_CHG_VMAX 0x040
#define SMBB_CHG_VSAFE 0x041
@@ -72,6 +73,8 @@
#define BTC_CTRL_HOT_EXT_N BIT(0)
#define SMBB_USB_IMAX 0x344
+#define SMBB_USB_OTG_CTL 0x348
+#define OTG_CTL_EN BIT(0)
#define SMBB_USB_ENUM_TIMER_STOP 0x34e
#define ENUM_TIMER_STOP BIT(0)
#define SMBB_USB_SEC_ACCESS 0x3d0
@@ -125,6 +128,9 @@ struct smbb_charger {
struct power_supply *dc_psy;
struct power_supply *bat_psy;
struct regmap *regmap;
+
+ struct regulator_desc otg_rdesc;
+ struct regulator_dev *otg_reg;
};
static const unsigned int smbb_usb_extcon_cable[] = {
@@ -378,7 +384,7 @@ static irqreturn_t smbb_usb_valid_handler(int irq, void *_data)
struct smbb_charger *chg = _data;
smbb_set_line_flag(chg, irq, STATUS_USBIN_VALID);
- extcon_set_cable_state_(chg->edev, EXTCON_USB,
+ extcon_set_state_sync(chg->edev, EXTCON_USB,
chg->status & STATUS_USBIN_VALID);
power_supply_changed(chg->usb_psy);
@@ -787,12 +793,56 @@ static const struct power_supply_desc dc_psy_desc = {
.property_is_writeable = smbb_charger_writable_property,
};
+static int smbb_chg_otg_enable(struct regulator_dev *rdev)
+{
+ struct smbb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = regmap_update_bits(chg->regmap, chg->addr + SMBB_USB_OTG_CTL,
+ OTG_CTL_EN, OTG_CTL_EN);
+ if (rc)
+ dev_err(chg->dev, "failed to update OTG_CTL\n");
+ return rc;
+}
+
+static int smbb_chg_otg_disable(struct regulator_dev *rdev)
+{
+ struct smbb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = regmap_update_bits(chg->regmap, chg->addr + SMBB_USB_OTG_CTL,
+ OTG_CTL_EN, 0);
+ if (rc)
+ dev_err(chg->dev, "failed to update OTG_CTL\n");
+ return rc;
+}
+
+static int smbb_chg_otg_is_enabled(struct regulator_dev *rdev)
+{
+ struct smbb_charger *chg = rdev_get_drvdata(rdev);
+ unsigned int value = 0;
+ int rc;
+
+ rc = regmap_read(chg->regmap, chg->addr + SMBB_USB_OTG_CTL, &value);
+ if (rc)
+ dev_err(chg->dev, "failed to read OTG_CTL\n");
+
+ return !!(value & OTG_CTL_EN);
+}
+
+static const struct regulator_ops smbb_chg_otg_ops = {
+ .enable = smbb_chg_otg_enable,
+ .disable = smbb_chg_otg_disable,
+ .is_enabled = smbb_chg_otg_is_enabled,
+};
+
static int smbb_charger_probe(struct platform_device *pdev)
{
struct power_supply_config bat_cfg = {};
struct power_supply_config usb_cfg = {};
struct power_supply_config dc_cfg = {};
struct smbb_charger *chg;
+ struct regulator_config config = { };
int rc, i;
chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
@@ -905,6 +955,26 @@ static int smbb_charger_probe(struct platform_device *pdev)
}
}
+ /*
+ * otg regulator is used to control VBUS voltage direction
+ * when USB switches between host and gadget mode
+ */
+ chg->otg_rdesc.id = -1;
+ chg->otg_rdesc.name = "otg-vbus";
+ chg->otg_rdesc.ops = &smbb_chg_otg_ops;
+ chg->otg_rdesc.owner = THIS_MODULE;
+ chg->otg_rdesc.type = REGULATOR_VOLTAGE;
+ chg->otg_rdesc.supply_name = "usb-otg-in";
+ chg->otg_rdesc.of_match = "otg-vbus";
+
+ config.dev = &pdev->dev;
+ config.driver_data = chg;
+
+ chg->otg_reg = devm_regulator_register(&pdev->dev, &chg->otg_rdesc,
+ &config);
+ if (IS_ERR(chg->otg_reg))
+ return PTR_ERR(chg->otg_reg);
+
chg->jeita_ext_temp = of_property_read_bool(pdev->dev.of_node,
"qcom,jeita-extended-temp-range");
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
new file mode 100644
index 000000000000..353765a5f44c
--- /dev/null
+++ b/drivers/power/supply/sbs-charger.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2016, Prodys S.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This adds support for sbs-charger compilant chips as defined here:
+ * http://sbs-forum.org/specs/sbc110.pdf
+ *
+ * Implemetation based on sbs-battery.c
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+
+#define SBS_CHARGER_REG_SPEC_INFO 0x11
+#define SBS_CHARGER_REG_STATUS 0x13
+#define SBS_CHARGER_REG_ALARM_WARNING 0x16
+
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1)
+#define SBS_CHARGER_STATUS_RES_COLD BIT(9)
+#define SBS_CHARGER_STATUS_RES_HOT BIT(10)
+#define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14)
+#define SBS_CHARGER_STATUS_AC_PRESENT BIT(15)
+
+#define SBS_CHARGER_POLL_TIME 500
+
+struct sbs_info {
+ struct i2c_client *client;
+ struct power_supply *power_supply;
+ struct regmap *regmap;
+ struct delayed_work work;
+ unsigned int last_state;
+};
+
+static int sbs_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sbs_info *chip = power_supply_get_drvdata(psy);
+ unsigned int reg;
+
+ reg = chip->last_state;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!(reg & SBS_CHARGER_STATUS_BATTERY_PRESENT);
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = !!(reg & SBS_CHARGER_STATUS_AC_PRESENT);
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+
+ if (!(reg & SBS_CHARGER_STATUS_BATTERY_PRESENT))
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (reg & SBS_CHARGER_STATUS_AC_PRESENT &&
+ !(reg & SBS_CHARGER_STATUS_CHARGE_INHIBITED))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (reg & SBS_CHARGER_STATUS_RES_COLD)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ if (reg & SBS_CHARGER_STATUS_RES_HOT)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sbs_check_state(struct sbs_info *chip)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(chip->regmap, SBS_CHARGER_REG_STATUS, &reg);
+ if (!ret && reg != chip->last_state) {
+ chip->last_state = reg;
+ power_supply_changed(chip->power_supply);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void sbs_delayed_work(struct work_struct *work)
+{
+ struct sbs_info *chip = container_of(work, struct sbs_info, work.work);
+
+ sbs_check_state(chip);
+
+ schedule_delayed_work(&chip->work,
+ msecs_to_jiffies(SBS_CHARGER_POLL_TIME));
+}
+
+static irqreturn_t sbs_irq_thread(int irq, void *data)
+{
+ struct sbs_info *chip = data;
+ int ret;
+
+ ret = sbs_check_state(chip);
+
+ return ret ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static enum power_supply_property sbs_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static bool sbs_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < SBS_CHARGER_REG_SPEC_INFO)
+ return false;
+ else
+ return true;
+}
+
+static bool sbs_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SBS_CHARGER_REG_STATUS:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config sbs_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = SBS_CHARGER_REG_ALARM_WARNING,
+ .readable_reg = sbs_readable_reg,
+ .volatile_reg = sbs_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE, /* since based on SMBus */
+};
+
+static const struct power_supply_desc sbs_desc = {
+ .name = "sbs-charger",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sbs_properties,
+ .num_properties = ARRAY_SIZE(sbs_properties),
+ .get_property = sbs_get_property,
+};
+
+static int sbs_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct power_supply_config psy_cfg = {};
+ struct sbs_info *chip;
+ int ret, val;
+
+ chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ psy_cfg.of_node = client->dev.of_node;
+ psy_cfg.drv_data = chip;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &sbs_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ /*
+ * Before we register, we need to make sure we can actually talk
+ * to the battery.
+ */
+ ret = regmap_read(chip->regmap, SBS_CHARGER_REG_STATUS, &val);
+ if (ret) {
+ dev_err(&client->dev, "Failed to get device status\n");
+ return ret;
+ }
+ chip->last_state = val;
+
+ chip->power_supply = devm_power_supply_register(&client->dev, &sbs_desc,
+ &psy_cfg);
+ if (IS_ERR(chip->power_supply)) {
+ dev_err(&client->dev, "Failed to register power supply\n");
+ return PTR_ERR(chip->power_supply);
+ }
+
+ /*
+ * The sbs-charger spec doesn't impose the use of an interrupt. So in
+ * the case it wasn't provided we use polling in order get the charger's
+ * status.
+ */
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, sbs_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+ dev_err(&client->dev, "Failed to request irq, %d\n", ret);
+ return ret;
+ }
+ } else {
+ INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
+ schedule_delayed_work(&chip->work,
+ msecs_to_jiffies(SBS_CHARGER_POLL_TIME));
+ }
+
+ dev_info(&client->dev,
+ "%s: smart charger device registered\n", client->name);
+
+ return 0;
+}
+
+static int sbs_remove(struct i2c_client *client)
+{
+ struct sbs_info *chip = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&chip->work);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sbs_dt_ids[] = {
+ { .compatible = "sbs,sbs-charger" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sbs_dt_ids);
+#endif
+
+static const struct i2c_device_id sbs_id[] = {
+ { "sbs-charger", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sbs_id);
+
+static struct i2c_driver sbs_driver = {
+ .probe = sbs_probe,
+ .remove = sbs_remove,
+ .id_table = sbs_id,
+ .driver = {
+ .name = "sbs-charger",
+ .of_match_table = of_match_ptr(sbs_dt_ids),
+ },
+};
+module_i2c_driver(sbs_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nicolassaenzj@gmail.com>");
+MODULE_DESCRIPTION("SBS smart charger driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index 9fd019f9b88c..29b61e81b385 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -35,22 +35,22 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
+#define CHARGER_STATUS_PRESENT (TPS65217_STATUS_ACPWR | TPS65217_STATUS_USBPWR)
+#define NUM_CHARGER_IRQS 2
#define POLL_INTERVAL (HZ * 2)
struct tps65217_charger {
struct tps65217 *tps;
struct device *dev;
- struct power_supply *ac;
+ struct power_supply *psy;
- int ac_online;
- int prev_ac_online;
+ int online;
+ int prev_online;
struct task_struct *poll_task;
-
- int irq;
};
-static enum power_supply_property tps65217_ac_props[] = {
+static enum power_supply_property tps65217_charger_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
@@ -95,7 +95,7 @@ static int tps65217_enable_charging(struct tps65217_charger *charger)
int ret;
/* charger already enabled */
- if (charger->ac_online)
+ if (charger->online)
return 0;
dev_dbg(charger->dev, "%s: enable charging\n", __func__);
@@ -110,19 +110,19 @@ static int tps65217_enable_charging(struct tps65217_charger *charger)
return ret;
}
- charger->ac_online = 1;
+ charger->online = 1;
return 0;
}
-static int tps65217_ac_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int tps65217_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
struct tps65217_charger *charger = power_supply_get_drvdata(psy);
if (psp == POWER_SUPPLY_PROP_ONLINE) {
- val->intval = charger->ac_online;
+ val->intval = charger->online;
return 0;
}
return -EINVAL;
@@ -133,7 +133,7 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
int ret, val;
struct tps65217_charger *charger = dev;
- charger->prev_ac_online = charger->ac_online;
+ charger->prev_online = charger->online;
ret = tps65217_reg_read(charger->tps, TPS65217_REG_STATUS, &val);
if (ret < 0) {
@@ -144,8 +144,8 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
dev_dbg(charger->dev, "%s: 0x%x\n", __func__, val);
- /* check for AC status bit */
- if (val & TPS65217_STATUS_ACPWR) {
+ /* check for charger status bit */
+ if (val & CHARGER_STATUS_PRESENT) {
ret = tps65217_enable_charging(charger);
if (ret) {
dev_err(charger->dev,
@@ -153,11 +153,11 @@ static irqreturn_t tps65217_charger_irq(int irq, void *dev)
return IRQ_HANDLED;
}
} else {
- charger->ac_online = 0;
+ charger->online = 0;
}
- if (charger->prev_ac_online != charger->ac_online)
- power_supply_changed(charger->ac);
+ if (charger->prev_online != charger->online)
+ power_supply_changed(charger->psy);
ret = tps65217_reg_read(charger->tps, TPS65217_REG_CHGCONFIG0, &val);
if (ret < 0) {
@@ -188,11 +188,11 @@ static int tps65217_charger_poll_task(void *data)
}
static const struct power_supply_desc tps65217_charger_desc = {
- .name = "tps65217-ac",
+ .name = "tps65217-charger",
.type = POWER_SUPPLY_TYPE_MAINS,
- .get_property = tps65217_ac_get_property,
- .properties = tps65217_ac_props,
- .num_properties = ARRAY_SIZE(tps65217_ac_props),
+ .get_property = tps65217_charger_get_property,
+ .properties = tps65217_charger_props,
+ .num_properties = ARRAY_SIZE(tps65217_charger_props),
};
static int tps65217_charger_probe(struct platform_device *pdev)
@@ -200,8 +200,10 @@ static int tps65217_charger_probe(struct platform_device *pdev)
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_charger *charger;
struct power_supply_config cfg = {};
- int irq;
+ struct task_struct *poll_task;
+ int irq[NUM_CHARGER_IRQS];
int ret;
+ int i;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -216,18 +218,16 @@ static int tps65217_charger_probe(struct platform_device *pdev)
cfg.of_node = pdev->dev.of_node;
cfg.drv_data = charger;
- charger->ac = devm_power_supply_register(&pdev->dev,
- &tps65217_charger_desc,
- &cfg);
- if (IS_ERR(charger->ac)) {
+ charger->psy = devm_power_supply_register(&pdev->dev,
+ &tps65217_charger_desc,
+ &cfg);
+ if (IS_ERR(charger->psy)) {
dev_err(&pdev->dev, "failed: power supply register\n");
- return PTR_ERR(charger->ac);
+ return PTR_ERR(charger->psy);
}
- irq = platform_get_irq_byname(pdev, "AC");
- if (irq < 0)
- irq = -ENXIO;
- charger->irq = irq;
+ irq[0] = platform_get_irq_byname(pdev, "USB");
+ irq[1] = platform_get_irq_byname(pdev, "AC");
ret = tps65217_config_charger(charger);
if (ret < 0) {
@@ -235,29 +235,36 @@ static int tps65217_charger_probe(struct platform_device *pdev)
return ret;
}
- if (irq != -ENXIO) {
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ /* Create a polling thread if an interrupt is invalid */
+ if (irq[0] < 0 || irq[1] < 0) {
+ poll_task = kthread_run(tps65217_charger_poll_task,
+ charger, "ktps65217charger");
+ if (IS_ERR(poll_task)) {
+ ret = PTR_ERR(poll_task);
+ dev_err(charger->dev,
+ "Unable to run kthread err %d\n", ret);
+ return ret;
+ }
+
+ charger->poll_task = poll_task;
+ return 0;
+ }
+
+ /* Create IRQ threads for charger interrupts */
+ for (i = 0; i < NUM_CHARGER_IRQS; i++) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
tps65217_charger_irq,
0, "tps65217-charger",
charger);
if (ret) {
dev_err(charger->dev,
- "Unable to register irq %d err %d\n", irq,
+ "Unable to register irq %d err %d\n", irq[i],
ret);
return ret;
}
/* Check current state */
- tps65217_charger_irq(irq, charger);
- } else {
- charger->poll_task = kthread_run(tps65217_charger_poll_task,
- charger, "ktps65217charger");
- if (IS_ERR(charger->poll_task)) {
- ret = PTR_ERR(charger->poll_task);
- dev_err(charger->dev,
- "Unable to run kthread err %d\n", ret);
- return ret;
- }
+ tps65217_charger_irq(-1, charger);
}
return 0;
@@ -267,7 +274,7 @@ static int tps65217_charger_remove(struct platform_device *pdev)
{
struct tps65217_charger *charger = platform_get_drvdata(pdev);
- if (charger->irq == -ENXIO)
+ if (charger->poll_task)
kthread_stop(charger->poll_task);
return 0;
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index e3edb31ac880..bd4f66651513 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -175,11 +175,6 @@ static int wm97xx_bat_probe(struct platform_device *dev)
if (dev->id != -1)
return -EINVAL;
- if (!pdata) {
- dev_err(&dev->dev, "No platform_data supplied\n");
- return -EINVAL;
- }
-
if (gpio_is_valid(pdata->charge_gpio)) {
ret = gpio_request(pdata->charge_gpio, "BATT CHRG");
if (ret)
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 9c13381b6966..e8142803a1a7 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -221,18 +221,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
+ err = ptp_populate_pin_groups(ptp);
+ if (err)
+ goto no_pin_groups;
+
/* Create a new device in our class. */
- ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
- "ptp%d", ptp->index);
+ ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
+ ptp, ptp->pin_attr_groups,
+ "ptp%d", ptp->index);
if (IS_ERR(ptp->dev))
goto no_device;
- dev_set_drvdata(ptp->dev, ptp);
-
- err = ptp_populate_sysfs(ptp);
- if (err)
- goto no_sysfs;
-
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@@ -260,10 +259,10 @@ no_clock:
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
- ptp_cleanup_sysfs(ptp);
-no_sysfs:
device_destroy(ptp_class, ptp->devid);
no_device:
+ ptp_cleanup_pin_groups(ptp);
+no_pin_groups:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, index);
@@ -282,8 +281,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
/* Release the clock's resources. */
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- ptp_cleanup_sysfs(ptp);
+
device_destroy(ptp_class, ptp->devid);
+ ptp_cleanup_pin_groups(ptp);
posix_clock_unregister(&ptp->clock);
return 0;
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 9c5d41421b65..d95888974d0c 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -54,6 +54,8 @@ struct ptp_clock {
struct device_attribute *pin_dev_attr;
struct attribute **pin_attr;
struct attribute_group pin_attr_group;
+ /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
+ const struct attribute_group *pin_attr_groups[2];
};
/*
@@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc,
extern const struct attribute_group *ptp_groups[];
-int ptp_cleanup_sysfs(struct ptp_clock *ptp);
-
-int ptp_populate_sysfs(struct ptp_clock *ptp);
+int ptp_populate_pin_groups(struct ptp_clock *ptp);
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
#endif
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 53d43954a974..48401dfcd999 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out);
PTP_SHOW_INT(n_programmable_pins, n_pins);
PTP_SHOW_INT(pps_available, pps);
-static struct attribute *ptp_attrs[] = {
- &dev_attr_clock_name.attr,
- &dev_attr_max_adjustment.attr,
- &dev_attr_n_alarms.attr,
- &dev_attr_n_external_timestamps.attr,
- &dev_attr_n_periodic_outputs.attr,
- &dev_attr_n_programmable_pins.attr,
- &dev_attr_pps_available.attr,
- NULL,
-};
-
-static const struct attribute_group ptp_group = {
- .attrs = ptp_attrs,
-};
-
-const struct attribute_group *ptp_groups[] = {
- &ptp_group,
- NULL,
-};
-
-
static ssize_t extts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev,
out:
return err;
}
+static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static ssize_t extts_fifo_show(struct device *dev,
struct device_attribute *attr, char *page)
@@ -124,6 +104,7 @@ out:
mutex_unlock(&ptp->tsevq_mux);
return cnt;
}
+static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
static ssize_t period_store(struct device *dev,
struct device_attribute *attr,
@@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev,
out:
return err;
}
+static DEVICE_ATTR(period, 0220, NULL, period_store);
static ssize_t pps_enable_store(struct device *dev,
struct device_attribute *attr,
@@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev,
out:
return err;
}
+static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
+
+static struct attribute *ptp_attrs[] = {
+ &dev_attr_clock_name.attr,
+
+ &dev_attr_max_adjustment.attr,
+ &dev_attr_n_alarms.attr,
+ &dev_attr_n_external_timestamps.attr,
+ &dev_attr_n_periodic_outputs.attr,
+ &dev_attr_n_programmable_pins.attr,
+ &dev_attr_pps_available.attr,
+
+ &dev_attr_extts_enable.attr,
+ &dev_attr_fifo.attr,
+ &dev_attr_period.attr,
+ &dev_attr_pps_enable.attr,
+ NULL
+};
+
+static umode_t ptp_is_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *info = ptp->info;
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_extts_enable.attr ||
+ attr == &dev_attr_fifo.attr) {
+ if (!info->n_ext_ts)
+ mode = 0;
+ } else if (attr == &dev_attr_period.attr) {
+ if (!info->n_per_out)
+ mode = 0;
+ } else if (attr == &dev_attr_pps_enable.attr) {
+ if (!info->pps)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static const struct attribute_group ptp_group = {
+ .is_visible = ptp_is_attribute_visible,
+ .attrs = ptp_attrs,
+};
+
+const struct attribute_group *ptp_groups[] = {
+ &ptp_group,
+ NULL
+};
static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
{
@@ -235,47 +268,20 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
-static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
-static DEVICE_ATTR(period, 0220, NULL, period_store);
-static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
-
-int ptp_cleanup_sysfs(struct ptp_clock *ptp)
+int ptp_populate_pin_groups(struct ptp_clock *ptp)
{
- struct device *dev = ptp->dev;
- struct ptp_clock_info *info = ptp->info;
-
- if (info->n_ext_ts) {
- device_remove_file(dev, &dev_attr_extts_enable);
- device_remove_file(dev, &dev_attr_fifo);
- }
- if (info->n_per_out)
- device_remove_file(dev, &dev_attr_period);
-
- if (info->pps)
- device_remove_file(dev, &dev_attr_pps_enable);
-
- if (info->n_pins) {
- sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
- kfree(ptp->pin_attr);
- kfree(ptp->pin_dev_attr);
- }
- return 0;
-}
-
-static int ptp_populate_pins(struct ptp_clock *ptp)
-{
- struct device *dev = ptp->dev;
struct ptp_clock_info *info = ptp->info;
int err = -ENOMEM, i, n_pins = info->n_pins;
- ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+ if (!n_pins)
+ return 0;
+
+ ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr),
GFP_KERNEL);
if (!ptp->pin_dev_attr)
goto no_dev_attr;
- ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *),
- GFP_KERNEL);
+ ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL);
if (!ptp->pin_attr)
goto no_pin_attr;
@@ -292,61 +298,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
ptp->pin_attr_group.name = "pins";
ptp->pin_attr_group.attrs = ptp->pin_attr;
- err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
- if (err)
- goto no_group;
+ ptp->pin_attr_groups[0] = &ptp->pin_attr_group;
+
return 0;
-no_group:
- kfree(ptp->pin_attr);
no_pin_attr:
kfree(ptp->pin_dev_attr);
no_dev_attr:
return err;
}
-int ptp_populate_sysfs(struct ptp_clock *ptp)
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp)
{
- struct device *dev = ptp->dev;
- struct ptp_clock_info *info = ptp->info;
- int err;
-
- if (info->n_ext_ts) {
- err = device_create_file(dev, &dev_attr_extts_enable);
- if (err)
- goto out1;
- err = device_create_file(dev, &dev_attr_fifo);
- if (err)
- goto out2;
- }
- if (info->n_per_out) {
- err = device_create_file(dev, &dev_attr_period);
- if (err)
- goto out3;
- }
- if (info->pps) {
- err = device_create_file(dev, &dev_attr_pps_enable);
- if (err)
- goto out4;
- }
- if (info->n_pins) {
- err = ptp_populate_pins(ptp);
- if (err)
- goto out5;
- }
- return 0;
-out5:
- if (info->pps)
- device_remove_file(dev, &dev_attr_pps_enable);
-out4:
- if (info->n_per_out)
- device_remove_file(dev, &dev_attr_period);
-out3:
- if (info->n_ext_ts)
- device_remove_file(dev, &dev_attr_fifo);
-out2:
- if (info->n_ext_ts)
- device_remove_file(dev, &dev_attr_extts_enable);
-out1:
- return err;
+ kfree(ptp->pin_attr);
+ kfree(ptp->pin_dev_attr);
}
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index a62a89674fb5..89bbd6e8bad1 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -180,7 +180,7 @@ static int pm800_get_current_limit(struct regulator_dev *rdev)
return info->max_ua;
}
-static struct regulator_ops pm800_volt_range_ops = {
+static const struct regulator_ops pm800_volt_range_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -191,7 +191,7 @@ static struct regulator_ops pm800_volt_range_ops = {
.get_current_limit = pm800_get_current_limit,
};
-static struct regulator_ops pm800_volt_table_ops = {
+static const struct regulator_ops pm800_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index b100a63ff3b3..fd86446e499b 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -220,7 +220,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
return ret;
}
-static struct regulator_ops pm8607_regulator_ops = {
+static const struct regulator_ops pm8607_regulator_ops = {
.list_voltage = pm8607_list_voltage,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -229,7 +229,7 @@ static struct regulator_ops pm8607_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops pm8606_preg_ops = {
+static const struct regulator_ops pm8606_preg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 936f7ccc9736..be06eb29c681 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -163,6 +163,13 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
+config REGULATOR_CPCAP
+ tristate "Motorola CPCAP regulator"
+ depends on MFD_CPCAP
+ help
+	  Say y here for the CPCAP regulator found on some Motorola phones
+	  and tablets such as the Droid 4.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 14294692beb9..ef7725e2592a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 9dfabda8f478..afc5b5900181 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -97,7 +97,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
return val & ri->enable_mask ? 1 : 0;
}
-static struct regulator_ops aat2870_ldo_ops = {
+static const struct regulator_ops aat2870_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 441864b9fece..43fda8b4455a 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -69,7 +69,7 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
};
-static struct regulator_ops act8945a_ops = {
+static const struct regulator_ops act8945a_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 8b0f788a9bbb..11c1f880b7bb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -181,7 +181,7 @@ static int ad5398_disable(struct regulator_dev *rdev)
return ret;
}
-static struct regulator_ops ad5398_ops = {
+static const struct regulator_ops ad5398_ops = {
.get_current_limit = ad5398_get_current_limit,
.set_current_limit = ad5398_set_current_limit,
.enable = ad5398_enable,
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3a6d0290c54c..b041f277a38b 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -301,7 +301,19 @@ static int anatop_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
} else {
+ u32 enable_bit;
+
rdesc->ops = &anatop_rops;
+
+ if (!of_property_read_u32(np, "anatop-enable-bit",
+ &enable_bit)) {
+ anatop_rops.enable = regulator_enable_regmap;
+ anatop_rops.disable = regulator_disable_regmap;
+ anatop_rops.is_enabled = regulator_is_enabled_regmap;
+
+ rdesc->enable_reg = sreg->control_reg;
+ rdesc->enable_mask = BIT(enable_bit);
+ }
}
/* register regulator */
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 302b57cb89c6..e76d094591e7 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -109,7 +109,7 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT;
}
-static struct regulator_ops arizona_ldo1_hc_ops = {
+static const struct regulator_ops arizona_ldo1_hc_ops = {
.list_voltage = arizona_ldo1_hc_list_voltage,
.map_voltage = arizona_ldo1_hc_map_voltage,
.get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
@@ -135,7 +135,7 @@ static const struct regulator_desc arizona_ldo1_hc = {
.owner = THIS_MODULE,
};
-static struct regulator_ops arizona_ldo1_ops = {
+static const struct regulator_ops arizona_ldo1_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index fcb98dbda837..22bd71407622 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -45,6 +45,7 @@ static void arizona_micsupp_check_cp(struct work_struct *work)
struct arizona_micsupp *micsupp =
container_of(work, struct arizona_micsupp, check_cp_work);
struct snd_soc_dapm_context *dapm = micsupp->arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct arizona *arizona = micsupp->arizona;
struct regmap *regmap = arizona->regmap;
unsigned int reg;
@@ -59,9 +60,10 @@ static void arizona_micsupp_check_cp(struct work_struct *work)
if (dapm) {
if ((reg & (ARIZONA_CPMIC_ENA | ARIZONA_CPMIC_BYPASS)) ==
ARIZONA_CPMIC_ENA)
- snd_soc_dapm_force_enable_pin(dapm, "MICSUPP");
+ snd_soc_component_force_enable_pin(component,
+ "MICSUPP");
else
- snd_soc_dapm_disable_pin(dapm, "MICSUPP");
+ snd_soc_component_disable_pin(component, "MICSUPP");
snd_soc_dapm_sync(dapm);
}
@@ -104,7 +106,7 @@ static int arizona_micsupp_set_bypass(struct regulator_dev *rdev, bool ena)
return ret;
}
-static struct regulator_ops arizona_micsupp_ops = {
+static const struct regulator_ops arizona_micsupp_ops = {
.enable = arizona_micsupp_enable,
.disable = arizona_micsupp_disable,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index c0e93b1332f7..874d415d6b4f 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -82,7 +82,7 @@ static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
return -EINVAL;
}
-static struct regulator_ops as3711_sd_ops = {
+static const struct regulator_ops as3711_sd_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -94,7 +94,7 @@ static struct regulator_ops as3711_sd_ops = {
.set_mode = as3711_set_mode_sd,
};
-static struct regulator_ops as3711_aldo_ops = {
+static const struct regulator_ops as3711_aldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -104,7 +104,7 @@ static struct regulator_ops as3711_aldo_ops = {
.map_voltage = regulator_map_voltage_linear_range,
};
-static struct regulator_ops as3711_dldo_ops = {
+static const struct regulator_ops as3711_dldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index e6a512ebeae2..0b9d4e3e52c7 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -128,11 +128,11 @@
.ops = &axp20x_ops_range, \
}
-static struct regulator_ops axp20x_ops_fixed = {
+static const struct regulator_ops axp20x_ops_fixed = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops axp20x_ops_range = {
+static const struct regulator_ops axp20x_ops_range = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
@@ -141,7 +141,7 @@ static struct regulator_ops axp20x_ops_range = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops axp20x_ops = {
+static const struct regulator_ops axp20x_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
@@ -150,7 +150,7 @@ static struct regulator_ops axp20x_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops axp20x_ops_sw = {
+static const struct regulator_ops axp20x_ops_sw = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
BIT(3)),
AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 76b01835dcb4..9dd715407b39 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -250,7 +250,7 @@ static int bcm590xx_get_enable_register(int id)
return reg;
}
-static struct regulator_ops bcm590xx_ops_ldo = {
+static const struct regulator_ops bcm590xx_ops_ldo = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -260,7 +260,7 @@ static struct regulator_ops bcm590xx_ops_ldo = {
.map_voltage = regulator_map_voltage_iterate,
};
-static struct regulator_ops bcm590xx_ops_dcdc = {
+static const struct regulator_ops bcm590xx_ops_dcdc = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -270,7 +270,7 @@ static struct regulator_ops bcm590xx_ops_dcdc = {
.map_voltage = regulator_map_voltage_linear_range,
};
-static struct regulator_ops bcm590xx_ops_vbus = {
+static const struct regulator_ops bcm590xx_ops_vbus = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 04baac9a165b..53d4fc70dbd0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1455,12 +1455,14 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
* lookup could succeed in the future.
*
* If successful, returns a struct regulator_dev that corresponds to the name
- * @supply and with the embedded struct device refcount incremented by one,
- * or NULL on failure. The refcount must be dropped by calling put_device().
+ * @supply and with the embedded struct device refcount incremented by one.
+ * The refcount must be dropped by calling put_device().
+ * On failure, one of the following ERR-PTR-encoded values is returned:
+ * -ENODEV if lookup fails permanently, -EPROBE_DEFER if lookup could succeed
+ * in the future.
*/
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
- const char *supply,
- int *ret)
+ const char *supply)
{
struct regulator_dev *r;
struct device_node *node;
@@ -1476,16 +1478,12 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
r = of_find_regulator_by_node(node);
if (r)
return r;
- *ret = -EPROBE_DEFER;
- return NULL;
- } else {
+
/*
- * If we couldn't even get the node then it's
- * not just that the device didn't register
- * yet, there's no node and we'll never
- * succeed.
+			 * We have a node, but there is no device.
+			 * Assume it has not registered yet.
*/
- *ret = -ENODEV;
+ return ERR_PTR(-EPROBE_DEFER);
}
}
@@ -1506,13 +1504,16 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (strcmp(map->supply, supply) == 0 &&
get_device(&map->regulator->dev)) {
- mutex_unlock(&regulator_list_mutex);
- return map->regulator;
+ r = map->regulator;
+ break;
}
}
mutex_unlock(&regulator_list_mutex);
- return NULL;
+ if (r)
+ return r;
+
+ return ERR_PTR(-ENODEV);
}
static int regulator_resolve_supply(struct regulator_dev *rdev)
@@ -1529,8 +1530,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (rdev->supply)
return 0;
- r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
- if (!r) {
+ r = regulator_dev_lookup(dev, rdev->supply_name);
+ if (IS_ERR(r)) {
+ ret = PTR_ERR(r);
+
if (ret == -ENODEV) {
/*
* No supply was specified for this regulator and
@@ -1553,6 +1556,19 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
}
+ /*
+ * If the supply's parent device is not the same as the
+ * regulator's parent device, then ensure the parent device
+ * is bound before we resolve the supply, in case the parent
+	 * device gets probe deferred and unregisters the supply.
+ */
+ if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
+ if (!device_is_bound(r->dev.parent)) {
+ put_device(&r->dev);
+ return -EPROBE_DEFER;
+ }
+ }
+
/* Recursively resolve the supply of the supply */
ret = regulator_resolve_supply(r);
if (ret < 0) {
@@ -1580,69 +1596,72 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
/* Internal regulator request function */
-static struct regulator *_regulator_get(struct device *dev, const char *id,
- bool exclusive, bool allow_dummy)
+struct regulator *_regulator_get(struct device *dev, const char *id,
+ enum regulator_get_type get_type)
{
struct regulator_dev *rdev;
- struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
- const char *devname = NULL;
+ struct regulator *regulator;
+ const char *devname = dev ? dev_name(dev) : "deviceless";
int ret;
+ if (get_type >= MAX_GET_TYPE) {
+ dev_err(dev, "invalid type %d in %s\n", get_type, __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
if (id == NULL) {
pr_err("get() with no identifier\n");
return ERR_PTR(-EINVAL);
}
- if (dev)
- devname = dev_name(dev);
+ rdev = regulator_dev_lookup(dev, id);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
- if (have_full_constraints())
- ret = -ENODEV;
- else
- ret = -EPROBE_DEFER;
-
- rdev = regulator_dev_lookup(dev, id, &ret);
- if (rdev)
- goto found;
-
- regulator = ERR_PTR(ret);
+ /*
+		 * If regulator_dev_lookup() fails with an error other
+		 * than -ENODEV, our job here is done; we simply return it.
+ */
+ if (ret != -ENODEV)
+ return ERR_PTR(ret);
- /*
- * If we have return value from dev_lookup fail, we do not expect to
- * succeed, so, quit with appropriate error value
- */
- if (ret && ret != -ENODEV)
- return regulator;
+ if (!have_full_constraints()) {
+ dev_warn(dev,
+ "incomplete constraints, dummy supplies not allowed\n");
+ return ERR_PTR(-ENODEV);
+ }
- if (!devname)
- devname = "deviceless";
+ switch (get_type) {
+ case NORMAL_GET:
+ /*
+ * Assume that a regulator is physically present and
+ * enabled, even if it isn't hooked up, and just
+ * provide a dummy.
+ */
+ dev_warn(dev,
+ "%s supply %s not found, using dummy regulator\n",
+ devname, id);
+ rdev = dummy_regulator_rdev;
+ get_device(&rdev->dev);
+ break;
- /*
- * Assume that a regulator is physically present and enabled
- * even if it isn't hooked up and just provide a dummy.
- */
- if (have_full_constraints() && allow_dummy) {
- pr_warn("%s supply %s not found, using dummy regulator\n",
- devname, id);
+ case EXCLUSIVE_GET:
+ dev_warn(dev,
+ "dummy supplies not allowed for exclusive requests\n");
+ /* fall through */
- rdev = dummy_regulator_rdev;
- get_device(&rdev->dev);
- goto found;
- /* Don't log an error when called from regulator_get_optional() */
- } else if (!have_full_constraints() || exclusive) {
- dev_warn(dev, "dummy supplies not allowed\n");
+ default:
+ return ERR_PTR(-ENODEV);
+ }
}
- return regulator;
-
-found:
if (rdev->exclusive) {
regulator = ERR_PTR(-EPERM);
put_device(&rdev->dev);
return regulator;
}
- if (exclusive && rdev->open_count) {
+ if (get_type == EXCLUSIVE_GET && rdev->open_count) {
regulator = ERR_PTR(-EBUSY);
put_device(&rdev->dev);
return regulator;
@@ -1656,6 +1675,7 @@ found:
}
if (!try_module_get(rdev->owner)) {
+ regulator = ERR_PTR(-EPROBE_DEFER);
put_device(&rdev->dev);
return regulator;
}
@@ -1669,7 +1689,7 @@ found:
}
rdev->open_count++;
- if (exclusive) {
+ if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
ret = _regulator_is_enabled(rdev);
@@ -1697,7 +1717,7 @@ found:
*/
struct regulator *regulator_get(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, false, true);
+ return _regulator_get(dev, id, NORMAL_GET);
}
EXPORT_SYMBOL_GPL(regulator_get);
@@ -1724,7 +1744,7 @@ EXPORT_SYMBOL_GPL(regulator_get);
*/
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, true, false);
+ return _regulator_get(dev, id, EXCLUSIVE_GET);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
@@ -1750,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
*/
struct regulator *regulator_get_optional(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, false, false);
+ return _regulator_get(dev, id, OPTIONAL_GET);
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
@@ -3660,7 +3680,7 @@ err:
for (++i; i < num_consumers; ++i) {
r = regulator_enable(consumers[i].consumer);
if (r != 0)
- pr_err("Failed to reename %s: %d\n",
+ pr_err("Failed to re-enable %s: %d\n",
consumers[i].supply, r);
}
@@ -3686,21 +3706,17 @@ int regulator_bulk_force_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
int i;
- int ret;
+ int ret = 0;
- for (i = 0; i < num_consumers; i++)
+ for (i = 0; i < num_consumers; i++) {
consumers[i].ret =
regulator_force_disable(consumers[i].consumer);
- for (i = 0; i < num_consumers; i++) {
- if (consumers[i].ret != 0) {
+ /* Store first error for reporting */
+ if (consumers[i].ret && !ret)
ret = consumers[i].ret;
- goto out;
- }
}
- return 0;
-out:
return ret;
}
EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
@@ -4391,12 +4407,13 @@ static void regulator_summary_show_subtree(struct seq_file *s,
seq_puts(s, "\n");
list_for_each_entry(consumer, &rdev->consumer_list, list) {
- if (consumer->dev->class == &regulator_class)
+ if (consumer->dev && consumer->dev->class == &regulator_class)
continue;
seq_printf(s, "%*s%-*s ",
(level + 1) * 3 + 1, "",
- 30 - (level + 1) * 3, dev_name(consumer->dev));
+ 30 - (level + 1) * 3,
+ consumer->dev ? dev_name(consumer->dev) : "deviceless");
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
@@ -4540,6 +4557,16 @@ static int __init regulator_init_complete(void)
if (of_have_populated_dt())
has_full_constraints = true;
+ /*
+	 * Regulators may have failed to resolve their input supplies
+	 * when they were registered, either because the input supply was
+ * not registered yet or because its parent device was not
+ * bound yet. So attempt to resolve the input supplies for
+ * pending regulators before trying to disable unused ones.
+ */
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_register_resolve_supply);
+
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
new file mode 100644
index 000000000000..cc98aceed1c1
--- /dev/null
+++ b/drivers/regulator/cpcap-regulator.c
@@ -0,0 +1,464 @@
+/*
+ * Motorola CPCAP PMIC regulator driver
+ *
+ * Based on cpcap-regulator.c from Motorola Linux kernel tree
+ * Copyright (C) 2009-2011 Motorola, Inc.
+ *
+ * Rewritten for mainline kernel to use device tree and regmap
+ * Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/motorola-cpcap.h>
+
+/*
+ * Resource assignment register bits. These seem to control the state
+ * idle modes adn are used at least for omap4.
+ */
+
+/* CPCAP_REG_ASSIGN2 bits - Resource Assignment 2 */
+#define CPCAP_BIT_VSDIO_SEL BIT(15)
+#define CPCAP_BIT_VDIG_SEL BIT(14)
+#define CPCAP_BIT_VCAM_SEL BIT(13)
+#define CPCAP_BIT_SW6_SEL BIT(12)
+#define CPCAP_BIT_SW5_SEL BIT(11)
+#define CPCAP_BIT_SW4_SEL BIT(10)
+#define CPCAP_BIT_SW3_SEL BIT(9)
+#define CPCAP_BIT_SW2_SEL BIT(8)
+#define CPCAP_BIT_SW1_SEL BIT(7)
+
+/* CPCAP_REG_ASSIGN3 bits - Resource Assignment 3 */
+#define CPCAP_BIT_VUSBINT2_SEL BIT(15)
+#define CPCAP_BIT_VUSBINT1_SEL BIT(14)
+#define CPCAP_BIT_VVIB_SEL BIT(13)
+#define CPCAP_BIT_VWLAN1_SEL BIT(12)
+#define CPCAP_BIT_VRF1_SEL BIT(11)
+#define CPCAP_BIT_VHVIO_SEL BIT(10)
+#define CPCAP_BIT_VDAC_SEL BIT(9)
+#define CPCAP_BIT_VUSB_SEL BIT(8)
+#define CPCAP_BIT_VSIM_SEL BIT(7)
+#define CPCAP_BIT_VRFREF_SEL BIT(6)
+#define CPCAP_BIT_VPLL_SEL BIT(5)
+#define CPCAP_BIT_VFUSE_SEL BIT(4)
+#define CPCAP_BIT_VCSI_SEL BIT(3)
+#define CPCAP_BIT_SPARE_14_2 BIT(2)
+#define CPCAP_BIT_VWLAN2_SEL BIT(1)
+#define CPCAP_BIT_VRF2_SEL BIT(0)
+
+/* CPCAP_REG_ASSIGN4 bits - Resource Assignment 4 */
+#define CPCAP_BIT_VAUDIO_SEL BIT(0)
+
+/*
+ * Enable register bits. At least CPCAP_BIT_AUDIO_LOW_PWR is generic,
+ * and not limited to the audio regulator. Let's use the Motorola kernel
+ * naming for now until we have a better understanding of the other
+ * enable register bits. No idea why BIT(3) is not defined.
+ */
+#define CPCAP_BIT_AUDIO_LOW_PWR BIT(6)
+#define CPCAP_BIT_AUD_LOWPWR_SPEED BIT(5)
+#define CPCAP_BIT_VAUDIOPRISTBY BIT(4)
+#define CPCAP_BIT_VAUDIO_MODE1 BIT(2)
+#define CPCAP_BIT_VAUDIO_MODE0 BIT(1)
+#define CPCAP_BIT_V_AUDIO_EN BIT(0)
+
+/*
+ * Off mode configuration bit. Used currently only by SW5 on omap4. There's
+ * the following comment in the Motorola Linux kernel tree for it:
+ *
+ * When set in the regulator mode, the regulator assignment will be changed
+ * to secondary when the regulator is disabled. The mode will be set back to
+ * primary when the regulator is turned on.
+ */
+#define CPCAP_REG_OFF_MODE_SEC BIT(15)
+
+/**
+ * SoC specific configuration for the CPCAP regulator. There are at least three
+ * different SoCs, each with their own parameters: omap3, omap4 and tegra2.
+ *
+ * The assign_reg and assign_mask seem to allow toggling between the primary
+ * and secondary modes, which at least omap4 uses for off mode.
+ */
+struct cpcap_regulator {
+ struct regulator_desc rdesc;
+ const u16 assign_reg;
+ const u16 assign_mask;
+ const u16 vsel_shift;
+};
+
+#define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl, \
+ mode_mask, volt_mask, volt_shft, \
+ mode_val, off_val, volt_trans_time) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &cpcap_regulator_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = CPCAP_##_ID, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(val_tbl), \
+ .volt_table = (val_tbl), \
+ .vsel_reg = (reg), \
+ .vsel_mask = (volt_mask), \
+ .enable_reg = (reg), \
+ .enable_mask = (mode_mask), \
+ .enable_val = (mode_val), \
+ .disable_val = (off_val), \
+ .ramp_delay = (volt_trans_time), \
+ }, \
+ .assign_reg = (assignment_reg), \
+ .assign_mask = (assignment_mask), \
+ .vsel_shift = (volt_shft), \
+}
+
+struct cpcap_ddata {
+ struct regmap *reg;
+ struct device *dev;
+ const struct cpcap_regulator *soc;
+};
+
+enum cpcap_regulator_id {
+ CPCAP_SW1,
+ CPCAP_SW2,
+ CPCAP_SW3,
+ CPCAP_SW4,
+ CPCAP_SW5,
+ CPCAP_SW6,
+ CPCAP_VCAM,
+ CPCAP_VCSI,
+ CPCAP_VDAC,
+ CPCAP_VDIG,
+ CPCAP_VFUSE,
+ CPCAP_VHVIO,
+ CPCAP_VSDIO,
+ CPCAP_VPLL,
+ CPCAP_VRF1,
+ CPCAP_VRF2,
+ CPCAP_VRFREF,
+ CPCAP_VWLAN1,
+ CPCAP_VWLAN2,
+ CPCAP_VSIM,
+ CPCAP_VSIMCARD,
+ CPCAP_VVIB,
+ CPCAP_VUSB,
+ CPCAP_VAUDIO,
+ CPCAP_NR_REGULATORS,
+};
+
+/*
+ * We need to also configure regulator idle mode for SoC off mode if
+ * CPCAP_REG_OFF_MODE_SEC is set.
+ */
+static int cpcap_regulator_enable(struct regulator_dev *rdev)
+{
+ struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
+ int error, ignore;
+
+ error = regulator_enable_regmap(rdev);
+ if (error)
+ return error;
+
+ if (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC) {
+ error = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+ regulator->assign_mask,
+ regulator->assign_mask);
+ if (error)
+ ignore = regulator_disable_regmap(rdev);
+ }
+
+ return error;
+}
+
+/*
+ * We need to also configure regulator idle mode for SoC off mode if
+ * CPCAP_REG_OFF_MODE_SEC is set.
+ */
+static int cpcap_regulator_disable(struct regulator_dev *rdev)
+{
+ struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
+ int error, ignore;
+
+ if (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC) {
+ error = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+ regulator->assign_mask, 0);
+ if (error)
+ return error;
+ }
+
+ error = regulator_disable_regmap(rdev);
+ if (error && (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC)) {
+ ignore = regmap_update_bits(rdev->regmap, regulator->assign_reg,
+ regulator->assign_mask,
+ regulator->assign_mask);
+ }
+
+ return error;
+}
+
+static unsigned int cpcap_regulator_get_mode(struct regulator_dev *rdev)
+{
+ int value;
+
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+
+ if (!(value & CPCAP_BIT_AUDIO_LOW_PWR))
+ return REGULATOR_MODE_STANDBY;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int cpcap_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int value;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ value = CPCAP_BIT_AUDIO_LOW_PWR;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ CPCAP_BIT_AUDIO_LOW_PWR, value);
+}
+
+static struct regulator_ops cpcap_regulator_ops = {
+ .enable = cpcap_regulator_enable,
+ .disable = cpcap_regulator_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_mode = cpcap_regulator_get_mode,
+ .set_mode = cpcap_regulator_set_mode,
+};
+
+static const unsigned int unknown_val_tbl[] = { 0, };
+static const unsigned int sw5_val_tbl[] = { 0, 5050000, };
+static const unsigned int vcam_val_tbl[] = { 2600000, 2700000, 2800000,
+ 2900000, };
+static const unsigned int vcsi_val_tbl[] = { 1200000, 1800000, };
+static const unsigned int vdac_val_tbl[] = { 1200000, 1500000, 1800000,
+ 2500000,};
+static const unsigned int vdig_val_tbl[] = { 1200000, 1350000, 1500000,
+ 1875000, };
+static const unsigned int vfuse_val_tbl[] = { 1500000, 1600000, 1700000,
+ 1800000, 1900000, 2000000,
+ 2100000, 2200000, 2300000,
+ 2400000, 2500000, 2600000,
+ 2700000, 3150000, };
+static const unsigned int vhvio_val_tbl[] = { 2775000, };
+static const unsigned int vsdio_val_tbl[] = { 1500000, 1600000, 1800000,
+ 2600000, 2700000, 2800000,
+ 2900000, 3000000, };
+static const unsigned int vpll_val_tbl[] = { 1200000, 1300000, 1400000,
+ 1800000, };
+/* Quirk: 2775000 is before 2500000 for vrf1 regulator */
+static const unsigned int vrf1_val_tbl[] = { 2775000, 2500000, };
+static const unsigned int vrf2_val_tbl[] = { 0, 2775000, };
+static const unsigned int vrfref_val_tbl[] = { 2500000, 2775000, };
+static const unsigned int vwlan1_val_tbl[] = { 1800000, 1900000, };
+static const unsigned int vwlan2_val_tbl[] = { 2775000, 3000000, 3300000,
+ 3300000, };
+static const unsigned int vsim_val_tbl[] = { 1800000, 2900000, };
+static const unsigned int vsimcard_val_tbl[] = { 1800000, 2900000, };
+static const unsigned int vvib_val_tbl[] = { 1300000, 1800000, 2000000,
+ 3000000, };
+static const unsigned int vusb_val_tbl[] = { 0, 3300000, };
+static const unsigned int vaudio_val_tbl[] = { 0, 2775000, };
+
+/**
+ * SoC specific configuration for omap4. The data below comes from the Motorola
+ * Linux kernel tree. It's basically the values of cpcap_regltr_data,
+ * cpcap_regulator_mode_values and cpcap_regulator_off_mode_values, see
+ * CPCAP_REG macro above.
+ *
+ * SW1 to SW4 and SW6 seem to be unused for mapphone. Note that VSIM and
+ * VSIMCARD have a shared resource assignment bit.
+ */
+static struct cpcap_regulator omap4_regulators[] = {
+ CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW1_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW2_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW3_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW4_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW5_SEL, sw5_val_tbl,
+ 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
+ CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW6_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
+ 0x87, 0x30, 4, 0x3, 0, 420),
+ CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
+ 0x47, 0x10, 4, 0x43, 0x41, 350),
+ CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
+ 0x87, 0x30, 4, 0x3, 0, 420),
+ CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
+ 0x87, 0x30, 4, 0x82, 0, 420),
+ CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
+ 0x80, 0xf, 0, 0x80, 0, 420),
+ CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
+ 0x17, 0, 0, 0, 0x12, 0),
+ CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
+ 0x87, 0x38, 3, 0x82, 0, 420),
+ CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
+ 0x43, 0x18, 3, 0x2, 0, 420),
+ CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
+ 0xac, 0x2, 1, 0x4, 0, 10),
+ CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
+ 0x23, 0x8, 3, 0, 0, 10),
+ CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
+ 0x23, 0x8, 3, 0, 0, 420),
+ CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
+ 0x47, 0x10, 4, 0, 0, 420),
+ CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
+ 0x20c, 0xc0, 6, 0x20c, 0, 420),
+ CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsim_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 420),
+ CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsimcard_val_tbl,
+ 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
+ 0x1, 0xc, 2, 0x1, 0, 500),
+ CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
+ 0x11c, 0x40, 6, 0xc, 0, 0),
+ CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
+ CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
+ 0x16, 0x1, 0, 0x4, 0, 0),
+ { /* sentinel */ },
+};
+
+static const struct of_device_id cpcap_regulator_id_table[] = {
+ {
+ .compatible = "motorola,cpcap-regulator",
+ },
+ {
+ .compatible = "motorola,mapphone-cpcap-regulator",
+ .data = omap4_regulators,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
+
+static int cpcap_regulator_probe(struct platform_device *pdev)
+{
+ struct cpcap_ddata *ddata;
+ const struct of_device_id *match;
+ struct regulator_config config;
+ struct regulator_init_data init_data;
+ int i;
+
+ match = of_match_device(of_match_ptr(cpcap_regulator_id_table),
+ &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ if (!match->data) {
+ dev_err(&pdev->dev, "no configuration data found\n");
+
+ return -ENODEV;
+ }
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->reg = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!ddata->reg)
+ return -ENODEV;
+
+ ddata->dev = &pdev->dev;
+ ddata->soc = match->data;
+ platform_set_drvdata(pdev, ddata);
+
+ memset(&config, 0, sizeof(config));
+ memset(&init_data, 0, sizeof(init_data));
+ config.dev = &pdev->dev;
+ config.regmap = ddata->reg;
+ config.init_data = &init_data;
+
+ for (i = 0; i < CPCAP_NR_REGULATORS; i++) {
+ const struct cpcap_regulator *regulator = &ddata->soc[i];
+ struct regulator_dev *rdev;
+
+ if (!regulator->rdesc.name)
+ break;
+
+ if (regulator->rdesc.volt_table == unknown_val_tbl)
+ continue;
+
+ config.driver_data = (void *)regulator;
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulator->rdesc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulator->rdesc.name);
+
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver cpcap_regulator_driver = {
+ .probe = cpcap_regulator_probe,
+ .driver = {
+ .name = "cpcap-regulator",
+ .of_match_table = of_match_ptr(cpcap_regulator_id_table),
+ },
+};
+
+module_platform_driver(cpcap_regulator_driver);
+
+MODULE_ALIAS("platform:cpcap-regulator");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("CPCAP regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 6ec1d400adae..784e3bf32210 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -19,12 +19,6 @@
#include "internal.h"
-enum {
- NORMAL_GET,
- EXCLUSIVE_GET,
- OPTIONAL_GET,
-};
-
static void devm_regulator_release(struct device *dev, void *res)
{
regulator_put(*(struct regulator **)res);
@@ -39,20 +33,7 @@ static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
if (!ptr)
return ERR_PTR(-ENOMEM);
- switch (get_type) {
- case NORMAL_GET:
- regulator = regulator_get(dev, id);
- break;
- case EXCLUSIVE_GET:
- regulator = regulator_get_exclusive(dev, id);
- break;
- case OPTIONAL_GET:
- regulator = regulator_get_optional(dev, id);
- break;
- default:
- regulator = ERR_PTR(-EINVAL);
- }
-
+ regulator = _regulator_get(dev, id, get_type);
if (!IS_ERR(regulator)) {
*ptr = regulator;
devres_add(dev, ptr);
@@ -139,6 +120,18 @@ void devm_regulator_put(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
+struct regulator_bulk_devres {
+ struct regulator_bulk_data *consumers;
+ int num_consumers;
+};
+
+static void devm_regulator_bulk_release(struct device *dev, void *res)
+{
+ struct regulator_bulk_devres *devres = res;
+
+ regulator_bulk_free(devres->num_consumers, devres->consumers);
+}
+
/**
* devm_regulator_bulk_get - managed get multiple regulator consumers
*
@@ -157,29 +150,22 @@ EXPORT_SYMBOL_GPL(devm_regulator_put);
int devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers)
{
- int i;
+ struct regulator_bulk_devres *devres;
int ret;
- for (i = 0; i < num_consumers; i++)
- consumers[i].consumer = NULL;
-
- for (i = 0; i < num_consumers; i++) {
- consumers[i].consumer = devm_regulator_get(dev,
- consumers[i].supply);
- if (IS_ERR(consumers[i].consumer)) {
- ret = PTR_ERR(consumers[i].consumer);
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
- consumers[i].consumer = NULL;
- goto err;
- }
- }
-
- return 0;
+ devres = devres_alloc(devm_regulator_bulk_release,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
-err:
- for (i = 0; i < num_consumers && consumers[i].consumer; i++)
- devm_regulator_put(consumers[i].consumer);
+ ret = regulator_bulk_get(dev, num_consumers, consumers);
+ if (!ret) {
+ devres->consumers = consumers;
+ devres->num_consumers = num_consumers;
+ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
return ret;
}
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index d7da81a875cf..60f431831582 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -202,7 +202,7 @@ static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT);
}
-static struct regulator_ops fan53555_regulator_ops = {
+static const struct regulator_ops fan53555_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a43b0e8a438d..988a7472c2ab 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,9 +30,6 @@
#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
return config;
}
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
- const struct regulator_desc *desc)
-{
- struct fixed_voltage_config *config;
- const char *supply_name;
- struct gpio_desc *gpiod;
- int ret;
-
- config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
- if (!config)
- return ERR_PTR(-ENOMEM);
-
- ret = device_property_read_string(dev, "supply-name", &supply_name);
- if (!ret)
- config->supply_name = supply_name;
-
- gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return ERR_PTR(-ENODEV);
-
- config->gpio = desc_to_gpio(gpiod);
- config->enable_high = device_property_read_bool(dev,
- "enable-active-high");
- gpiod_put(gpiod);
-
- return config;
-}
-
static struct regulator_ops fixed_voltage_ops = {
};
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
&drvdata->desc);
if (IS_ERR(config))
return PTR_ERR(config);
- } else if (ACPI_HANDLE(&pdev->dev)) {
- config = acpi_get_fixed_voltage_config(&pdev->dev,
- &drvdata->desc);
- if (IS_ERR(config))
- return PTR_ERR(config);
} else {
config = dev_get_platdata(&pdev->dev);
}
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index aca18466f522..065c100e9a03 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -96,7 +96,7 @@ static int hi655x_disable(struct regulator_dev *rdev)
return ret;
}
-static struct regulator_ops hi655x_regulator_ops = {
+static const struct regulator_ops hi655x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = hi655x_disable,
.is_enabled = hi655x_is_enabled,
@@ -105,7 +105,7 @@ static struct regulator_ops hi655x_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
-static struct regulator_ops hi655x_ldo_linear_ops = {
+static const struct regulator_ops hi655x_ldo_linear_ops = {
.enable = regulator_enable_regmap,
.disable = hi655x_disable,
.is_enabled = hi655x_is_enabled,
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index c74ac8734023..1dd575b28564 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -51,4 +51,14 @@ regulator_of_get_init_data(struct device *dev,
}
#endif
+enum regulator_get_type {
+ NORMAL_GET,
+ EXCLUSIVE_GET,
+ OPTIONAL_GET,
+ MAX_GET_TYPE
+};
+
+struct regulator *_regulator_get(struct device *dev, const char *id,
+ enum regulator_get_type get_type);
+
#endif
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index d6773da925ba..db34e1da75ef 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -227,7 +227,7 @@ err_i2c:
return ret;
}
-static struct regulator_ops lp8755_buck_ops = {
+static const struct regulator_ops lp8755_buck_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 47bef328fb58..a7a1a0313bbf 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -161,7 +161,7 @@ static int ltc3589_set_suspend_mode(struct regulator_dev *rdev,
}
/* SW1, SW2, SW3, LDO2 */
-static struct regulator_ops ltc3589_linear_regulator_ops = {
+static const struct regulator_ops ltc3589_linear_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -175,18 +175,18 @@ static struct regulator_ops ltc3589_linear_regulator_ops = {
};
/* BB_OUT, LDO3 */
-static struct regulator_ops ltc3589_fixed_regulator_ops = {
+static const struct regulator_ops ltc3589_fixed_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
/* LDO1 */
-static struct regulator_ops ltc3589_fixed_standby_regulator_ops = {
+static const struct regulator_ops ltc3589_fixed_standby_regulator_ops = {
};
/* LDO4 */
-static struct regulator_ops ltc3589_table_regulator_ops = {
+static const struct regulator_ops ltc3589_table_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index e2b476ca2b4d..503cd90eba39 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -161,7 +161,7 @@ static int ltc3676_of_parse_cb(struct device_node *np,
}
/* SW1, SW2, SW3, SW4 linear 0.8V-3.3V with scalar via R1/R2 feeback res */
-static struct regulator_ops ltc3676_linear_regulator_ops = {
+static const struct regulator_ops ltc3676_linear_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -173,11 +173,11 @@ static struct regulator_ops ltc3676_linear_regulator_ops = {
};
/* LDO1 always on fixed 0.8V-3.3V via scalar via R1/R2 feeback res */
-static struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
+static const struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
};
/* LDO2, LDO3 fixed (LDO2 has external scalar via R1/R2 feedback res) */
-static struct regulator_ops ltc3676_fixed_regulator_ops = {
+static const struct regulator_ops ltc3676_fixed_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index c9ff26199711..0db288ce319c 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -85,14 +85,14 @@ static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
reg_data);
}
-static struct regulator_ops max14577_safeout_ops = {
+static const struct regulator_ops max14577_safeout_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops max14577_charger_ops = {
+static const struct regulator_ops max14577_charger_ops = {
.is_enabled = max14577_reg_is_enabled,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -130,7 +130,7 @@ static const struct regulator_desc max14577_supported_regulators[] = {
[MAX14577_CHARGER] = MAX14577_CHARGER_REG,
};
-static struct regulator_ops max77836_ldo_ops = {
+static const struct regulator_ops max77836_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index d088a7c79e60..b94e3a721721 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -644,7 +644,7 @@ static int max77620_of_parse_cb(struct device_node *np,
return max77620_init_pmic(pmic, desc->id);
}
-static struct regulator_ops max77620_regulator_ops = {
+static const struct regulator_ops max77620_regulator_ops = {
.is_enabled = max77620_regulator_is_enabled,
.enable = max77620_regulator_enable,
.disable = max77620_regulator_disable,
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index ac4fa581e0a5..c301f3733475 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -289,7 +289,7 @@ static int max77686_of_parse_cb(struct device_node *np,
return 0;
}
-static struct regulator_ops max77686_ops = {
+static const struct regulator_ops max77686_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -301,7 +301,7 @@ static struct regulator_ops max77686_ops = {
.set_suspend_mode = max77686_set_suspend_mode,
};
-static struct regulator_ops max77686_ldo_ops = {
+static const struct regulator_ops max77686_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -314,7 +314,7 @@ static struct regulator_ops max77686_ldo_ops = {
.set_suspend_disable = max77686_set_suspend_disable,
};
-static struct regulator_ops max77686_buck1_ops = {
+static const struct regulator_ops max77686_buck1_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -326,7 +326,7 @@ static struct regulator_ops max77686_buck1_ops = {
.set_suspend_disable = max77686_set_suspend_disable,
};
-static struct regulator_ops max77686_buck_dvs_ops = {
+static const struct regulator_ops max77686_buck_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index cfbb9512e486..3fce67982682 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -141,7 +141,7 @@ static const unsigned int max77693_safeout_table[] = {
3300000,
};
-static struct regulator_ops max77693_safeout_ops = {
+static const struct regulator_ops max77693_safeout_ops = {
.list_voltage = regulator_list_voltage_table,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 1d3539324d9a..b6261903818c 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -288,7 +288,7 @@ static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev,
/*
* LDOs 2, 4-19, 22-35
*/
-static struct regulator_ops max77802_ldo_ops_logic1 = {
+static const struct regulator_ops max77802_ldo_ops_logic1 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -304,7 +304,7 @@ static struct regulator_ops max77802_ldo_ops_logic1 = {
/*
* LDOs 1, 20, 21, 3
*/
-static struct regulator_ops max77802_ldo_ops_logic2 = {
+static const struct regulator_ops max77802_ldo_ops_logic2 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -319,7 +319,7 @@ static struct regulator_ops max77802_ldo_ops_logic2 = {
};
/* BUCKS 1, 6 */
-static struct regulator_ops max77802_buck_16_dvs_ops = {
+static const struct regulator_ops max77802_buck_16_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -333,7 +333,7 @@ static struct regulator_ops max77802_buck_16_dvs_ops = {
};
/* BUCKs 2-4 */
-static struct regulator_ops max77802_buck_234_ops = {
+static const struct regulator_ops max77802_buck_234_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
@@ -348,7 +348,7 @@ static struct regulator_ops max77802_buck_234_ops = {
};
/* BUCKs 5, 7-10 */
-static struct regulator_ops max77802_buck_dvs_ops = {
+static const struct regulator_ops max77802_buck_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 5e941db5ccaf..860400d2cd85 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -109,7 +109,7 @@ struct max8907_regulator {
#define LDO_650_25(id, supply, base) REG_LDO(id, supply, (base), \
650000, 2225000, 25000)
-static struct regulator_ops max8907_mbatt_ops = {
+static const struct regulator_ops max8907_mbatt_ops = {
};
static struct regulator_ops max8907_ldo_ops = {
@@ -121,13 +121,13 @@ static struct regulator_ops max8907_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops max8907_ldo_hwctl_ops = {
+static const struct regulator_ops max8907_ldo_hwctl_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops max8907_fixed_ops = {
+static const struct regulator_ops max8907_fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
};
@@ -138,11 +138,11 @@ static struct regulator_ops max8907_out5v_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops max8907_out5v_hwctl_ops = {
+static const struct regulator_ops max8907_out5v_hwctl_ops = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops max8907_bbat_ops = {
+static const struct regulator_ops max8907_bbat_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index c802f0239dc7..39b63ddefeb2 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -132,7 +132,7 @@ static int max8925_set_dvm_disable(struct regulator_dev *rdev)
return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0);
}
-static struct regulator_ops max8925_regulator_sdv_ops = {
+static const struct regulator_ops max8925_regulator_sdv_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
@@ -145,7 +145,7 @@ static struct regulator_ops max8925_regulator_sdv_ops = {
.set_suspend_disable = max8925_set_dvm_disable,
};
-static struct regulator_ops max8925_regulator_ldo_ops = {
+static const struct regulator_ops max8925_regulator_ldo_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 1af8f4a2ab86..1096546c05e9 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -113,7 +113,7 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev,
return 0;
}
-static struct regulator_ops max8952_ops = {
+static const struct regulator_ops max8952_ops = {
.list_voltage = max8952_list_voltage,
.get_voltage_sel = max8952_get_voltage_sel,
.set_voltage_sel = max8952_set_voltage_sel,
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index f11d41dad9c1..31ae5ee3a80d 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -528,7 +528,7 @@ static int palmas_smps_set_ramp_delay(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops palmas_ops_smps = {
+static const struct regulator_ops palmas_ops_smps = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -542,7 +542,7 @@ static struct regulator_ops palmas_ops_smps = {
.set_ramp_delay = palmas_smps_set_ramp_delay,
};
-static struct regulator_ops palmas_ops_ext_control_smps = {
+static const struct regulator_ops palmas_ops_ext_control_smps = {
.set_mode = palmas_set_mode_smps,
.get_mode = palmas_get_mode_smps,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -553,7 +553,7 @@ static struct regulator_ops palmas_ops_ext_control_smps = {
.set_ramp_delay = palmas_smps_set_ramp_delay,
};
-static struct regulator_ops palmas_ops_smps10 = {
+static const struct regulator_ops palmas_ops_smps10 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -565,7 +565,7 @@ static struct regulator_ops palmas_ops_smps10 = {
.get_bypass = regulator_get_bypass_regmap,
};
-static struct regulator_ops tps65917_ops_smps = {
+static const struct regulator_ops tps65917_ops_smps = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -578,7 +578,7 @@ static struct regulator_ops tps65917_ops_smps = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops tps65917_ops_ext_control_smps = {
+static const struct regulator_ops tps65917_ops_ext_control_smps = {
.set_mode = palmas_set_mode_smps,
.get_mode = palmas_get_mode_smps,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -602,7 +602,7 @@ static int palmas_is_enabled_ldo(struct regulator_dev *dev)
return !!(reg);
}
-static struct regulator_ops palmas_ops_ldo = {
+static const struct regulator_ops palmas_ops_ldo = {
.is_enabled = palmas_is_enabled_ldo,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -612,7 +612,7 @@ static struct regulator_ops palmas_ops_ldo = {
.map_voltage = regulator_map_voltage_linear,
};
-static struct regulator_ops palmas_ops_ldo9 = {
+static const struct regulator_ops palmas_ops_ldo9 = {
.is_enabled = palmas_is_enabled_ldo,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -624,23 +624,23 @@ static struct regulator_ops palmas_ops_ldo9 = {
.get_bypass = regulator_get_bypass_regmap,
};
-static struct regulator_ops palmas_ops_ext_control_ldo = {
+static const struct regulator_ops palmas_ops_ext_control_ldo = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
};
-static struct regulator_ops palmas_ops_extreg = {
+static const struct regulator_ops palmas_ops_extreg = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
};
-static struct regulator_ops palmas_ops_ext_control_extreg = {
+static const struct regulator_ops palmas_ops_ext_control_extreg = {
};
-static struct regulator_ops tps65917_ops_ldo = {
+static const struct regulator_ops tps65917_ops_ldo = {
.is_enabled = palmas_is_enabled_ldo,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -651,7 +651,7 @@ static struct regulator_ops tps65917_ops_ldo = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops tps65917_ops_ldo_1_2 = {
+static const struct regulator_ops tps65917_ops_ldo_1_2 = {
.is_enabled = palmas_is_enabled_ldo,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index f9d74d63be7c..0cb76ba29e84 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -54,7 +54,7 @@ static const unsigned int pbias_volt_table[] = {
3000000
};
-static struct regulator_ops pbias_regulator_voltage_ops = {
+static const struct regulator_ops pbias_regulator_voltage_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 9b16e6158f15..79cb971a69bb 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -210,7 +210,7 @@ static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
return (tmp >> vreg->en) & 1;
}
-static struct regulator_ops pcap_regulator_ops = {
+static const struct regulator_ops pcap_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = pcap_regulator_set_voltage_sel,
.get_voltage_sel = pcap_regulator_get_voltage_sel,
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 134f90ec9ca1..762e18447cae 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -41,7 +41,7 @@
.enable_mask = PCF50633_REGULATOR_ON, \
}
-static struct regulator_ops pcf50633_regulator_ops = {
+static const struct regulator_ops pcf50633_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index cb18b5c4f2db..e193bbbb8ffc 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -126,7 +126,7 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
return ret;
}
-static struct regulator_ops pfuze100_ldo_regulator_ops = {
+static const struct regulator_ops pfuze100_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -135,14 +135,14 @@ static struct regulator_ops pfuze100_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops pfuze100_fixed_regulator_ops = {
+static const struct regulator_ops pfuze100_fixed_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops pfuze100_sw_regulator_ops = {
+static const struct regulator_ops pfuze100_sw_regulator_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -150,7 +150,7 @@ static struct regulator_ops pfuze100_sw_regulator_ops = {
.set_ramp_delay = pfuze100_set_ramp_delay,
};
-static struct regulator_ops pfuze100_swb_regulator_ops = {
+static const struct regulator_ops pfuze100_swb_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_table,
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 6c4afc73ecac..a9446056435f 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -162,7 +162,7 @@ static int pv88060_get_current_limit(struct regulator_dev *rdev)
return info->current_limits[data];
}
-static struct regulator_ops pv88060_buck_ops = {
+static const struct regulator_ops pv88060_buck_ops = {
.get_mode = pv88060_buck_get_mode,
.set_mode = pv88060_buck_set_mode,
.enable = regulator_enable_regmap,
@@ -175,7 +175,7 @@ static struct regulator_ops pv88060_buck_ops = {
.get_current_limit = pv88060_get_current_limit,
};
-static struct regulator_ops pv88060_ldo_ops = {
+static const struct regulator_ops pv88060_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 954a20eeb26f..9a08cb2de501 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -306,7 +306,7 @@ static int pv88080_get_current_limit(struct regulator_dev *rdev)
return info->current_limits[data];
}
-static struct regulator_ops pv88080_buck_ops = {
+static const struct regulator_ops pv88080_buck_ops = {
.get_mode = pv88080_buck_get_mode,
.set_mode = pv88080_buck_set_mode,
.enable = regulator_enable_regmap,
@@ -319,7 +319,7 @@ static struct regulator_ops pv88080_buck_ops = {
.get_current_limit = pv88080_get_current_limit,
};
-static struct regulator_ops pv88080_hvbuck_ops = {
+static const struct regulator_ops pv88080_hvbuck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 421641175352..ab51e254d13a 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -184,7 +184,7 @@ static int pv88090_get_current_limit(struct regulator_dev *rdev)
return info->current_limits[data];
}
-static struct regulator_ops pv88090_buck_ops = {
+static const struct regulator_ops pv88090_buck_ops = {
.get_mode = pv88090_buck_get_mode,
.set_mode = pv88090_buck_set_mode,
.enable = regulator_enable_regmap,
@@ -197,7 +197,7 @@ static struct regulator_ops pv88090_buck_ops = {
.get_current_limit = pv88090_get_current_limit,
};
-static struct regulator_ops pv88090_ldo_ops = {
+static const struct regulator_ops pv88090_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 8ed46a9a55c8..f35994a2a5be 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -305,6 +305,56 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
.ops = &rpm_smps_ldo_ops,
};
+static const struct regulator_desc pm8994_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_ftsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
+ REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 350,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
+ },
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8994_switch = {
+ .ops = &rpm_switch_ops,
+};
+
+static const struct regulator_desc pm8994_lnldo = {
+ .fixed_uV = 1740000,
+ .n_voltages = 1,
+ .ops = &rpm_smps_ldo_ops_fixed,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -443,10 +493,62 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8994_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8994_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8994_hfsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pm8994_ftsmps, "vdd_s8" },
+ { "s9", QCOM_SMD_RPM_SMPA, 9, &pm8994_ftsmps, "vdd_s9" },
+ { "s10", QCOM_SMD_RPM_SMPA, 10, &pm8994_ftsmps, "vdd_s10" },
+ { "s11", QCOM_SMD_RPM_SMPA, 11, &pm8994_ftsmps, "vdd_s11" },
+ { "s12", QCOM_SMD_RPM_SMPA, 12, &pm8994_ftsmps, "vdd_s12" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8994_nldo, "vdd_l1" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8994_nldo, "vdd_l3_l11" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8994_lnldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8994_lnldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8994_nldo, "vdd_l3_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8994_pldo, "vdd_l14_l15" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8994_pldo, "vdd_l14_l15" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8994_pldo, "vdd_l17_l29" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8994_pldo, "vdd_l20_l21" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8994_pldo, "vdd_l20_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8994_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l29", QCOM_SMD_RPM_LDOA, 29, &pm8994_pldo, "vdd_l17_l29" },
+ { "l30", QCOM_SMD_RPM_LDOA, 30, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l31", QCOM_SMD_RPM_LDOA, 31, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l32", QCOM_SMD_RPM_LDOA, 32, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8994_switch, "vdd_lvs1_2" },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8994_switch, "vdd_lvs1_2" },
+
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{}
};
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index d2e67c512195..d0f1340168b1 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -61,7 +61,7 @@ static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
}
-static struct regulator_ops rc5t583_ops = {
+static const struct regulator_ops rc5t583_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 9c930eb68cda..8d2819e36654 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -19,7 +19,7 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
-static struct regulator_ops rn5t618_reg_ops = {
+static const struct regulator_ops rn5t618_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 92f88753bfed..38ee97a085f9 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -26,6 +26,7 @@
#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
struct s2mpa01_info {
+ struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
int ramp_delay24;
int ramp_delay3;
int ramp_delay5;
@@ -341,9 +342,9 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
- struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX] = { };
struct device_node *reg_np = NULL;
struct regulator_config config = { };
+ struct of_regulator_match *rdata;
struct s2mpa01_info *s2mpa01;
int i;
@@ -351,6 +352,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
if (!s2mpa01)
return -ENOMEM;
+ rdata = s2mpa01->rdata;
for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
rdata[i].name = regulators[i].name;
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index ecb0371780af..45e96e154690 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -157,19 +157,19 @@ static struct tps65086_regulator regulators[] = {
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
- TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)),
- TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)),
+ TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+ TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
};
-static int tps65086_of_parse_cb(struct device_node *dev,
+static int tps65086_of_parse_cb(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
int ret;
/* Check for 25mV step mode */
- if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
+ if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
switch (desc->id) {
case BUCK1:
case BUCK2:
@@ -193,7 +193,7 @@ static int tps65086_of_parse_cb(struct device_node *dev,
}
/* Check for decay mode */
- if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) {
+ if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
ret = regmap_write_bits(config->regmap,
regulators[desc->id].decay_reg,
regulators[desc->id].decay_mask,
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 2d12b9af3540..5324dc9e6d6e 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -179,7 +179,8 @@ static const struct regulator_desc regulators[] = {
TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1,
TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN,
- NULL, tps65217_uv1_ranges, 2, TPS65217_REG_SEQ1,
+ NULL, tps65217_uv1_ranges,
+ ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ1,
TPS65217_SEQ1_DC1_SEQ_MASK),
TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2,
@@ -190,7 +191,8 @@ static const struct regulator_desc regulators[] = {
TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3,
TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN,
- NULL, tps65217_uv1_ranges, 1, TPS65217_REG_SEQ2,
+ NULL, tps65217_uv1_ranges,
+ ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ2,
TPS65217_SEQ2_DC3_SEQ_MASK),
TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1",
tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1,
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 4864b9d742c0..716191046a70 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
vsel = 62;
else if ((min_uV > 1800000) && (min_uV <= 1900000))
vsel = 61;
- else if ((min_uV > 1350000) && (min_uV <= 1800000))
+ else if ((min_uV > 1500000) && (min_uV <= 1800000))
vsel = 60;
else if ((min_uV > 1350000) && (min_uV <= 1500000))
vsel = 59;
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 10368ed8fd13..b6f5f1e1826c 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -163,7 +163,7 @@ int reset_control_reset(struct reset_control *rstc)
}
ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
- if (rstc->shared && !ret)
+ if (rstc->shared && ret)
atomic_dec(&rstc->triggered_count);
return ret;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index c93c5a8fba32..5dc673dc9487 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
will be called rtc-mpc5121.
config RTC_DRV_JZ4740
- bool "Ingenic JZ4740 SoC"
+ tristate "Ingenic JZ4740 SoC"
depends on MACH_INGENIC || COMPILE_TEST
help
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
+ This driver can also be built as a module. If so, the module
+ will be called rtc-jz4740.
+
config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 72918c1ba092..64989afffa3d 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
jz4740_rtc_poweroff(dev_for_power_off);
- machine_halt();
+ kernel_halt();
}
static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
static int jz4740_rtc_probe(struct platform_device *pdev)
{
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
{ "jz4780-rtc", ID_JZ4780 },
{}
};
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
.id_table = jz4740_rtc_ids,
};
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 51e52446eacb..73594f38c453 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -610,7 +610,7 @@ static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
u32 val;
unsigned int param;
- u16 param_val;
+ u32 param_val;
int i;
rtc->type->unlock(rtc);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f16ea6964ec..152de6817875 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -300,13 +300,6 @@ static void scm_blk_request(struct request_queue *rq)
struct request *req;
while ((req = blk_peek_request(rq))) {
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_start_request(req);
- blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- __blk_end_request_all(req, -EIO);
- continue;
- }
-
if (!scm_permit_request(bdev, req))
goto out;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c483f3..e7addea8741b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
-#define QETH_IP_HEADER_SIZE 40
-
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
#define QETH_RX_PULL_LEN 256
@@ -674,8 +672,6 @@ struct qeth_card_info {
int broadcast_capable;
int unique_id;
struct qeth_card_blkt blkt;
- __u32 csum_mask;
- __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
__u32 hwtrap;
@@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e33558313834..315d8a2db7c0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5087,6 +5086,20 @@ retriable:
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5098,14 +5111,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
@@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card,
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
- if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
- }
- if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.tx_csum_mask =
- cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+/* Callback to handle checksum offload command reply from OSA card.
+ * Verify that required features have been enabled on the card.
+ * Return error in hdr->return_code as this value is checked by caller.
+ *
+ * Always returns zero to indicate no further messages from the OSA card.
+ */
+static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_checksum_cmd *chksum_cb =
+ (struct qeth_checksum_cmd *)reply->param;
+
+ QETH_CARD_TEXT(card, 4, "chkdoccb");
+ if (cmd->hdr.return_code)
+ return 0;
+
+ memset(chksum_cb, 0, sizeof(*chksum_cb));
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
+ }
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ chksum_cb->enabled =
+ cmd->data.setassparms.data.chksum.enabled;
+ QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
+ QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
+ }
+ return 0;
+}
+
+/* Send command to OSA card and check results. */
+static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ __u16 cmd_code, long data,
+ struct qeth_checksum_cmd *chksum_cb)
+{
+ struct qeth_cmd_buffer *iob;
+ int rc = -ENOMEM;
+
+ QETH_CARD_TEXT(card, 4, "chkdocmd");
+ iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+ sizeof(__u32), QETH_PROT_IPV4);
+ if (iob)
+ rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
+ qeth_ipa_checksum_run_cmd_cb,
+ chksum_cb);
+ return rc;
+}
+
static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
{
- long rxtx_arg;
+ const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
+ QETH_IPA_CHECKSUM_UDP |
+ QETH_IPA_CHECKSUM_TCP;
+ struct qeth_checksum_cmd chksum_cb;
int rc;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+ &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.supported) !=
+ required_features)
+ rc = -EIO;
+ else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
+ cstype == IPA_INBOUND_CHECKSUM)
+ dev_warn(&card->gdev->dev,
+ "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+ QETH_CARD_IFNAME(card));
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Starting HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
- : card->info.csum_mask;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
- rxtx_arg);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+ chksum_cb.supported, &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.enabled) !=
+ required_features)
+ rc = -EIO;
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Enabling HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
@@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
{
- int rc;
-
- if (on) {
- rc = qeth_send_checksum_on(card, cstype);
- if (rc)
- return -EIO;
- } else {
- rc = qeth_send_simple_setassparms(card, cstype,
- IPA_CMD_ASS_STOP, 0);
- if (rc)
- return -EIO;
- }
- return 0;
+ int rc = (on) ? qeth_send_checksum_on(card, cstype)
+ : qeth_send_simple_setassparms(card, cstype,
+ IPA_CMD_ASS_STOP, 0);
+ return rc ? -EIO : 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, int on)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6cccc9a49ede..bc69d0a338ad 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -352,11 +352,28 @@ struct qeth_arp_query_info {
char *udata;
};
+/* IPA set assist segmentation bit definitions for receive and
+ * transmit checksum offloading.
+ */
+enum qeth_ipa_checksum_bits {
+ QETH_IPA_CHECKSUM_IP_HDR = 0x0002,
+ QETH_IPA_CHECKSUM_UDP = 0x0008,
+ QETH_IPA_CHECKSUM_TCP = 0x0010,
+ QETH_IPA_CHECKSUM_LP2LP = 0x0020
+};
+
+/* IPA Assist checksum offload reply layout. */
+struct qeth_checksum_cmd {
+ __u32 supported;
+ __u32 enabled;
+} __packed;
+
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
+ struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
__u8 ip[16];
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9c921c2833f1..bea483307618 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,9 +27,6 @@
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
-static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
-static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds);
static void qeth_l2_set_rx_mode(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
return rc;
}
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+ enum qeth_ipa_cmds ipacmd)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+ memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob,
+ NULL, NULL));
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ switch (rc) {
+ case -EEXIST:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM already exists\n", mac);
+ break;
+ case -EPERM:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM is not authorized\n", mac);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
+ if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
+}
+
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
int rc;
QETH_CARD_TEXT(card, 2, "L2Sgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
if (rc == -EEXIST)
QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
mac, QETH_CARD_IFNAME(card));
@@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
int rc;
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
if (rc)
QETH_DBF_MESSAGE(2,
"Could not delete group MAC %pM on %s: %d\n",
@@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static inline u32 qeth_l2_mac_hash(const u8 *addr)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
{
- return get_unaligned((u32 *)(&addr[2]));
+ if (mac->is_uc) {
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_SETVMAC);
+ } else {
+ return qeth_l2_send_setgroupmac(card, mac->mac_addr);
+ }
}
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
{
-
- int rc;
-
if (mac->is_uc) {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_SETVMAC));
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_DELVMAC);
} else {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setgroupmac(card, mac->mac_addr));
+ return qeth_l2_send_delgroupmac(card, mac->mac_addr);
}
- return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+static void qeth_l2_del_all_macs(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (del) {
- if (mac->is_uc)
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- else
- qeth_l2_send_delgroupmac(card, mac->mac_addr);
- }
hash_del(&mac->hnode);
kfree(mac);
}
spin_unlock_bh(&card->mclock);
}
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
+{
+ return get_unaligned((u32 *)(&addr[2]));
+}
+
static inline int qeth_l2_get_cast_type(struct qeth_card *card,
struct sk_buff *skb)
{
@@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card, 0);
+ qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
@@ -577,65 +627,6 @@ out:
return work_done;
}
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd)
-{
- struct qeth_ipa_cmd *cmd;
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "L2sdmac");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
- memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETVMAC));
- if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- } else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (rc) {
- case -EEXIST:
- dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n", mac);
- break;
- case -EPERM:
- dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n", mac);
- break;
- }
- }
- return rc;
-}
-
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELVMAC));
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
@@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
- if (!mac->is_uc)
- rc = qeth_l2_send_delgroupmac(card,
- mac->mac_addr);
- else {
- rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- }
-
+ qeth_l2_remove_mac(card, mac);
hash_del(&mac->hnode);
kfree(mac);
@@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
rc = qeth_l2_start_ipassists(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac37d050e765..06d0addcc058 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0e00a5ce0f00..05e9471e3d3f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
- if (card->state == CARD_STATE_DOWN)
- return -EPERM;
-
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
return sprintf(buf, "%s\n", tmp_hsuid);
@@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can safely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can safely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 75f820ca17b7..27ff38f839fc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1583,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..0678cf714c0e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -330,6 +330,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.module = THIS_MODULE,
.name = "zfcp",
.queuecommand = zfcp_scsi_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 879a5f63e7d4..648373cde4a1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
* This may happen on device detach.
*/
if (ret && (ret != -ENODEV))
- dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+ dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
ret, index);
vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ u8 old_status = *vcdev->status;
+ struct ccw1 *ccw;
+
+ if (vcdev->revision < 1)
+ return *vcdev->status;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return old_status;
+
+ ccw->cmd_code = CCW_CMD_READ_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(*vcdev->status);
+ ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+/*
+ * If the channel program failed (should only happen if the device
+ * was hotunplugged, and then we clean up via the machine check
+ * handler anyway), vcdev->status was not overwritten and we just
+ * return the old status, which is fine.
+*/
+ kfree(ccw);
return *vcdev->status;
}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
kfree(ccw);
}
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
.get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
case VIRTIO_CCW_DOING_READ_CONFIG:
case VIRTIO_CCW_DOING_WRITE_CONFIG:
case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_READ_STATUS:
case VIRTIO_CCW_DOING_SET_VQ:
case VIRTIO_CCW_DOING_SET_IND:
case VIRTIO_CCW_DOING_SET_CONF_IND:
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a4f6b0d95515..d4023bf1e739 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -18,6 +18,7 @@ config SCSI
depends on BLOCK
select SCSI_DMA if HAS_DMA
select SG_POOL
+ select BLK_SCSI_REQUEST
---help---
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 4f5ca794bb71..acc33440bca0 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -96,17 +96,6 @@
* of chips. To use it, you write architecture-specific functions
* and macros and include this file in your driver.
*
- * These macros control options :
- * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
- * for commands that return with a CHECK CONDITION status.
- *
- * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
- * transceivers.
- *
- * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
- *
- * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
- *
* These macros MUST be defined :
*
* NCR5380_read(register) - read from the specified register
@@ -347,7 +336,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
#endif
/**
- * NCR58380_info - report driver and host information
+ * NCR5380_info - report driver and host information
* @instance: relevant scsi host instance
*
* For use as the host template info() handler.
@@ -360,33 +349,6 @@ static const char *NCR5380_info(struct Scsi_Host *instance)
return hostdata->info;
}
-static void prepare_info(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, irq %d, "
- "io_port 0x%lx, base 0x%lx, "
- "can_queue %d, cmd_per_lun %d, "
- "sg_tablesize %d, this_id %d, "
- "flags { %s%s%s}, "
- "options { %s} ",
- instance->hostt->name, instance->irq,
- hostdata->io_port, hostdata->base,
- instance->can_queue, instance->cmd_per_lun,
- instance->sg_tablesize, instance->this_id,
- hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
- hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
- hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "",
-#ifdef DIFFERENTIAL
- "DIFFERENTIAL "
-#endif
-#ifdef PARITY
- "PARITY "
-#endif
- "");
-}
-
/**
* NCR5380_init - initialise an NCR5380
* @instance: adapter to configure
@@ -436,7 +398,14 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
if (!hostdata->work_q)
return -ENOMEM;
- prepare_info(instance);
+ snprintf(hostdata->info, sizeof(hostdata->info),
+ "%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}",
+ instance->hostt->name, instance->irq, hostdata->io_port,
+ hostdata->base, instance->can_queue, instance->cmd_per_lun,
+ instance->sg_tablesize, instance->this_id,
+ hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
+ hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
+ hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_write(MODE_REG, MR_BASE);
@@ -622,8 +591,9 @@ static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
list_empty(&hostdata->unissued) &&
list_empty(&hostdata->autosense) &&
!hostdata->connected &&
- !hostdata->selecting)
+ !hostdata->selecting) {
NCR5380_release_dma_irq(instance);
+ }
}
/**
@@ -962,6 +932,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
struct scsi_cmnd *cmd)
+ __releases(&hostdata->lock) __acquires(&hostdata->lock)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char tmp[3], phase;
@@ -1194,8 +1165,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (len) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ cmd->result = DID_ERROR << 16;
+ complete_cmd(instance, cmd);
+ dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
+ cmd = NULL;
+ goto out;
+ }
+
dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n");
- /* XXX need to handle errors here */
hostdata->connected = cmd;
hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
@@ -1654,6 +1633,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ __releases(&hostdata->lock) __acquires(&hostdata->lock)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char msgout = NOP;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 51a3567a6fb2..d78f0957d865 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -81,11 +81,7 @@
#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
-#ifdef DIFFERENTIAL
-#define ICR_BASE ICR_DIFF_ENABLE
-#else
#define ICR_BASE 0
-#endif
#define MODE_REG 2
/*
@@ -102,11 +98,7 @@
#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */
#define MR_ARBITRATE 0x01 /* rw start arbitration */
-#ifdef PARITY
-#define MR_BASE MR_ENABLE_PAR_CHECK
-#else
#define MR_BASE 0
-#endif
#define TARGET_COMMAND_REG 3
#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */
@@ -174,11 +166,7 @@
#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */
#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */
-#if 0
-#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
-#else
#define CSR_BASE CSR_53C80_INTR
-#endif
/* Note : PHASE_* macros are based on the values of the STATUS register */
#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
@@ -234,11 +222,9 @@ struct NCR5380_hostdata {
unsigned char id_higher_mask; /* All bits above id_mask */
unsigned char last_message; /* Last Message Out */
unsigned long region_size; /* Size of address/port range */
- char info[256];
+ char info[168]; /* Host banner message */
};
-#ifdef __KERNEL__
-
struct NCR5380_cmd {
struct list_head list;
};
@@ -331,5 +317,4 @@ static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
return 0;
}
-#endif /* __KERNEL__ */
#endif /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 1ee7c654f7b8..907f1e80665b 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +23,11 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
+ * Module Name:
+ * aachba.c
+ *
+ * Abstract: Contains Interfaces to manage IOs.
+ *
*/
#include <linux/kernel.h>
@@ -62,6 +68,7 @@
#define SENCODE_END_OF_DATA 0x00
#define SENCODE_BECOMING_READY 0x04
#define SENCODE_INIT_CMD_REQUIRED 0x04
+#define SENCODE_UNRECOVERED_READ_ERROR 0x11
#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
#define SENCODE_INVALID_COMMAND 0x20
#define SENCODE_LBA_OUT_OF_RANGE 0x21
@@ -106,6 +113,8 @@
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
+#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
+
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
@@ -164,46 +173,56 @@ struct inquiry_data {
};
/* Added for VPD 0x83 */
-typedef struct {
- u8 CodeSet:4; /* VPD_CODE_SET */
- u8 Reserved:4;
- u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
- u8 Reserved2:4;
- u8 Reserved3;
- u8 IdentifierLength;
- u8 VendId[8];
- u8 ProductId[16];
- u8 SerialNumber[8]; /* SN in ASCII */
-
-} TVPD_ID_Descriptor_Type_1;
+struct tvpd_id_descriptor_type_1 {
+ u8 codeset:4; /* VPD_CODE_SET */
+ u8 reserved:4;
+ u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */
+ u8 reserved2:4;
+ u8 reserved3;
+ u8 identifierlength;
+ u8 venid[8];
+ u8 productid[16];
+ u8 serialnumber[8]; /* SN in ASCII */
-typedef struct {
- u8 CodeSet:4; /* VPD_CODE_SET */
- u8 Reserved:4;
- u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
- u8 Reserved2:4;
- u8 Reserved3;
- u8 IdentifierLength;
- struct TEU64Id {
+};
+
+struct tvpd_id_descriptor_type_2 {
+ u8 codeset:4; /* VPD_CODE_SET */
+ u8 reserved:4;
+ u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */
+ u8 reserved2:4;
+ u8 reserved3;
+ u8 identifierlength;
+ struct teu64id {
u32 Serial;
/* The serial number is supposed to be 40 bits,
 * but we only support 32, so make the last byte zero. */
- u8 Reserved;
- u8 VendId[3];
- } EU64Id;
+ u8 reserved;
+ u8 venid[3];
+ } eu64id;
-} TVPD_ID_Descriptor_Type_2;
+};
-typedef struct {
+struct tvpd_id_descriptor_type_3 {
+ u8 codeset : 4; /* VPD_CODE_SET */
+ u8 reserved : 4;
+ u8 identifiertype : 4; /* VPD_IDENTIFIER_TYPE */
+ u8 reserved2 : 4;
+ u8 reserved3;
+ u8 identifierlength;
+ u8 Identifier[16];
+};
+
+struct tvpd_page83 {
u8 DeviceType:5;
u8 DeviceTypeQualifier:3;
u8 PageCode;
- u8 Reserved;
+ u8 reserved;
u8 PageLength;
- TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
- TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
-
-} TVPD_Page83;
+ struct tvpd_id_descriptor_type_1 type1;
+ struct tvpd_id_descriptor_type_2 type2;
+ struct tvpd_id_descriptor_type_3 type3;
+};
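
For orientation, each identification descriptor above starts with a 4-byte header (code set, identifier type, reserved, length), which is why the callback further down sets identifierlength to sizeof(descriptor) - 4 and sums whole descriptor sizes into PageLength. A quick stand-alone arithmetic check of those sizes, assuming the byte-packed layout implied by the structures:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* descriptor sizes implied by the structures above (4-byte header + payload) */
	unsigned int type1 = 4 + 8 + 16 + 8;	/* venid[8] + productid[16] + serialnumber[8] = 36 */
	unsigned int type2 = 4 + 8;		/* EU-64 identifier (u32 + u8 + u8[3]) = 12 */
	unsigned int type3 = 4 + 16;		/* 16-byte identifier = 20 */

	/* identifierlength excludes the descriptor's own 4-byte header */
	assert(type1 - 4 == 32 && type2 - 4 == 8 && type3 - 4 == 16);

	/* ARC firmware reports type 1 + type 2; SA firmware appends type 3 */
	printf("PageLength (ARC)         = %u\n", type1 + type2);		/* 48 */
	printf("PageLength (SA firmware) = %u\n", type1 + type2 + type3);	/* 68 */
	return 0;
}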
/*
* M O D U L E G L O B A L S
@@ -214,9 +233,13 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
struct aac_raw_io2 *rio2, int sg_max);
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+ struct aac_hba_cmd_req *hbacmd,
+ int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif
@@ -327,7 +350,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
}
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
device = scsicmd->device;
- if (unlikely(!device || !scsi_device_online(device))) {
+ if (unlikely(!device)) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
aac_fib_complete(fibptr);
return 0;
@@ -473,16 +496,26 @@ int aac_get_containers(struct aac_dev *dev)
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
- fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
- GFP_KERNEL);
- if (!fsa_dev_ptr)
- return -ENOMEM;
+ if (dev->fsa_dev == NULL ||
+ dev->maximum_num_containers != maximum_num_containers) {
+
+ fsa_dev_ptr = dev->fsa_dev;
+
+ dev->fsa_dev = kcalloc(maximum_num_containers,
+ sizeof(*fsa_dev_ptr), GFP_KERNEL);
+
+ kfree(fsa_dev_ptr);
+ fsa_dev_ptr = NULL;
- dev->fsa_dev = fsa_dev_ptr;
- dev->maximum_num_containers = maximum_num_containers;
- for (index = 0; index < dev->maximum_num_containers; ) {
- fsa_dev_ptr[index].devname[0] = '\0';
+ if (!dev->fsa_dev)
+ return -ENOMEM;
+
+ dev->maximum_num_containers = maximum_num_containers;
+ }
+ for (index = 0; index < dev->maximum_num_containers; index++) {
+ dev->fsa_dev[index].devname[0] = '\0';
+ dev->fsa_dev[index].valid = 0;
status = aac_probe_container(dev, index);
@@ -490,12 +523,6 @@ int aac_get_containers(struct aac_dev *dev)
printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
break;
}
-
- /*
- * If there are no more containers, then stop asking.
- */
- if (++index >= status)
- break;
}
return status;
}
@@ -602,6 +629,7 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+ int i;
if (!aac_valid_context(scsicmd, fibptr))
@@ -624,6 +652,10 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
fsa_dev_ptr->block_size =
le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
}
+ for (i = 0; i < 16; i++)
+ fsa_dev_ptr->identifier[i] =
+ dresp->mnt[0].fileinfo.bdevinfo
+ .identifier[i];
fsa_dev_ptr->valid = 1;
/* sense_key holds the current state of the spin-up */
if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@@ -918,6 +950,28 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
inqstrcpy ("V1.0", str->prl);
}
+static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
+ struct aac_dev *dev, struct scsi_cmnd *scsicmd)
+{
+ int container;
+
+ vpdpage83data->type3.codeset = 1;
+ vpdpage83data->type3.identifiertype = 3;
+ vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
+ - 4;
+
+ for (container = 0; container < dev->maximum_num_containers;
+ container++) {
+
+ if (scmd_id(scsicmd) == container) {
+ memcpy(vpdpage83data->type3.Identifier,
+ dev->fsa_dev[container].identifier,
+ 16);
+ break;
+ }
+ }
+}
+
static void get_container_serial_callback(void *context, struct fib * fibptr)
{
struct aac_get_serial_resp * get_serial_reply;
@@ -935,39 +989,47 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
/*Check to see if it's for VPD 0x83 or 0x80 */
if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
+ struct aac_dev *dev;
int i;
- TVPD_Page83 VPDPage83Data;
+ struct tvpd_page83 vpdpage83data;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- memset(((u8 *)&VPDPage83Data), 0,
- sizeof(VPDPage83Data));
+ memset(((u8 *)&vpdpage83data), 0,
+ sizeof(vpdpage83data));
/* DIRECT_ACCESS_DEVIC */
- VPDPage83Data.DeviceType = 0;
+ vpdpage83data.DeviceType = 0;
/* DEVICE_CONNECTED */
- VPDPage83Data.DeviceTypeQualifier = 0;
+ vpdpage83data.DeviceTypeQualifier = 0;
/* VPD_DEVICE_IDENTIFIERS */
- VPDPage83Data.PageCode = 0x83;
- VPDPage83Data.Reserved = 0;
- VPDPage83Data.PageLength =
- sizeof(VPDPage83Data.IdDescriptorType1) +
- sizeof(VPDPage83Data.IdDescriptorType2);
+ vpdpage83data.PageCode = 0x83;
+ vpdpage83data.reserved = 0;
+ vpdpage83data.PageLength =
+ sizeof(vpdpage83data.type1) +
+ sizeof(vpdpage83data.type2);
+
+ /* VPD 83 Type 3 is not supported for ARC */
+ if (dev->sa_firmware)
+ vpdpage83data.PageLength +=
+ sizeof(vpdpage83data.type3);
/* T10 Vendor Identifier Field Format */
- /* VpdCodeSetAscii */
- VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+ /* VpdcodesetAscii */
+ vpdpage83data.type1.codeset = 2;
/* VpdIdentifierTypeVendorId */
- VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
- VPDPage83Data.IdDescriptorType1.IdentifierLength =
- sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+ vpdpage83data.type1.identifiertype = 1;
+ vpdpage83data.type1.identifierlength =
+ sizeof(vpdpage83data.type1) - 4;
/* "ADAPTEC " for adaptec */
- memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+ memcpy(vpdpage83data.type1.venid,
"ADAPTEC ",
- sizeof(VPDPage83Data.IdDescriptorType1.VendId));
- memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+ sizeof(vpdpage83data.type1.venid));
+ memcpy(vpdpage83data.type1.productid,
"ARRAY ",
sizeof(
- VPDPage83Data.IdDescriptorType1.ProductId));
+ vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
 * The LSB is at the end.
@@ -976,32 +1038,41 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
u8 temp =
(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
if (temp > 0x9) {
- VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ vpdpage83data.type1.serialnumber[i] =
'A' + (temp - 0xA);
} else {
- VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ vpdpage83data.type1.serialnumber[i] =
'0' + temp;
}
}
/* VpdCodeSetBinary */
- VPDPage83Data.IdDescriptorType2.CodeSet = 1;
- /* VpdIdentifierTypeEUI64 */
- VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
- VPDPage83Data.IdDescriptorType2.IdentifierLength =
- sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+ vpdpage83data.type2.codeset = 1;
+ /* VpdidentifiertypeEUI64 */
+ vpdpage83data.type2.identifiertype = 2;
+ vpdpage83data.type2.identifierlength =
+ sizeof(vpdpage83data.type2) - 4;
- VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
- VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
- VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+ vpdpage83data.type2.eu64id.venid[0] = 0xD0;
+ vpdpage83data.type2.eu64id.venid[1] = 0;
+ vpdpage83data.type2.eu64id.venid[2] = 0;
- VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+ vpdpage83data.type2.eu64id.Serial =
get_serial_reply->uid;
- VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+ vpdpage83data.type2.eu64id.reserved = 0;
+
+ /*
+ * VpdIdentifierTypeFCPHName
+ * VPD 0x83 Type 3 not supported for ARC
+ */
+ if (dev->sa_firmware) {
+ build_vpd83_type3(&vpdpage83data,
+ dev, scsicmd);
+ }
/* Move the inquiry data to the response buffer. */
- scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
- sizeof(VPDPage83Data));
+ scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
+ sizeof(vpdpage83data));
} else {
/* It must be for VPD 0x80 */
char sp[13];
@@ -1144,7 +1215,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
long ret;
aac_fib_init(fib);
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+ !dev->sync_mode) {
struct aac_raw_io2 *readcmd2;
readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
memset(readcmd2, 0, sizeof(struct aac_raw_io2));
@@ -1270,7 +1343,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
long ret;
aac_fib_init(fib);
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+ !dev->sync_mode) {
struct aac_raw_io2 *writecmd2;
writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
memset(writecmd2, 0, sizeof(struct aac_raw_io2));
@@ -1435,6 +1510,52 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
return srbcmd;
}
+static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
+ struct scsi_cmnd *cmd)
+{
+ struct aac_hba_cmd_req *hbacmd;
+ struct aac_dev *dev;
+ int bus, target;
+ u64 address;
+
+ dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+ hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
+ memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */
+ /* iu_type is a parameter of aac_hba_send */
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ hbacmd->byte1 = 2;
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ hbacmd->byte1 = 1;
+ break;
+ case DMA_NONE:
+ default:
+ break;
+ }
+ hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
+
+ bus = aac_logical_to_phys(scmd_channel(cmd));
+ target = scmd_id(cmd);
+ hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
+
+ /* we fill in reply_qid later in aac_src_deliver_message */
+ /* we fill in iu_type, request_id later in aac_hba_send */
+ /* we fill in emb_data_desc_count later in aac_build_sghba */
+
+ memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
+ hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
+
+ address = (u64)fib->hw_error_pa;
+ hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+ hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+ hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+ return hbacmd;
+}
+
static void aac_srb_callback(void *context, struct fib * fibptr);
static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
@@ -1505,11 +1626,243 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
return aac_scsi_32(fib, cmd);
}
+static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
+{
+ struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
+ struct aac_dev *dev;
+ long ret;
+
+ dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+ ret = aac_build_sghba(cmd, hbacmd,
+ dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Now send the HBA command to the adapter
+ */
+ fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
+ sizeof(struct aac_hba_sgl);
+
+ return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
+ (fib_callback) aac_hba_callback,
+ (void *) cmd);
+}
+
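
The 64 in the hbacmd_size calculation above corresponds to the fixed part of struct aac_hba_cmd_req: the fields before the sge[] array add up to 64 bytes, and each embedded SG element then adds one 16-byte struct aac_hba_sgl. A small illustrative calculation; the element count is a made-up example:

#include <stdio.h>
#include <stdint.h>

/* mirrors struct aac_hba_sgl from aacraid.h: four 32-bit words, 16 bytes */
struct aac_hba_sgl {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t len;
	uint32_t flags;
};

int main(void)
{
	unsigned int emb_data_desc_count = 4;	/* hypothetical number of embedded SG elements */
	size_t hbacmd_size = 64 + emb_data_desc_count * sizeof(struct aac_hba_sgl);

	printf("hbacmd_size = %zu bytes\n", hbacmd_size);	/* 64 + 4 * 16 = 128 */
	return 0;
}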
+int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+{
+ struct fib *fibptr;
+ struct aac_srb *srbcmd;
+ struct sgmap64 *sg64;
+ struct aac_ciss_identify_pd *identify_resp;
+ dma_addr_t addr;
+ u32 vbus, vid;
+ u16 fibsize, datasize;
+ int rcode = -ENOMEM;
+
+
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ goto out;
+
+ fibsize = sizeof(struct aac_srb) -
+ sizeof(struct sgentry) + sizeof(struct sgentry64);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+
+ identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr);
+
+ if (!identify_resp)
+ goto fib_free_ptr;
+
+ vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
+ vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+
+ aac_fib_init(fibptr);
+
+ srbcmd = (struct aac_srb *) fib_data(fibptr);
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srbcmd->channel = cpu_to_le32(vbus);
+ srbcmd->id = cpu_to_le32(vid);
+ srbcmd->lun = 0;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->timeout = cpu_to_le32(10);
+ srbcmd->retry_limit = 0;
+ srbcmd->cdb_size = cpu_to_le32(12);
+ srbcmd->count = cpu_to_le32(datasize);
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ sg64 = (struct sgmap64 *)&srbcmd->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+ sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+ sg64->sg[0].count = cpu_to_le32(datasize);
+
+ rcode = aac_fib_send(ScsiPortCommand64,
+ fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+
+ if (identify_resp->current_queue_depth_limit <= 0 ||
+ identify_resp->current_queue_depth_limit > 32)
+ dev->hba_map[bus][target].qd_limit = 32;
+ else
+ dev->hba_map[bus][target].qd_limit =
+ identify_resp->current_queue_depth_limit;
+
+ pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
+
+ aac_fib_complete(fibptr);
+
+fib_free_ptr:
+ aac_fib_free(fibptr);
+out:
+ return rcode;
+}
+
+/**
+ *	aac_update_hba_map() - update current hba map with data from FW
+ * @dev: aac_dev structure
+ * @phys_luns: FW information from report phys luns
+ *
+ * Update our hba map with the information gathered from the FW
+ */
+void aac_update_hba_map(struct aac_dev *dev,
+ struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+{
+ /* ok and extended reporting */
+ u32 lun_count, nexus;
+ u32 i, bus, target;
+ u8 expose_flag, attribs;
+ u8 devtype;
+
+ lun_count = ((phys_luns->list_length[0] << 24)
+ + (phys_luns->list_length[1] << 16)
+ + (phys_luns->list_length[2] << 8)
+ + (phys_luns->list_length[3])) / 24;
+
+ for (i = 0; i < lun_count; ++i) {
+
+ bus = phys_luns->lun[i].level2[1] & 0x3f;
+ target = phys_luns->lun[i].level2[0];
+ expose_flag = phys_luns->lun[i].bus >> 6;
+ attribs = phys_luns->lun[i].node_ident[9];
+ nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+
+ if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
+ continue;
+
+ dev->hba_map[bus][target].expose = expose_flag;
+
+ if (expose_flag != 0) {
+ devtype = AAC_DEVTYPE_RAID_MEMBER;
+ goto update_devtype;
+ }
+
+ if (nexus != 0 && (attribs & 8)) {
+ devtype = AAC_DEVTYPE_NATIVE_RAW;
+ dev->hba_map[bus][target].rmw_nexus =
+ nexus;
+ } else
+ devtype = AAC_DEVTYPE_ARC_RAW;
+
+ if (devtype != AAC_DEVTYPE_NATIVE_RAW)
+ goto update_devtype;
+
+ if (aac_issue_bmic_identify(dev, bus, target) < 0)
+ dev->hba_map[bus][target].qd_limit = 32;
+
+update_devtype:
+ if (rescan == AAC_INIT)
+ dev->hba_map[bus][target].devtype = devtype;
+ else
+ dev->hba_map[bus][target].new_devtype = devtype;
+ }
+}
+
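
The list_length field decoded above is a big-endian byte count of the returned LUN list, and each reported device occupies one 24-byte struct _ciss_lun, hence the division by 24. A stand-alone sketch of the same decode with made-up sample bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical big-endian length field from a CISS REPORT PHYSICAL LUNS reply */
	uint8_t list_length[4] = { 0x00, 0x00, 0x00, 0x48 };	/* 72 bytes of payload */

	uint32_t lun_count = ((uint32_t)list_length[0] << 24 |
			      (uint32_t)list_length[1] << 16 |
			      (uint32_t)list_length[2] << 8 |
			      (uint32_t)list_length[3]) / 24;	/* 24 bytes per _ciss_lun entry */

	printf("lun_count = %u\n", lun_count);	/* 72 / 24 = 3 */
	return 0;
}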
+/**
+ *	aac_report_phys_luns() - Process topology change
+ * @dev: aac_dev structure
+ * @fibptr: fib pointer
+ *
+ * Execute a CISS REPORT PHYS LUNS and process the results into
+ * the current hba_map.
+ */
+int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+{
+ int fibsize, datasize;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+ struct aac_srb *srbcmd;
+ struct sgmap64 *sg64;
+ dma_addr_t addr;
+ u32 vbus, vid;
+ int rcode = 0;
+
+ /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
+ + sizeof(struct sgentry64);
+ datasize = sizeof(struct aac_ciss_phys_luns_resp)
+ + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+
+ phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
+ dev->pdev, datasize, &addr);
+
+ if (phys_luns == NULL) {
+ rcode = -ENOMEM;
+ goto err_out;
+ }
+
+ vbus = (u32) le16_to_cpu(
+ dev->supplement_adapter_info.VirtDeviceBus);
+ vid = (u32) le16_to_cpu(
+ dev->supplement_adapter_info.VirtDeviceTarget);
+
+ aac_fib_init(fibptr);
+
+ srbcmd = (struct aac_srb *) fib_data(fibptr);
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srbcmd->channel = cpu_to_le32(vbus);
+ srbcmd->id = cpu_to_le32(vid);
+ srbcmd->lun = 0;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->timeout = cpu_to_le32(10);
+ srbcmd->retry_limit = 0;
+ srbcmd->cdb_size = cpu_to_le32(12);
+ srbcmd->count = cpu_to_le32(datasize);
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ sg64 = (struct sgmap64 *) &srbcmd->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(datasize);
+
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
+ FsaNormal, 1, 1, NULL, NULL);
+
+ /* analyse data */
+ if (rcode >= 0 && phys_luns->resp_flag == 2) {
+ /* ok and extended reporting */
+ aac_update_hba_map(dev, phys_luns, rescan);
+ }
+
+ pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
+err_out:
+ return rcode;
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
int rcode;
- u32 tmp;
+ u32 tmp, bus, target;
struct aac_adapter_info *info;
struct aac_bus_info *command;
struct aac_bus_info_response *bus_info;
@@ -1540,6 +1893,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
memcpy(&dev->adapter_info, info, sizeof(*info));
+ dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
struct aac_supplement_adapter_info * sinfo;
@@ -1567,6 +1921,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
+ /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
+ for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+ for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ dev->hba_map[bus][target].devtype = 0;
+ dev->hba_map[bus][target].qd_limit = 0;
+ }
+ }
/*
* GetBusInfo
@@ -1599,6 +1960,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
+ if (!dev->sync_mode && dev->sa_firmware &&
+ dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
+ /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+ rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
+ }
+
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -1765,6 +2132,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
(dev->scsi_host_ptr->sg_tablesize * 8) + 112;
}
}
+ if (!dev->sync_mode && dev->sa_firmware &&
+ dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
+ dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
+ HBA_MAX_SG_SEPARATE;
+
/* FIB should be freed only after getting the response from the F/W */
if (rcode != -ERESTARTSYS) {
aac_fib_complete(fibptr);
@@ -1845,6 +2217,15 @@ static void io_callback(void *context, struct fib * fibptr)
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
break;
+ case ST_MEDERR:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
+ SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
default:
#ifdef AAC_DETAILED_STATUS_INFO
printk(KERN_WARNING "io_callback: io failed, status = %d\n",
@@ -2312,7 +2693,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
- u32 cid;
+ u32 cid, bus;
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -2330,8 +2711,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if((cid >= dev->maximum_num_containers) ||
(scsicmd->device->lun != 0)) {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ goto scsi_done_ret;
}
/*
@@ -2359,15 +2739,30 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
}
} else { /* check for physical non-dasd devices */
- if (dev->nondasd_support || expose_physicals ||
- dev->jbod) {
+ bus = aac_logical_to_phys(scmd_channel(scsicmd));
+ if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+ (dev->hba_map[bus][cid].expose
+ == AAC_HIDE_DISK)){
+ if (scsicmd->cmnd[0] == INQUIRY) {
+ scsicmd->result = DID_NO_CONNECT << 16;
+ goto scsi_done_ret;
+ }
+ }
+
+ if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+ dev->hba_map[bus][cid].devtype
+ == AAC_DEVTYPE_NATIVE_RAW) {
+ if (dev->in_reset)
+ return -1;
+ return aac_send_hba_fib(scsicmd);
+ } else if (dev->nondasd_support || expose_physicals ||
+ dev->jbod) {
if (dev->in_reset)
return -1;
return aac_send_srb_fib(scsicmd);
} else {
scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ goto scsi_done_ret;
}
}
}
@@ -2385,13 +2780,34 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
- return 0;
+ goto scsi_done_ret;
}
-
- /* Handle commands here that don't really require going out to the adapter */
switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ if (dev->in_reset)
+ return -1;
+ return aac_read(scsicmd);
+
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ if (dev->in_reset)
+ return -1;
+ return aac_write(scsicmd);
+
+ case SYNCHRONIZE_CACHE:
+ if (((aac_cache & 6) == 6) && dev->cache_protected) {
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
+ }
+		/* Issue FIB to tell Firmware to flush its cache */
+ if ((aac_cache & 6) != 2)
+ return aac_synchronize(scsicmd);
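+		/* fall through - when no flush FIB is issued */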
case INQUIRY:
{
struct inquiry_data inq_data;
@@ -2414,8 +2830,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = AAC_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -2426,8 +2841,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = AAC_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -2436,8 +2850,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = AAC_STAT_GOOD;
} else {
/* vpd page not implemented */
scsicmd->result = DID_OK << 16 |
@@ -2452,8 +2865,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
}
- scsicmd->scsi_done(scsicmd);
- return 0;
+ break;
}
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
@@ -2469,9 +2881,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
}
if (dev->in_reset)
return -1;
@@ -2519,10 +2930,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
-
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
}
case READ_CAPACITY:
@@ -2547,11 +2956,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
-
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
}
case MODE_SENSE:
@@ -2629,10 +3035,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
-
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
}
case MODE_SENSE_10:
{
@@ -2708,18 +3112,17 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
-
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
}
case REQUEST_SENSE:
dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
- memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
- memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ sizeof(struct sense_data));
+ memset(&dev->fsa_dev[cid].sense_data, 0,
+ sizeof(struct sense_data));
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
case ALLOW_MEDIUM_REMOVAL:
dprintk((KERN_DEBUG "LOCK command.\n"));
@@ -2728,9 +3131,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
/*
* These commands are all No-Ops
*/
@@ -2746,80 +3148,41 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
- return 0;
+ break;
}
- /* FALLTHRU */
case RESERVE:
case RELEASE:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ scsicmd->result = AAC_STAT_GOOD;
+ break;
case START_STOP:
return aac_start_stop(scsicmd);
- }
-
- switch (scsicmd->cmnd[0])
- {
- case READ_6:
- case READ_10:
- case READ_12:
- case READ_16:
- if (dev->in_reset)
- return -1;
- /*
- * Hack to keep track of ordinal number of the device that
- * corresponds to a container. Needed to convert
- * containers to /dev/sd device names
- */
-
- if (scsicmd->request->rq_disk)
- strlcpy(fsa_dev_ptr[cid].devname,
- scsicmd->request->rq_disk->disk_name,
- min(sizeof(fsa_dev_ptr[cid].devname),
- sizeof(scsicmd->request->rq_disk->disk_name) + 1));
-
- return aac_read(scsicmd);
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_16:
- if (dev->in_reset)
- return -1;
- return aac_write(scsicmd);
-
- case SYNCHRONIZE_CACHE:
- if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- scsicmd->scsi_done(scsicmd);
- return 0;
- }
- /* Issue FIB to tell Firmware to flush it's cache */
- if ((aac_cache & 6) != 2)
- return aac_synchronize(scsicmd);
- /* FALLTHRU */
- default:
- /*
- * Unhandled commands
- */
- dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense(&dev->fsa_dev[cid].sense_data,
+ /* FALLTHRU */
+ default:
+ /*
+ * Unhandled commands
+ */
+ dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
+ scsicmd->cmnd[0]));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
- memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
- scsicmd->scsi_done(scsicmd);
- return 0;
}
+
+scsi_done_ret:
+
+ scsicmd->scsi_done(scsicmd);
+ return 0;
}
static int query_disk(struct aac_dev *dev, void __user *arg)
@@ -2954,16 +3317,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
return;
BUG_ON(fibptr == NULL);
- dev = fibptr->dev;
- scsi_dma_unmap(scsicmd);
-
- /* expose physical device if expose_physicald flag is on */
- if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
- && expose_physicals > 0)
- aac_expose_phy_device(scsicmd);
+ dev = fibptr->dev;
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2976,158 +3334,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
*/
scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
- le32_to_cpu(srbreply->data_xfer_length));
- /*
- * First check the fib status
- */
+ }
- if (le32_to_cpu(srbreply->status) != ST_OK) {
- int len;
- printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
+ scsi_dma_unmap(scsicmd);
- /*
- * Next check the srb status
- */
- switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
- case SRB_STATUS_ERROR_RECOVERY:
- case SRB_STATUS_PENDING:
- case SRB_STATUS_SUCCESS:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- case SRB_STATUS_DATA_OVERRUN:
- switch (scsicmd->cmnd[0]) {
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_16:
- case WRITE_16:
- if (le32_to_cpu(srbreply->data_xfer_length)
- < scsicmd->underflow)
- printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
- else
- printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- case INQUIRY: {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
- default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- }
- break;
- case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
- break;
- case SRB_STATUS_ABORT_FAILED:
- /*
- * Not sure about this one - but assuming the
- * hba was trying to abort for some reason
- */
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+	/* expose physical device if expose_physicals flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
+ /*
+ * First check the fib status
+ */
+
+ if (le32_to_cpu(srbreply->status) != ST_OK) {
+ int len;
+
+ pr_warn("aac_srb_callback: srb failed, status = %d\n",
+ le32_to_cpu(srbreply->status));
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_CHECK_CONDITION;
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
+
+ /*
+ * Next check the srb status
+ */
+ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+ case SRB_STATUS_ERROR_RECOVERY:
+ case SRB_STATUS_PENDING:
+ case SRB_STATUS_SUCCESS:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_DATA_OVERRUN:
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length)
+ < scsicmd->underflow)
+ pr_warn("aacraid: SCSI CMD underflow\n");
+ else
+ pr_warn("aacraid: SCSI CMD Data Overrun\n");
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
break;
- case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
+ case INQUIRY:
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
break;
- case SRB_STATUS_NO_DEVICE:
- case SRB_STATUS_INVALID_PATH_ID:
- case SRB_STATUS_INVALID_TARGET_ID:
- case SRB_STATUS_INVALID_LUN:
- case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
+ default:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
+ }
+ break;
+ case SRB_STATUS_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_ABORT_FAILED:
+ /*
+ * Not sure about this one - but assuming the
+ * hba was trying to abort for some reason
+ */
+ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_PARITY_ERROR:
+ scsicmd->result = DID_PARITY << 16
+ | MSG_PARITY_ERROR << 8;
+ break;
+ case SRB_STATUS_NO_DEVICE:
+ case SRB_STATUS_INVALID_PATH_ID:
+ case SRB_STATUS_INVALID_TARGET_ID:
+ case SRB_STATUS_INVALID_LUN:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ scsicmd->result = DID_NO_CONNECT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_COMMAND_TIMEOUT:
- case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_COMMAND_TIMEOUT:
+ case SRB_STATUS_TIMEOUT:
+ scsicmd->result = DID_TIME_OUT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUSY:
+ scsicmd->result = DID_BUS_BUSY << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUS_RESET:
+ scsicmd->result = DID_RESET << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_MESSAGE_REJECTED:
- scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
- break;
- case SRB_STATUS_REQUEST_FLUSHED:
- case SRB_STATUS_ERROR:
- case SRB_STATUS_INVALID_REQUEST:
- case SRB_STATUS_REQUEST_SENSE_FAILED:
- case SRB_STATUS_NO_HBA:
- case SRB_STATUS_UNEXPECTED_BUS_FREE:
- case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
- case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
- case SRB_STATUS_DELAYED_RETRY:
- case SRB_STATUS_BAD_FUNCTION:
- case SRB_STATUS_NOT_STARTED:
- case SRB_STATUS_NOT_IN_USE:
- case SRB_STATUS_FORCE_ABORT:
- case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
- default:
+ case SRB_STATUS_MESSAGE_REJECTED:
+ scsicmd->result = DID_ERROR << 16
+ | MESSAGE_REJECT << 8;
+ break;
+ case SRB_STATUS_REQUEST_FLUSHED:
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_REQUEST_SENSE_FAILED:
+ case SRB_STATUS_NO_HBA:
+ case SRB_STATUS_UNEXPECTED_BUS_FREE:
+ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+ case SRB_STATUS_DELAYED_RETRY:
+ case SRB_STATUS_BAD_FUNCTION:
+ case SRB_STATUS_NOT_STARTED:
+ case SRB_STATUS_NOT_IN_USE:
+ case SRB_STATUS_FORCE_ABORT:
+ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
- le32_to_cpu(srbreply->srb_status) & 0x3F,
- aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
- le32_to_cpu(srbreply->scsi_status));
+		pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+ le32_to_cpu(srbreply->srb_status) & 0x3F,
+ aac_get_status_string(
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->scsi_status));
#endif
- if ((scsicmd->cmnd[0] == ATA_12)
- || (scsicmd->cmnd[0] == ATA_16)) {
- if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- } else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
+ /*
+ * When the CC bit is SET by the host in ATA pass thru CDB,
+ * driver is supposed to return DID_OK
+ *
+ * When the CC bit is RESET by the host, driver should
+ * return DID_ERROR
+ */
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
} else {
scsicmd->result = DID_ERROR << 16
| COMMAND_COMPLETE << 8;
- break;
+ break;
}
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
}
- if (le32_to_cpu(srbreply->scsi_status)
- == SAM_STAT_CHECK_CONDITION) {
- int len;
+ }
+ if (le32_to_cpu(srbreply->scsi_status)
+ == SAM_STAT_CHECK_CONDITION) {
+ int len;
- scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len);
+ pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
#endif
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
}
+
/*
* OR in the scsi status (already shifted up a bit)
*/
@@ -3137,9 +3513,152 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->scsi_done(scsicmd);
}
+static void hba_resp_task_complete(struct aac_dev *dev,
+ struct scsi_cmnd *scsicmd,
+				struct aac_hba_resp *err)
+{
+ scsicmd->result = err->status;
+ /* set residual count */
+ scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
+
+ switch (err->status) {
+ case SAM_STAT_GOOD:
+ scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ {
+ int len;
+
+ len = min_t(u8, err->sense_response_data_len,
+ SCSI_SENSE_BUFFERSIZE);
+ if (len)
+ memcpy(scsicmd->sense_buffer,
+ err->sense_response_buf, len);
+ scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ case SAM_STAT_BUSY:
+ scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SAM_STAT_TASK_ABORTED:
+ scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SAM_STAT_RESERVATION_CONFLICT:
+ case SAM_STAT_TASK_SET_FULL:
+ default:
+ scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+}
+
+static void hba_resp_task_failure(struct aac_dev *dev,
+ struct scsi_cmnd *scsicmd,
+ struct aac_hba_resp *err)
+{
+ switch (err->status) {
+ case HBA_RESP_STAT_HBAMODE_DISABLED:
+ {
+ u32 bus, cid;
+
+ bus = aac_logical_to_phys(scmd_channel(scsicmd));
+ cid = scmd_id(scsicmd);
+ if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+ dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
+ dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
+ }
+ scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ case HBA_RESP_STAT_IO_ERROR:
+ case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
+ break;
+ case HBA_RESP_STAT_IO_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case HBA_RESP_STAT_INVALID_DEVICE:
+ scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case HBA_RESP_STAT_UNDERRUN:
+ /* UNDERRUN is OK */
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case HBA_RESP_STAT_OVERRUN:
+ default:
+ scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+}
+
+/**
+ *
+ * aac_hba_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a native HBA scsi command
+ *
+ */
+void aac_hba_callback(void *context, struct fib *fibptr)
+{
+ struct aac_dev *dev;
+ struct scsi_cmnd *scsicmd;
+
+ struct aac_hba_resp *err =
+ &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+
+ scsicmd = (struct scsi_cmnd *) context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ WARN_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
+ if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
+ scsi_dma_unmap(scsicmd);
+
+ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+ /* fast response */
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ goto out;
+ }
+
+ switch (err->service_response) {
+ case HBA_RESP_SVCRES_TASK_COMPLETE:
+ hba_resp_task_complete(dev, scsicmd, err);
+ break;
+ case HBA_RESP_SVCRES_FAILURE:
+ hba_resp_task_failure(dev, scsicmd, err);
+ break;
+ case HBA_RESP_SVCRES_TMF_REJECTED:
+ scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+ break;
+ case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+ scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case HBA_RESP_SVCRES_TMF_COMPLETE:
+ case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ default:
+ scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+
+out:
+ aac_fib_complete(fibptr);
+
+ if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
+ scsicmd->SCp.sent_command = 1;
+ else
+ scsicmd->scsi_done(scsicmd);
+}
+
/**
*
- * aac_send_scb_fib
+ * aac_send_srb_fib
* @scsicmd: the scsi command block
*
* This routine will form a FIB and fill in the aac_srb from the
@@ -3182,6 +3701,54 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
return -1;
}
+/**
+ *
+ * aac_send_hba_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_hba_cmd_req from the
+ * scsicmd passed in.
+ */
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
+{
+ struct fib *cmd_fibcontext;
+ struct aac_dev *dev;
+ int status;
+
+ dev = shost_priv(scsicmd->device->host);
+ if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
+ scsicmd->device->lun > AAC_MAX_LUN - 1) {
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ /*
+ * Allocate and initialize a Fib then setup a BlockWrite command
+ */
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
+ return -1;
+
+ status = aac_adapter_hba(cmd_fibcontext, scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
+ status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+
+ return -1;
+}
+
+
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
struct aac_dev *dev;
@@ -3434,6 +4001,75 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
return 0;
}
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+ struct aac_hba_cmd_req *hbacmd,
+ int sg_max,
+ u64 sg_address)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+ struct scatterlist *sg;
+ int i;
+ u32 cur_size;
+ struct aac_hba_sgl *sge;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg <= 0) {
+ byte_count = nseg;
+ goto out;
+ }
+
+ if (nseg > HBA_MAX_SG_EMBEDDED)
+ sge = &hbacmd->sge[2];
+ else
+ sge = &hbacmd->sge[0];
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+
+ WARN_ON(i >= sg_max);
+ sge->addr_hi = cpu_to_le32((u32)(addr>>32));
+ sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
+ cur_size = cpu_to_le32(count);
+ sge->len = cur_size;
+ sge->flags = 0;
+ byte_count += count;
+ sge++;
+ }
+
+ sge--;
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp;
+
+		temp = le32_to_cpu(sge->len) -
+			(byte_count - scsi_bufflen(scsicmd));
+ sge->len = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+
+ if (nseg <= HBA_MAX_SG_EMBEDDED) {
+ hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
+ sge->flags = cpu_to_le32(0x40000000);
+ } else {
+ /* not embedded */
+ hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
+ hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
+ hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
+ hbacmd->sge[0].addr_lo =
+ cpu_to_le32((u32)(sg_address & 0xffffffff));
+ }
+
+ /* Check for command underflow */
+ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+ pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+out:
+ return byte_count;
+}
+
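
In aac_build_sghba() above, up to HBA_MAX_SG_EMBEDDED elements are written directly into the command IU and the last one is tagged with the 0x40000000 end-of-list flag; larger lists go into the separately allocated SGL buffer and sge[0] becomes a single extension descriptor tagged 0x80000000. A minimal sketch of that decision; the flag names and helper layout are illustrative, the driver itself writes the literal values:

#include <stdio.h>
#include <stdint.h>

#define HBA_MAX_SG_EMBEDDED	28		/* from aacraid.h */

#define SGL_FLAG_END_OF_LIST	0x40000000u	/* illustrative name for the embedded terminator */
#define SGL_FLAG_EXTENSION	0x80000000u	/* illustrative name for the external-list descriptor */

int main(void)
{
	int nseg[] = { 12, 28, 29, 90 };
	unsigned int i;

	for (i = 0; i < sizeof(nseg) / sizeof(nseg[0]); i++) {
		if (nseg[i] <= HBA_MAX_SG_EMBEDDED)
			/* all elements embedded; emb_data_desc_count = nseg */
			printf("%2d segments: embedded, last flags 0x%08x\n",
			       nseg[i], SGL_FLAG_END_OF_LIST);
		else
			/* one extension descriptor; emb_data_desc_count = 1 */
			printf("%2d segments: external buffer, sge[0] flags 0x%08x\n",
			       nseg[i], SGL_FLAG_EXTENSION);
	}
	return 0;
}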
#ifdef AAC_DETAILED_STATUS_INFO
struct aac_srb_status_info {
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index f059c14efa0c..f2344971e3cb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,3 +1,37 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * aacraid.h
+ *
+ * Abstract: Contains all routines for control of the aacraid driver
+ *
+ */
+
+#ifndef _AACRAID_H_
+#define _AACRAID_H_
#ifndef dprintk
# define dprintk(x)
#endif
@@ -63,8 +97,8 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41066
-# define AAC_DRIVER_BRANCH "-ms"
+# define AAC_DRIVER_BUILD 50740
+# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -72,13 +106,311 @@ enum {
#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
-#define AAC_MAX_LUN (8)
+#define AAC_MAX_LUN 256
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
+#define AAC_MAX_NATIVE_TARGETS 1024
+/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
+#define AAC_MAX_BUSES 5
+#define AAC_MAX_TARGETS 256
+#define AAC_MAX_NATIVE_SIZE 2048
+#define FW_ERROR_BUFFER_SIZE 512
+
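
The limits above fit together: bus 0 carries no targets, so the four usable buses of 256 targets each account for AAC_MAX_NATIVE_TARGETS, and each native FIB reserves FW_ERROR_BUFFER_SIZE bytes of its AAC_MAX_NATIVE_SIZE for the error buffer, leaving the remainder for the command IU. A quick arithmetic check of those constants:

#include <stdio.h>

#define AAC_MAX_NATIVE_TARGETS	1024
#define AAC_MAX_BUSES		5	/* bus 0 is empty, buses 1-4 carry targets */
#define AAC_MAX_TARGETS		256
#define AAC_MAX_NATIVE_SIZE	2048
#define FW_ERROR_BUFFER_SIZE	512

int main(void)
{
	/* four usable buses of 256 targets each account for the native target limit */
	printf("usable targets = %d\n", (AAC_MAX_BUSES - 1) * AAC_MAX_TARGETS);	/* 1024 */

	/* room left for the command IU once the per-FIB error buffer is reserved */
	printf("command area = %d bytes\n",
	       AAC_MAX_NATIVE_SIZE - FW_ERROR_BUFFER_SIZE);	/* 1536 */
	return 0;
}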
+/* Thor AIF events */
+#define SA_AIF_HOTPLUG (1<<1)
+#define SA_AIF_HARDWARE (1<<2)
+#define SA_AIF_PDEV_CHANGE (1<<4)
+#define SA_AIF_LDEV_CHANGE (1<<5)
+#define SA_AIF_BPSTAT_CHANGE (1<<30)
+#define SA_AIF_BPCFG_CHANGE (1<<31)
+
+#define HBA_MAX_SG_EMBEDDED 28
+#define HBA_MAX_SG_SEPARATE 90
+#define HBA_SENSE_DATA_LEN_MAX 32
+#define HBA_REQUEST_TAG_ERROR_FLAG 0x00000002
+#define HBA_SGL_FLAGS_EXT 0x80000000UL
+
+struct aac_hba_sgl {
+ u32 addr_lo; /* Lower 32-bits of SGL element address */
+ u32 addr_hi; /* Upper 32-bits of SGL element address */
+ u32 len; /* Length of SGL element in bytes */
+ u32 flags; /* SGL element flags */
+};
+
+enum {
+ HBA_IU_TYPE_SCSI_CMD_REQ = 0x40,
+ HBA_IU_TYPE_SCSI_TM_REQ = 0x41,
+ HBA_IU_TYPE_SATA_REQ = 0x42,
+ HBA_IU_TYPE_RESP = 0x60,
+ HBA_IU_TYPE_COALESCED_RESP = 0x61,
+ HBA_IU_TYPE_INT_COALESCING_CFG_REQ = 0x70
+};
+
+enum {
+ HBA_CMD_BYTE1_DATA_DIR_IN = 0x1,
+ HBA_CMD_BYTE1_DATA_DIR_OUT = 0x2,
+ HBA_CMD_BYTE1_DATA_TYPE_DDR = 0x4,
+ HBA_CMD_BYTE1_CRYPTO_ENABLE = 0x8
+};
+
+enum {
+ HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN = 0x0,
+ HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT,
+ HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR,
+ HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE
+};
+
+enum {
+ HBA_RESP_DATAPRES_NO_DATA = 0x0,
+ HBA_RESP_DATAPRES_RESPONSE_DATA,
+ HBA_RESP_DATAPRES_SENSE_DATA
+};
+
+enum {
+ HBA_RESP_SVCRES_TASK_COMPLETE = 0x0,
+ HBA_RESP_SVCRES_FAILURE,
+ HBA_RESP_SVCRES_TMF_COMPLETE,
+ HBA_RESP_SVCRES_TMF_SUCCEEDED,
+ HBA_RESP_SVCRES_TMF_REJECTED,
+ HBA_RESP_SVCRES_TMF_LUN_INVALID
+};
+
+enum {
+ HBA_RESP_STAT_IO_ERROR = 0x1,
+ HBA_RESP_STAT_IO_ABORTED,
+ HBA_RESP_STAT_NO_PATH_TO_DEVICE,
+ HBA_RESP_STAT_INVALID_DEVICE,
+ HBA_RESP_STAT_HBAMODE_DISABLED = 0xE,
+ HBA_RESP_STAT_UNDERRUN = 0x51,
+ HBA_RESP_STAT_OVERRUN = 0x75
+};
+
+struct aac_hba_cmd_req {
+ u8 iu_type; /* HBA information unit type */
+ /*
+ * byte1:
+ * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT
+ * [2] TYPE - 0=PCI, 1=DDR
+ * [3] CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled
+ */
+ u8 byte1;
+ u8 reply_qid; /* Host reply queue to post response to */
+ u8 reserved1;
+ __le32 it_nexus; /* Device handle for the request */
+ __le32 request_id; /* Sender context */
+ /* Lower 32-bits of tweak value for crypto enabled IOs */
+ __le32 tweak_value_lo;
+ u8 cdb[16]; /* SCSI CDB of the command */
+ u8 lun[8]; /* SCSI LUN of the command */
+
+ /* Total data length in bytes to be read/written (if any) */
+ __le32 data_length;
+
+ /* [2:0] Task Attribute, [6:3] Command Priority */
+ u8 attr_prio;
+
+ /* Number of SGL elements embedded in the HBA req */
+ u8 emb_data_desc_count;
+
+ __le16 dek_index; /* DEK index for crypto enabled IOs */
+
+ /* Lower 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_lo;
+
+ /* Upper 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_hi;
+
+ /* Length of reserved error data area on the host in bytes */
+ __le32 error_length;
+
+ /* Upper 32-bits of tweak value for crypto enabled IOs */
+ __le32 tweak_value_hi;
+
+ struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */
+
+ /*
+ * structure must not exceed
+ * AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE
+ */
+};
+
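
The byte1 comment above maps onto the HBA_CMD_BYTE1_* values defined earlier in this header. A minimal sketch of how a data direction could be encoded into that field; the helper, the stand-in direction enum and the plain copies of the constants are illustrative only (aac_construct_hbacmd() in aachba.c writes the literals 1 and 2 directly):

#include <stdint.h>
#include <stdio.h>

/* copies of the values defined earlier in this header */
#define HBA_CMD_BYTE1_DATA_DIR_IN	0x1
#define HBA_CMD_BYTE1_DATA_DIR_OUT	0x2

/* stand-ins for the kernel's enum dma_data_direction */
enum dma_dir_ex { DMA_NONE_EX, DMA_TO_DEVICE_EX, DMA_FROM_DEVICE_EX };

/* hypothetical helper mirroring the switch in aac_construct_hbacmd() */
static uint8_t encode_byte1(enum dma_dir_ex dir)
{
	switch (dir) {
	case DMA_TO_DEVICE_EX:
		return HBA_CMD_BYTE1_DATA_DIR_OUT;	/* host -> device */
	case DMA_FROM_DEVICE_EX:
		return HBA_CMD_BYTE1_DATA_DIR_IN;	/* device -> host */
	default:
		return 0;				/* no data transfer */
	}
}

int main(void)
{
	printf("write byte1 = 0x%x\n", encode_byte1(DMA_TO_DEVICE_EX));
	printf("read  byte1 = 0x%x\n", encode_byte1(DMA_FROM_DEVICE_EX));
	return 0;
}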
+/* Task Management Functions (TMF) */
+#define HBA_TMF_ABORT_TASK 0x01
+#define HBA_TMF_LUN_RESET 0x08
+
+struct aac_hba_tm_req {
+ u8 iu_type; /* HBA information unit type */
+ u8 reply_qid; /* Host reply queue to post response to */
+ u8 tmf; /* Task management function */
+ u8 reserved1;
+
+ __le32 it_nexus; /* Device handle for the command */
+
+ u8 lun[8]; /* SCSI LUN */
+
+ /* Used to hold sender context. */
+ __le32 request_id; /* Sender context */
+ __le32 reserved2;
+
+ /* Request identifier of managed task */
+ __le32 managed_request_id; /* Sender context being managed */
+ __le32 reserved3;
+
+ /* Lower 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_lo;
+ /* Upper 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_hi;
+ /* Length of reserved error data area on the host in bytes */
+ __le32 error_length;
+};
+
+struct aac_hba_reset_req {
+ u8 iu_type; /* HBA information unit type */
+ /* 0 - reset specified device, 1 - reset all devices */
+ u8 reset_type;
+ u8 reply_qid; /* Host reply queue to post response to */
+ u8 reserved1;
+
+ __le32 it_nexus; /* Device handle for the command */
+ __le32 request_id; /* Sender context */
+ /* Lower 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_lo;
+ /* Upper 32-bits of reserved error data target location on the host */
+ __le32 error_ptr_hi;
+ /* Length of reserved error data area on the host in bytes */
+ __le32 error_length;
+};
+
+struct aac_hba_resp {
+ u8 iu_type; /* HBA information unit type */
+ u8 reserved1[3];
+ __le32 request_identifier; /* sender context */
+ __le32 reserved2;
+ u8 service_response; /* SCSI service response */
+ u8 status; /* SCSI status */
+ u8 datapres; /* [1:0] - data present, [7:2] - reserved */
+ u8 sense_response_data_len; /* Sense/response data length */
+ __le32 residual_count; /* Residual data length in bytes */
+ /* Sense/response data */
+ u8 sense_response_buf[HBA_SENSE_DATA_LEN_MAX];
+};
+
+struct aac_native_hba {
+ union {
+ struct aac_hba_cmd_req cmd;
+ struct aac_hba_tm_req tmr;
+ u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE];
+ } cmd;
+ union {
+ struct aac_hba_resp err;
+ u8 resp_bytes[FW_ERROR_BUFFER_SIZE];
+ } resp;
+};
+
+#define CISS_REPORT_PHYSICAL_LUNS 0xc3
+#define WRITE_HOST_WELLNESS 0xa5
+#define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15
+#define BMIC_IN 0x26
+#define BMIC_OUT 0x27
+
+struct aac_ciss_phys_luns_resp {
+ u8 list_length[4]; /* LUN list length (N-7, big endian) */
+ u8 resp_flag; /* extended response_flag */
+ u8 reserved[3];
+ struct _ciss_lun {
+ u8 tid[3]; /* Target ID */
+ u8 bus; /* Bus, flag (bits 6,7) */
+ u8 level3[2];
+ u8 level2[2];
+ u8 node_ident[16]; /* phys. node identifier */
+ } lun[1]; /* List of phys. devices */
+};
+
+/*
+ * Interrupts
+ */
+#define AAC_MAX_HRRQ 64
+
+struct aac_ciss_identify_pd {
+ u8 scsi_bus; /* SCSI Bus number on controller */
+ u8 scsi_id; /* SCSI ID on this bus */
+ u16 block_size; /* sector size in bytes */
+ u32 total_blocks; /* number for sectors on drive */
+ u32 reserved_blocks; /* controller reserved (RIS) */
+ u8 model[40]; /* Physical Drive Model */
+ u8 serial_number[40]; /* Drive Serial Number */
+ u8 firmware_revision[8]; /* drive firmware revision */
+ u8 scsi_inquiry_bits; /* inquiry byte 7 bits */
+ u8 compaq_drive_stamp; /* 0 means drive not stamped */
+ u8 last_failure_reason;
+
+ u8 flags;
+ u8 more_flags;
+ u8 scsi_lun; /* SCSI LUN for phys drive */
+ u8 yet_more_flags;
+ u8 even_more_flags;
+ u32 spi_speed_rules; /* SPI Speed :Ultra disable diagnose */
+ u8 phys_connector[2]; /* connector number on controller */
+ u8 phys_box_on_bus; /* phys enclosure this drive resides */
+ u8 phys_bay_in_box; /* phys drv bay this drive resides */
+ u32 rpm; /* Drive rotational speed in rpm */
+ u8 device_type; /* type of drive */
+ u8 sata_version; /* only valid when drive_type is SATA */
+ u64 big_total_block_count;
+ u64 ris_starting_lba;
+ u32 ris_size;
+ u8 wwid[20];
+ u8 controller_phy_map[32];
+ u16 phy_count;
+ u8 phy_connected_dev_type[256];
+ u8 phy_to_drive_bay_num[256];
+ u16 phy_to_attached_dev_index[256];
+ u8 box_index;
+ u8 spitfire_support;
+ u16 extra_physical_drive_flags;
+ u8 negotiated_link_rate[256];
+ u8 phy_to_phy_map[256];
+ u8 redundant_path_present_map;
+ u8 redundant_path_failure_map;
+ u8 active_path_number;
+ u16 alternate_paths_phys_connector[8];
+ u8 alternate_paths_phys_box_on_port[8];
+ u8 multi_lun_device_lun_count;
+ u8 minimum_good_fw_revision[8];
+ u8 unique_inquiry_bytes[20];
+ u8 current_temperature_degreesC;
+ u8 temperature_threshold_degreesC;
+ u8 max_temperature_degreesC;
+ u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512 * 2^exp */
+ u16 current_queue_depth_limit;
+ u8 switch_name[10];
+ u16 switch_port;
+ u8 alternate_paths_switch_name[40];
+ u8 alternate_paths_switch_port[8];
+ u16 power_on_hours; /* valid only if gas gauge supported */
+ u16 percent_endurance_used; /* valid only if gas gauge supported. */
+ u8 drive_authentication;
+ u8 smart_carrier_authentication;
+ u8 smart_carrier_app_fw_version;
+ u8 smart_carrier_bootloader_fw_version;
+ u8 SanitizeSecureEraseSupport;
+ u8 DriveKeyFlags;
+ u8 encryption_key_name[64];
+ u32 misc_drive_flags;
+ u16 dek_index;
+ u16 drive_encryption_flags;
+ u8 sanitize_maximum_time[6];
+ u8 connector_info_mode;
+ u8 connector_info_number[4];
+ u8 long_connector_name[64];
+ u8 device_unique_identifier[16];
+ u8 padto_2K[17];
+} __packed;
+
/*
* These macros convert from physical channels to virtual channels
*/
@@ -86,6 +418,7 @@ enum {
#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define ENCLOSURE_CHANNEL (3)
#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
@@ -351,10 +684,10 @@ enum aac_queue_types {
/* transport FIB header (PMC) */
struct aac_fib_xporthdr {
- u64 HostAddress; /* FIB host address w/o xport header */
- u32 Size; /* FIB size excluding xport header */
- u32 Handle; /* driver handle to reference the FIB */
- u64 Reserved[2];
+ __le64 HostAddress; /* FIB host address w/o xport header */
+ __le32 Size; /* FIB size excluding xport header */
+ __le32 Handle; /* driver handle to reference the FIB */
+ __le64 Reserved[2];
};
#define ALIGN32 32
@@ -379,7 +712,7 @@ struct aac_fibhdr {
__le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
__le32 TimeStamp; /* otherwise timestamp for FW internal use */
} u;
- u32 Handle; /* FIB handle used for MSGU commnunication */
+ __le32 Handle; /* FIB handle used for MSGU commnunication */
u32 Previous; /* FW internal use */
u32 Next; /* FW internal use */
};
@@ -489,41 +822,64 @@ enum fib_xfer_state {
#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */
+#define ADAPTER_INIT_STRUCT_REVISION_8 8 // Thor
-struct aac_init
+union aac_init
{
- __le32 InitStructRevision;
- __le32 Sa_MSIXVectors;
- __le32 fsrev;
- __le32 CommHeaderAddress;
- __le32 FastIoCommAreaAddress;
- __le32 AdapterFibsPhysicalAddress;
- __le32 AdapterFibsVirtualAddress;
- __le32 AdapterFibsSize;
- __le32 AdapterFibAlign;
- __le32 printfbuf;
- __le32 printfbufsiz;
- __le32 HostPhysMemPages; /* number of 4k pages of host
- physical memory */
- __le32 HostElapsedSeconds; /* number of seconds since 1970. */
- /*
- * ADAPTER_INIT_STRUCT_REVISION_4 begins here
- */
- __le32 InitFlags; /* flags for supported features */
+ struct _r7 {
+ __le32 init_struct_revision;
+ __le32 no_of_msix_vectors;
+ __le32 fsrev;
+ __le32 comm_header_address;
+ __le32 fast_io_comm_area_address;
+ __le32 adapter_fibs_physical_address;
+ __le32 adapter_fibs_virtual_address;
+ __le32 adapter_fibs_size;
+ __le32 adapter_fib_align;
+ __le32 printfbuf;
+ __le32 printfbufsiz;
+ /* number of 4k pages of host phys. mem. */
+ __le32 host_phys_mem_pages;
+ /* number of seconds since 1970. */
+ __le32 host_elapsed_seconds;
+ /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */
+ __le32 init_flags; /* flags for supported features */
#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040
#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080
#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100
- __le32 MaxIoCommands; /* max outstanding commands */
- __le32 MaxIoSize; /* largest I/O command */
- __le32 MaxFibSize; /* largest FIB to adapter */
- /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
- __le32 MaxNumAif; /* max number of aif */
- /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
- __le32 HostRRQ_AddrLow;
- __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */
+#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE 0x00000400
+ __le32 max_io_commands; /* max outstanding commands */
+ __le32 max_io_size; /* largest I/O command */
+ __le32 max_fib_size; /* largest FIB to adapter */
+ /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+ __le32 max_num_aif; /* max number of aif */
+ /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+ /* Host RRQ (response queue) for SRC */
+ __le32 host_rrq_addr_low;
+ __le32 host_rrq_addr_high;
+ } r7;
+ struct _r8 {
+ /* ADAPTER_INIT_STRUCT_REVISION_8 */
+ __le32 init_struct_revision;
+ __le32 rr_queue_count;
+ __le32 host_elapsed_seconds; /* number of secs since 1970. */
+ __le32 init_flags;
+ __le32 max_io_size; /* largest I/O command */
+ __le32 max_num_aif; /* max number of aif */
+ __le32 reserved1;
+ __le32 reserved2;
+ struct _rrq {
+ __le32 host_addr_low;
+ __le32 host_addr_high;
+ __le16 msix_id;
+ __le16 element_count;
+ __le16 comp_thresh;
+ __le16 unused;
+ } rrq[1]; /* up to 64 RRQ addresses */
+ } r8;
};
enum aac_log_level {
@@ -554,7 +910,7 @@ struct adapter_ops
void (*adapter_enable_int)(struct aac_dev *dev);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev);
- int (*adapter_restart)(struct aac_dev *dev, int bled);
+ int (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type);
void (*adapter_start)(struct aac_dev *dev);
/* Transport operations */
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
@@ -727,6 +1083,7 @@ struct sa_registers {
#define SA_INIT_NUM_MSIXVECTORS 1
+#define SA_MINIPORT_REVISION SA_INIT_NUM_MSIXVECTORS
#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
@@ -820,32 +1177,37 @@ struct rkt_registers {
#define src_inbound rx_inbound
struct src_mu_registers {
- /* PCI*| Name */
- __le32 reserved0[6]; /* 00h | Reserved */
- __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
- __le32 IDR; /* 20h | Inbound Doorbell Register */
- __le32 IISR; /* 24h | Inbound Int. Status Register */
- __le32 reserved1[3]; /* 28h | Reserved */
- __le32 OIMR; /* 34h | Outbound Int. Mask Register */
- __le32 reserved2[25]; /* 38h | Reserved */
- __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
- __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
- __le32 reserved3[6]; /* a4h | Reserved */
- __le32 OMR; /* bch | Outbound Message Register */
+ /* PCI*| Name */
+ __le32 reserved0[6]; /* 00h | Reserved */
+ __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
+ __le32 IDR; /* 20h | Inbound Doorbell Register */
+ __le32 IISR; /* 24h | Inbound Int. Status Register */
+ __le32 reserved1[3]; /* 28h | Reserved */
+ __le32 OIMR; /* 34h | Outbound Int. Mask Register */
+ __le32 reserved2[25]; /* 38h | Reserved */
+ __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
+ __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
+ __le32 reserved3[3]; /* a4h | Reserved */
+ __le32 SCR0; /* b0h | Scratchpad 0 */
+ __le32 reserved4[2]; /* b4h | Reserved */
+ __le32 OMR; /* bch | Outbound Message Register */
__le32 IQ_L; /* c0h | Inbound Queue (Low address) */
__le32 IQ_H; /* c4h | Inbound Queue (High address) */
__le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
+ __le32 reserved5; /* cch | Reserved */
+ __le32 IQN_L; /* d0h | Inbound (native cmd) low */
+ __le32 IQN_H; /* d4h | Inbound (native cmd) high */
};
struct src_registers {
struct src_mu_registers MUnit; /* 00h - cbh */
union {
struct {
- __le32 reserved1[130789]; /* cch - 7fc5fh */
+ __le32 reserved1[130786]; /* d8h - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
} tupelo;
struct {
- __le32 reserved1[973]; /* cch - fffh */
+ __le32 reserved1[970]; /* d8h - fffh */
struct src_inbound IndexRegs; /* 1000h */
} denali;
} u;
@@ -930,6 +1292,7 @@ struct fsa_dev_info {
char devname[8];
struct sense_data sense_data;
u32 block_size;
+ u8 identifier[16];
};
struct fib {
@@ -958,8 +1321,30 @@ struct fib {
struct list_head fiblink;
void *data;
u32 vector_no;
- struct hw_fib *hw_fib_va; /* Actual shared object */
- dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
+ struct hw_fib *hw_fib_va; /* also used for native */
+ dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
+ dma_addr_t hw_sgl_pa; /* extra sgl for native */
+ dma_addr_t hw_error_pa; /* error buffer for native */
+ u32 hbacmd_size; /* cmd size for native */
+};
+
+#define AAC_INIT 0
+#define AAC_RESCAN 1
+
+#define AAC_DEVTYPE_RAID_MEMBER 1
+#define AAC_DEVTYPE_ARC_RAW 2
+#define AAC_DEVTYPE_NATIVE_RAW 3
+#define AAC_EXPOSE_DISK 0
+#define AAC_HIDE_DISK 3
+
+struct aac_hba_map_info {
+ __le32 rmw_nexus; /* nexus for native HBA devices */
+ u8 devtype; /* device type */
+ u8 new_devtype;
+ u8 reset_state; /* 0 - no reset, 1..x - */
+ /* after xth TM LUN reset */
+ u16 qd_limit;
+ u8 expose; /*checks if to expose or not*/
};
/*
@@ -1025,7 +1410,28 @@ struct aac_supplement_adapter_info
/* StructExpansion == 1 */
__le32 FeatureBits3;
__le32 SupportedPerformanceModes;
- __le32 ReservedForFutureGrowth[80];
+ u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */
+ u8 HostBusWidth; /* actual width in bits or links */
+ u16 HostBusSpeed; /* actual bus speed/link rate in MHz */
+ u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */
+ u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */
+
+ u8 CpldVerLoaded;
+ u8 CpldVerInFlash;
+
+ __le64 MaxRRCCapacity;
+ __le32 CompiledMaxHistLogLevel;
+ u8 CustomBoardName[12];
+ u16 SupportedCntlrMode; /* identify supported controller mode */
+ u16 ReservedForFuture16;
+ __le32 SupportedOptions3; /* reserved for future options */
+
+ __le16 VirtDeviceBus; /* virt. SCSI device for Thor */
+ __le16 VirtDeviceTarget;
+ __le16 VirtDeviceLUN;
+ __le16 Unused;
+ __le32 ReservedForFutureGrowth[68];
+
};
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
@@ -1099,11 +1505,21 @@ struct aac_bus_info_response {
#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
+#define AAC_OPT_EXTENDED cpu_to_le32(1<<23)
+#define AAC_OPT_NATIVE_HBA cpu_to_le32(1<<25)
#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+#define AAC_COMM_PRODUCER 0
+#define AAC_COMM_MESSAGE 1
+#define AAC_COMM_MESSAGE_TYPE1 3
+#define AAC_COMM_MESSAGE_TYPE2 4
+#define AAC_COMM_MESSAGE_TYPE3 5
+
+#define AAC_EXTOPT_SA_FIRMWARE cpu_to_le32(1<<1)
+
/* MSIX context */
struct aac_msix_ctx {
int vector_no;
@@ -1119,15 +1535,17 @@ struct aac_dev
/*
* negotiated FIB settings
*/
- unsigned max_fib_size;
- unsigned sg_tablesize;
- unsigned max_num_aif;
+ unsigned int max_fib_size;
+ unsigned int sg_tablesize;
+ unsigned int max_num_aif;
+
+ unsigned int max_cmd_size; /* max_fib_size or MAX_NATIVE */
/*
* Map for 128 fib objects (64k)
*/
- dma_addr_t hw_fib_pa;
- struct hw_fib *hw_fib_va;
+ dma_addr_t hw_fib_pa; /* also used for native cmd */
+ struct hw_fib *hw_fib_va; /* also used for native cmd */
struct hw_fib *aif_base_va;
/*
* Fib Headers
@@ -1157,21 +1575,23 @@ struct aac_dev
resource_size_t base_size, dbg_size; /* Size of
* mapped in region */
-
- struct aac_init *init; /* Holds initialization info to communicate with adapter */
+ /*
+ * Holds initialization info
+ * to communicate with adapter
+ */
+ union aac_init *init;
dma_addr_t init_pa; /* Holds physical address of the init struct */
-
- u32 *host_rrq; /* response queue
- * if AAC_COMM_MESSAGE_TYPE1 */
-
+ /* response queue (if AAC_COMM_MESSAGE_TYPE1) */
+ __le32 *host_rrq;
dma_addr_t host_rrq_pa; /* phys. address */
/* index into rrq buffer */
u32 host_rrq_idx[AAC_MAX_MSIX];
atomic_t rrq_outstanding[AAC_MAX_MSIX];
u32 fibs_pushed_no;
struct pci_dev *pdev; /* Our PCI interface */
- void * printfbuf; /* pointer to buffer used for printf's from the adapter */
- void * comm_addr; /* Base address of Comm area */
+ /* pointer to buffer used for printf's from the adapter */
+ void *printfbuf;
+ void *comm_addr; /* Base address of Comm area */
dma_addr_t comm_phys; /* Physical Address of Comm area */
size_t comm_size;
@@ -1227,15 +1647,12 @@ struct aac_dev
u8 needs_dac;
u8 raid_scsi_mode;
u8 comm_interface;
-# define AAC_COMM_PRODUCER 0
-# define AAC_COMM_MESSAGE 1
-# define AAC_COMM_MESSAGE_TYPE1 3
-# define AAC_COMM_MESSAGE_TYPE2 4
u8 raw_io_interface;
u8 raw_io_64;
u8 printf_enabled;
u8 in_reset;
u8 msi;
+ u8 sa_firmware;
int management_fib_count;
spinlock_t manage_lock;
spinlock_t sync_lock;
@@ -1246,7 +1663,10 @@ struct aac_dev
u32 max_msix; /* max. MSI-X vectors */
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
+ atomic_t msix_counter;
+ struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
+ struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
u8 adapter_shutdown;
u32 handle_pci_error;
};
@@ -1269,8 +1689,8 @@ struct aac_dev
#define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev)
-#define aac_adapter_restart(dev,bled) \
- (dev)->a_ops.adapter_restart(dev,bled)
+#define aac_adapter_restart(dev, bled, reset_type) \
+ ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
#define aac_adapter_start(dev) \
((dev)->a_ops.adapter_start(dev))
@@ -1300,6 +1720,8 @@ struct aac_dev
#define FIB_CONTEXT_FLAG (0x00000002)
#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
/*
* Define the command values
@@ -1358,6 +1780,7 @@ struct aac_dev
#define ST_IO 5
#define ST_NXIO 6
#define ST_E2BIG 7
+#define ST_MEDERR 8
#define ST_ACCES 13
#define ST_EXIST 17
#define ST_XDEV 18
@@ -1715,6 +2138,8 @@ struct aac_fsinfo {
struct aac_blockdevinfo {
__le32 block_size;
+ __le32 logical_phys_map;
+ u8 identifier[16];
};
union aac_contentinfo {
@@ -1940,6 +2365,15 @@ struct revision
#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
#define FSACTL_GET_CONTAINERS 2131
#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED)
+#define FSACTL_RESET_IOP CTL_CODE(2140, METHOD_BUFFERED)
+#define FSACTL_GET_HBA_INFO CTL_CODE(2150, METHOD_BUFFERED)
+/* flags defined for IOP & HW SOFT RESET */
+#define HW_IOP_RESET 0x01
+#define HW_SOFT_RESET 0x02
+#define IOP_HWSOFT_RESET (HW_IOP_RESET | HW_SOFT_RESET)
+/* HW Soft Reset register offset */
+#define IBW_SWR_OFFSET 0x4000
+#define SOFT_RESET_TIME 60
struct aac_common
@@ -1958,6 +2392,8 @@ struct aac_common
#ifdef DBG
u32 FibsSent;
u32 FibRecved;
+ u32 NativeSent;
+ u32 NativeRecved;
u32 NoResponseSent;
u32 NoResponseRecved;
u32 AsyncSent;
@@ -1969,6 +2405,56 @@ struct aac_common
extern struct aac_common aac_config;
+/*
+ * This is for management ioctl purpose only.
+ */
+struct aac_hba_info {
+
+ u8 driver_name[50];
+ u8 adapter_number;
+ u8 system_io_bus_number;
+ u8 device_number;
+ u32 function_number;
+ u32 vendor_id;
+ u32 device_id;
+ u32 sub_vendor_id;
+ u32 sub_system_id;
+ u32 mapped_base_address_size;
+ u32 base_physical_address_high_part;
+ u32 base_physical_address_low_part;
+
+ u32 max_command_size;
+ u32 max_fib_size;
+ u32 max_scatter_gather_from_os;
+ u32 max_scatter_gather_to_fw;
+ u32 max_outstanding_fibs;
+
+ u32 queue_start_threshold;
+ u32 queue_dump_threshold;
+ u32 max_io_size_queued;
+ u32 outstanding_io;
+
+ u32 firmware_build_number;
+ u32 bios_build_number;
+ u32 driver_build_number;
+ u32 serial_number_high_part;
+ u32 serial_number_low_part;
+ u32 supported_options;
+ u32 feature_bits;
+ u32 currentnumber_ports;
+
+ u8 new_comm_interface:1;
+ u8 new_commands_supported:1;
+ u8 disable_passthrough:1;
+ u8 expose_non_dasd:1;
+ u8 queue_allowed:1;
+ u8 bled_check_enabled:1;
+ u8 reserved1:1;
+ u8 reserted2:1;
+
+ u32 reserved3[10];
+
+};
/*
* The following macro is used when sending and receiving FIBs. It is
@@ -2096,9 +2582,10 @@ extern struct aac_common aac_config;
/* PMC NEW COMM: Request the event data */
#define AifReqEvent 200
+#define AifRawDeviceRemove 203 /* RAW device deleted */
+#define AifNativeDeviceAdd 204 /* native HBA device added */
+#define AifNativeDeviceRemove 205 /* native HBA device removed */
-/* RAW device deleted */
-#define AifRawDeviceRemove 203
/*
* Adapter Initiated FIB command structures. Start with the adapter
@@ -2131,6 +2618,8 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
+int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
+int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
@@ -2141,9 +2630,12 @@ void aac_fib_free(struct fib * context);
void aac_fib_init(struct fib * context);
void aac_printf(struct aac_dev *dev, u32 val);
int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_hba_send(u8 command, struct fib *context,
+ fib_callback callback, void *ctxt);
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
int aac_fib_complete(struct fib * context);
+void aac_hba_callback(void *context, struct fib *fibptr);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
void aac_src_access_devreg(struct aac_dev *dev, int mode);
@@ -2169,7 +2661,7 @@ unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
int isAif, int isFastResponse,
struct hw_fib *aif_fib);
-int aac_reset_adapter(struct aac_dev * dev, int forced);
+int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type);
int aac_check_health(struct aac_dev * dev);
int aac_command_thread(void *data);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -2183,7 +2675,6 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
char * get_container_type(unsigned type);
extern int numacb;
-extern int acbsize;
extern char aac_driver_version[];
extern int startup_timeout;
extern int aif_timeout;
@@ -2194,3 +2685,4 @@ extern int aac_commit;
extern int update_interval;
extern int check_interval;
extern int aac_check_reset;
+#endif
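
The aacraid.h changes above give both command paths a common handle scheme: a FIB's index in dev->fibs is shifted left by two and has its low bit set, which keeps bit 1 clear as the native request_id requires (see aac_fib_send() and aac_hba_send() in the commsup.c hunks below). A minimal, self-contained sketch of that arithmetic — illustrative only, written in plain userspace C rather than with the driver's types:

/* Hypothetical helpers mirroring the handle math in this patch;
 * not part of the driver source.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t aac_encode_handle(uint32_t fib_index)
{
	/* index << 2 leaves bits 0-1 clear; +1 marks the slot in use
	 * while keeping bit 1 zero, as the request_id comment demands
	 */
	return (fib_index << 2) + 1;
}

static uint32_t aac_decode_index(uint32_t handle)
{
	return handle >> 2;	/* drop the marker bits to get the dev->fibs index */
}

int main(void)
{
	uint32_t h = aac_encode_handle(5);

	assert((h & 0x2) == 0);			/* bit 1 must stay 0 */
	assert(aac_decode_index(h) == 5);	/* round-trips to the index */
	return 0;
}
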
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e1daff230c7d..614842a9eb07 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -477,20 +478,24 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
struct fib* srbfib;
int status;
struct aac_srb *srbcmd = NULL;
+ struct aac_hba_cmd_req *hbacmd = NULL;
struct user_aac_srb *user_srbcmd = NULL;
struct user_aac_srb __user *user_srb = arg;
struct aac_srb_reply __user *user_reply;
- struct aac_srb_reply* reply;
+ u32 chn;
u32 fibsize = 0;
u32 flags = 0;
s32 rcode = 0;
u32 data_dir;
- void __user *sg_user[32];
- void *sg_list[32];
+ void __user *sg_user[HBA_MAX_SG_EMBEDDED];
+ void *sg_list[HBA_MAX_SG_EMBEDDED];
+ u32 sg_count[HBA_MAX_SG_EMBEDDED];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
+ int is_native_device;
+ u64 address;
if (dev->in_reset) {
@@ -507,11 +512,6 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
}
- aac_fib_init(srbfib);
- /* raw_srb FIB is not FastResponseCapable */
- srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
-
- srbcmd = (struct aac_srb*) fib_data(srbfib);
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
@@ -538,21 +538,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- user_reply = arg+fibsize;
-
flags = user_srbcmd->flags; /* from user in cpu order */
- // Fix up srb for endian and force some values
-
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
- srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
- srbcmd->id = cpu_to_le32(user_srbcmd->id);
- srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
- srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
- srbcmd->flags = cpu_to_le32(flags);
- srbcmd->retry_limit = 0; // Obsolete parameter
- srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
- memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
-
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
@@ -568,7 +554,12 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
- le32_to_cpu(srbcmd->sg.count)));
+ user_srbcmd->sg.count));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
+ dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
rcode = -EINVAL;
goto cleanup;
}
@@ -588,13 +579,136 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
- dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
- rcode = -EINVAL;
- goto cleanup;
+
+ chn = aac_logical_to_phys(user_srbcmd->channel);
+ if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
+ dev->hba_map[chn][user_srbcmd->id].devtype ==
+ AAC_DEVTYPE_NATIVE_RAW) {
+ is_native_device = 1;
+ hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
+ memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */
+
+ /* iu_type is a parameter of aac_hba_send */
+ switch (data_dir) {
+ case DMA_TO_DEVICE:
+ hbacmd->byte1 = 2;
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ hbacmd->byte1 = 1;
+ break;
+ case DMA_NONE:
+ default:
+ break;
+ }
+ hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
+ hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
+
+ /*
+ * we fill in reply_qid later in aac_src_deliver_message
+ * we fill in iu_type, request_id later in aac_hba_send
+ * we fill in emb_data_desc_count, data_length later
+ * in sg list build
+ */
+
+ memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
+
+ address = (u64)srbfib->hw_error_pa;
+ hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+ hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+ hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+ hbacmd->emb_data_desc_count =
+ cpu_to_le32(user_srbcmd->sg.count);
+ srbfib->hbacmd_size = 64 +
+ user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
+
+ } else {
+ is_native_device = 0;
+ aac_fib_init(srbfib);
+
+ /* raw_srb FIB is not FastResponseCapable */
+ srbfib->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
+
+ srbcmd = (struct aac_srb *) fib_data(srbfib);
+
+ // Fix up srb for endian and force some values
+
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
+ srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
+ srbcmd->id = cpu_to_le32(user_srbcmd->id);
+ srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
+ srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
+ srbcmd->flags = cpu_to_le32(flags);
+ srbcmd->retry_limit = 0; // Obsolete parameter
+ srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
+ memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
}
+
byte_count = 0;
- if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
+ if (is_native_device) {
+ struct user_sgmap *usg32 = &user_srbcmd->sg;
+ struct user_sgmap64 *usg64 =
+ (struct user_sgmap64 *)&user_srbcmd->sg;
+
+ for (i = 0; i < usg32->count; i++) {
+ void *p;
+ u64 addr;
+
+ sg_count[i] = (actual_fibsize64 == fibsize) ?
+ usg64->sg[i].count : usg32->sg[i].count;
+ if (sg_count[i] >
+ (dev->scsi_host_ptr->max_sectors << 9)) {
+ pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
+ i, sg_count[i],
+ dev->scsi_host_ptr->max_sectors << 9);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+
+ p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ if (!p) {
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+
+ if (actual_fibsize64 == fibsize) {
+ addr = (u64)usg64->sg[i].addr[0];
+ addr += ((u64)usg64->sg[i].addr[1]) << 32;
+ } else {
+ addr = (u64)usg32->sg[i].addr;
+ }
+
+ sg_user[i] = (void __user *)(uintptr_t)addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if (copy_from_user(p, sg_user[i],
+ sg_count[i])) {
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, sg_count[i],
+ data_dir);
+ hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
+ hbacmd->sge[i].addr_lo = cpu_to_le32(
+ (u32)(addr & 0xffffffff));
+ hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
+ hbacmd->sge[i].flags = 0;
+ byte_count += sg_count[i];
+ }
+
+ if (usg32->count > 0) /* embedded sglist */
+ hbacmd->sge[usg32->count-1].flags =
+ cpu_to_le32(0x40000000);
+ hbacmd->data_length = cpu_to_le32(byte_count);
+
+ status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
+ NULL, NULL);
+
+ } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
@@ -606,7 +720,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
- if (upsg->sg[i].count >
+
+ sg_count[i] = upsg->sg[i].count;
+ if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
@@ -615,10 +731,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
- p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- upsg->sg[i].count,i,upsg->count));
+ sg_count[i], i, upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
@@ -629,18 +745,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_indx = i;
if (flags & SRB_DataOut) {
- if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ if (copy_from_user(p, sg_user[i],
+ sg_count[i])){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
+ addr = pci_map_single(dev->pdev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
- byte_count += upsg->sg[i].count;
- psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ byte_count += sg_count[i];
+ psg->sg[i].count = cpu_to_le32(sg_count[i]);
}
} else {
struct user_sgmap* usg;
@@ -657,7 +775,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
- if (usg->sg[i].count >
+
+ sg_count[i] = usg->sg[i].count;
+ if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
@@ -667,10 +787,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
- p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- usg->sg[i].count,i,usg->count));
+ sg_count[i], i, usg->count));
kfree(usg);
rcode = -ENOMEM;
goto cleanup;
@@ -680,19 +800,21 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_indx = i;
if (flags & SRB_DataOut) {
- if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ if (copy_from_user(p, sg_user[i],
+ sg_count[i])) {
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+ addr = pci_map_single(dev->pdev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
- byte_count += usg->sg[i].count;
- psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+ byte_count += sg_count[i];
+ psg->sg[i].count = cpu_to_le32(sg_count[i]);
}
kfree (usg);
}
@@ -711,7 +833,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
- if (usg->sg[i].count >
+
+ sg_count[i] = usg->sg[i].count;
+ if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
@@ -720,10 +844,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
- p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
- if(!p) {
+ p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- usg->sg[i].count,i,usg->count));
+ sg_count[i], i, usg->count));
rcode = -ENOMEM;
goto cleanup;
}
@@ -734,7 +858,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_indx = i;
if (flags & SRB_DataOut) {
- if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
+ if (copy_from_user(p, sg_user[i],
+ sg_count[i])){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
@@ -744,13 +869,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
- psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+ psg->sg[i].count = cpu_to_le32(sg_count[i]);
}
} else {
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
- if (upsg->sg[i].count >
+
+ sg_count[i] = upsg->sg[i].count;
+ if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
@@ -758,10 +885,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- upsg->sg[i].count, i, upsg->count));
+ sg_count[i], i, upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
@@ -770,19 +897,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_indx = i;
if (flags & SRB_DataOut) {
- if(copy_from_user(p, sg_user[i],
- upsg->sg[i].count)) {
+ if (copy_from_user(p, sg_user[i],
+ sg_count[i])) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p,
- upsg->sg[i].count, data_dir);
+ sg_count[i], data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
- byte_count += upsg->sg[i].count;
- psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ byte_count += sg_count[i];
+ psg->sg[i].count = cpu_to_le32(sg_count[i]);
}
}
srbcmd->count = cpu_to_le32(byte_count);
@@ -792,12 +919,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
psg->count = 0;
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
+
if (status == -ERESTARTSYS) {
rcode = -ERESTARTSYS;
goto cleanup;
}
- if (status != 0){
+ if (status != 0) {
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
@@ -805,11 +933,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
- byte_count = le32_to_cpu(
- (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
- ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
- : srbcmd->sg.sg[i].count);
- if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
+ if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
@@ -818,19 +942,50 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
}
- reply = (struct aac_srb_reply *) fib_data(srbfib);
- if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
- dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
- rcode = -EFAULT;
- goto cleanup;
+ user_reply = arg + fibsize;
+ if (is_native_device) {
+ struct aac_hba_resp *err =
+ &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
+ struct aac_srb_reply reply;
+
+ reply.status = ST_OK;
+ if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+ /* fast response */
+ reply.srb_status = SRB_STATUS_SUCCESS;
+ reply.scsi_status = 0;
+ reply.data_xfer_length = byte_count;
+ } else {
+ reply.srb_status = err->service_response;
+ reply.scsi_status = err->status;
+ reply.data_xfer_length = byte_count -
+ le32_to_cpu(err->residual_count);
+ reply.sense_data_size = err->sense_response_data_len;
+ memcpy(reply.sense_data, err->sense_response_buf,
+ AAC_SENSE_BUFFERSIZE);
+ }
+ if (copy_to_user(user_reply, &reply,
+ sizeof(struct aac_srb_reply))) {
+ dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ } else {
+ struct aac_srb_reply *reply;
+
+ reply = (struct aac_srb_reply *) fib_data(srbfib);
+ if (copy_to_user(user_reply, reply,
+ sizeof(struct aac_srb_reply))) {
+ dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
}
cleanup:
kfree(user_srbcmd);
- for(i=0; i <= sg_indx; i++){
- kfree(sg_list[i]);
- }
if (rcode != -ERESTARTSYS) {
+ for (i = 0; i <= sg_indx; i++)
+ kfree(sg_list[i]);
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
@@ -858,6 +1013,44 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
return 0;
}
+static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_hba_info hbainfo;
+
+ hbainfo.adapter_number = (u8) dev->id;
+ hbainfo.system_io_bus_number = dev->pdev->bus->number;
+ hbainfo.device_number = (dev->pdev->devfn >> 3);
+ hbainfo.function_number = (dev->pdev->devfn & 0x0007);
+
+ hbainfo.vendor_id = dev->pdev->vendor;
+ hbainfo.device_id = dev->pdev->device;
+ hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
+ hbainfo.sub_system_id = dev->pdev->subsystem_device;
+
+ if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
+ dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+struct aac_reset_iop {
+ u8 reset_type;
+};
+
+static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_reset_iop reset;
+ int retval;
+
+ if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
+ return -EFAULT;
+
+ retval = aac_reset_adapter(dev, 0, reset.reset_type);
+ return retval;
+
+}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
@@ -901,6 +1094,13 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
case FSACTL_GET_PCI_INFO:
status = aac_get_pci_info(dev,arg);
break;
+ case FSACTL_GET_HBA_INFO:
+ status = aac_get_hba_info(dev, arg);
+ break;
+ case FSACTL_RESET_IOP:
+ status = aac_send_reset_adapter(dev, arg);
+ break;
+
default:
status = -ENOTTY;
break;
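
For the native-device branch added to aac_send_raw_srb() above, the command size and the embedded scatter-gather list follow a simple rule: 64 bytes of fixed aac_hba_cmd_req header plus one SGL entry per element, with 0x40000000 flagging the last element. A small sketch of that sizing follows — stand-in types only, assuming the four 32-bit fields per entry (addr_lo, addr_hi, len, flags) that the code above populates, not the driver's actual struct aac_hba_sgl definition:

/* Illustrative only: mirrors the hbacmd_size and last-SGE arithmetic
 * used by the native path above, with stand-in types.
 */
#include <stddef.h>
#include <stdint.h>

struct sge_stub {			/* stands in for struct aac_hba_sgl */
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t len;
	uint32_t flags;
};

static size_t native_cmd_size(uint32_t sg_count)
{
	/* 64 bytes of fixed header precede the embedded SGE array */
	return 64 + sg_count * sizeof(struct sge_stub);
}

static void mark_last_sge(struct sge_stub *sge, uint32_t sg_count)
{
	if (sg_count)			/* last element carries the end flag */
		sge[sg_count - 1].flags = 0x40000000;
}

int main(void)
{
	struct sge_stub sg[2] = { {0, 0, 0, 0}, {0, 0, 0, 0} };

	mark_last_sge(sg, 2);
	/* two elements: 64 + 2 * 16 = 96 bytes of IU */
	return native_cmd_size(2) == 96 ? 0 : 1;
}
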
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 4f56b1003cc7..40bfc57b6849 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -50,9 +51,13 @@ struct aac_common aac_config = {
static inline int aac_is_msix_mode(struct aac_dev *dev)
{
- u32 status;
+ u32 status = 0;
- status = src_readl(dev, MUnit.OMR);
+ if (dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8) {
+ status = src_readl(dev, MUnit.OMR);
+ }
return (status & AAC_INT_MODE_MSIX);
}
@@ -68,104 +73,175 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
unsigned long size, align;
const unsigned long fibsize = dev->max_fib_size;
const unsigned long printfbufsiz = 256;
- unsigned long host_rrq_size = 0;
- struct aac_init *init;
+ unsigned long host_rrq_size, aac_init_size;
+ union aac_init *init;
dma_addr_t phys;
unsigned long aac_max_hostphysmempages;
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
- dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+ (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+ (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+ !dev->sa_firmware)) {
+ host_rrq_size =
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
+ * sizeof(u32);
+ aac_init_size = sizeof(union aac_init);
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+ dev->sa_firmware) {
host_rrq_size = (dev->scsi_host_ptr->can_queue
- + AAC_NUM_MGT_FIB) * sizeof(u32);
- size = fibsize + sizeof(struct aac_init) + commsize +
- commalign + printfbufsiz + host_rrq_size;
-
+ + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX;
+ aac_init_size = sizeof(union aac_init) +
+ (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
+ } else {
+ host_rrq_size = 0;
+ aac_init_size = sizeof(union aac_init);
+ }
+ size = fibsize + aac_init_size + commsize + commalign +
+ printfbufsiz + host_rrq_size;
+
base = pci_alloc_consistent(dev->pdev, size, &phys);
- if(base == NULL)
- {
+ if (base == NULL) {
printk(KERN_ERR "aacraid: unable to create mapping.\n");
return 0;
}
+
dev->comm_addr = (void *)base;
dev->comm_phys = phys;
dev->comm_size = size;
-
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
- dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+ (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+ (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
dev->host_rrq = (u32 *)(base + fibsize);
dev->host_rrq_pa = phys + fibsize;
memset(dev->host_rrq, 0, host_rrq_size);
}
- dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+ dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
dev->init_pa = phys + fibsize + host_rrq_size;
init = dev->init;
- init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
- if (dev->max_fib_size != sizeof(struct hw_fib))
- init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
- init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS);
- init->fsrev = cpu_to_le32(dev->fsrev);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+ int i;
+ u64 addr;
+
+ init->r8.init_struct_revision =
+ cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
+ init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
+ init->r8.init_flags |=
+ cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
+ init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
+ init->r8.max_io_size =
+ cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+ init->r8.max_num_aif = init->r8.reserved1 =
+ init->r8.reserved2 = 0;
+
+ for (i = 0; i < dev->max_msix; i++) {
+ addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
+ sizeof(u32);
+ init->r8.rrq[i].host_addr_high = cpu_to_le32(
+ upper_32_bits(addr));
+ init->r8.rrq[i].host_addr_low = cpu_to_le32(
+ lower_32_bits(addr));
+ init->r8.rrq[i].msix_id = i;
+ init->r8.rrq[i].element_count = cpu_to_le16(
+ (u16)dev->vector_cap);
+ init->r8.rrq[i].comp_thresh =
+ init->r8.rrq[i].unused = 0;
+ }
- /*
- * Adapter Fibs are the first thing allocated so that they
- * start page aligned
- */
- dev->aif_base_va = (struct hw_fib *)base;
-
- init->AdapterFibsVirtualAddress = 0;
- init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
- init->AdapterFibsSize = cpu_to_le32(fibsize);
- init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
- /*
- * number of 4k pages of host physical memory. The aacraid fw needs
- * this number to be less than 4gb worth of pages. New firmware doesn't
- * have any issues with the mapping system, but older Firmware did, and
- * had *troubles* dealing with the math overloading past 32 bits, thus
- * we must limit this field.
- */
- aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
- if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
- init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
- else
- init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
-
- init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
- INITFLAGS_DRIVER_SUPPORTS_PM);
- init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
- init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
- init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
- init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
-
- if (dev->comm_interface == AAC_COMM_MESSAGE) {
- init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
- dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
- } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
- init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
- init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
- INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
- init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
- init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
- dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
- } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
- init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
- init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
- INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
- init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
- init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
- /* number of MSI-X */
- init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
- dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
+ pr_warn("aacraid: Comm Interface type3 enabled\n");
+ } else {
+ init->r7.init_struct_revision =
+ cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+ if (dev->max_fib_size != sizeof(struct hw_fib))
+ init->r7.init_struct_revision =
+ cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
+ init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
+ init->r7.fsrev = cpu_to_le32(dev->fsrev);
+
+ /*
+ * Adapter Fibs are the first thing allocated so that they
+ * start page aligned
+ */
+ dev->aif_base_va = (struct hw_fib *)base;
+
+ init->r7.adapter_fibs_virtual_address = 0;
+ init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
+ init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
+ init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));
+
+ /*
+ * number of 4k pages of host physical memory. The aacraid fw
+ * needs this number to be less than 4gb worth of pages. New
+ * firmware doesn't have any issues with the mapping system, but
+ * older Firmware did, and had *troubles* dealing with the math
+ * overloading past 32 bits, thus we must limit this field.
+ */
+ aac_max_hostphysmempages =
+ dma_get_required_mask(&dev->pdev->dev) >> 12;
+ if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
+ init->r7.host_phys_mem_pages =
+ cpu_to_le32(aac_max_hostphysmempages);
+ else
+ init->r7.host_phys_mem_pages =
+ cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+
+ init->r7.init_flags =
+ cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
+ init->r7.max_io_commands =
+ cpu_to_le32(dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB);
+ init->r7.max_io_size =
+ cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+ init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
+ init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE) {
+ init->r7.init_flags |=
+ cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+ pr_warn("aacraid: Comm Interface enabled\n");
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ init->r7.init_struct_revision =
+ cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+ init->r7.init_flags |=
+ cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
+ INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->r7.host_rrq_addr_high =
+ cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+ init->r7.host_rrq_addr_low =
+ cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+ pr_warn("aacraid: Comm Interface type1 enabled\n");
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ init->r7.init_struct_revision =
+ cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+ init->r7.init_flags |=
+ cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
+ INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->r7.host_rrq_addr_high =
+ cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+ init->r7.host_rrq_addr_low =
+ cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+ init->r7.no_of_msix_vectors =
+ cpu_to_le32(dev->max_msix);
+ /* must be the COMM_PREFERRED_SETTINGS values */
+ pr_warn("aacraid: Comm Interface type2 enabled\n");
+ }
}
/*
* Increment the base address by the amount already used
*/
- base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+ base = base + fibsize + host_rrq_size + aac_init_size;
phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
- sizeof(struct aac_init));
+ aac_init_size);
/*
* Align the beginning of Headers to commalign
@@ -177,7 +253,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* Fill in addresses of the Comm Area Headers and Queues
*/
*commaddr = base;
- init->CommHeaderAddress = cpu_to_le32((u32)phys);
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+ init->r7.comm_header_address = cpu_to_le32((u32)phys);
/*
* Increment the base address by the size of the CommArea
*/
@@ -187,12 +264,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
* Place the Printf buffer area after the Fast I/O comm area.
*/
dev->printfbuf = (void *)base;
- init->printfbuf = cpu_to_le32(phys);
- init->printfbufsiz = cpu_to_le32(printfbufsiz);
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
+ init->r7.printfbuf = cpu_to_le32(phys);
+ init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
+ }
memset(base, 0, printfbufsiz);
return 1;
}
-
+
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
atomic_set(&q->numpending, 0);
@@ -400,9 +479,13 @@ void aac_define_int_mode(struct aac_dev *dev)
if (dev->max_msix > msi_count)
dev->max_msix = msi_count;
}
- dev->vector_cap =
- (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
- msi_count;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
+ dev->vector_cap = dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB;
+ else
+ dev->vector_cap = (dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB) / msi_count;
+
}
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
@@ -436,30 +519,37 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0,
- status+0, status+1, status+2, status+3, NULL)) &&
- (status[0] == 0x00000001)) {
+ status+0, status+1, status+2, status+3, status+4)) &&
+ (status[0] == 0x00000001)) {
dev->doorbell_mask = status[3];
- if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
+ if (status[1] & AAC_OPT_NEW_COMM_64)
dev->raw_io_64 = 1;
dev->sync_mode = aac_sync_mode;
if (dev->a_ops.adapter_comm &&
- (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
+ (status[1] & AAC_OPT_NEW_COMM)) {
dev->comm_interface = AAC_COMM_MESSAGE;
dev->raw_io_interface = 1;
- if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+ if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
/* driver supports TYPE1 (Tupelo) */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
- } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
- /* driver supports TYPE2 (Denali) */
+ } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
+ /* driver supports TYPE2 (Denali, Yosemite) */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
- } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
- (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
- /* driver doesn't TYPE3 and TYPE4 */
- /* switch to sync. mode */
+ } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
+ /* driver supports TYPE3 (Yosemite, Thor) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
+ } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
+ /* not supported TYPE - switch to sync. mode */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
dev->sync_mode = 1;
}
}
+ if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
+ (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
+ dev->sa_firmware = 1;
+ else
+ dev->sa_firmware = 0;
+
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
@@ -496,61 +586,25 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->sg_tablesize = status[2] & 0xFFFF;
if (dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9)
- host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
- (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
- else
- host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ dev->pdev->device == PMC_DEVICE_S9) {
+ if (host->can_queue > (status[3] >> 16) -
+ AAC_NUM_MGT_FIB)
+ host->can_queue = (status[3] >> 16) -
+ AAC_NUM_MGT_FIB;
+ } else if (host->can_queue > (status[3] & 0xFFFF) -
+ AAC_NUM_MGT_FIB)
+ host->can_queue = (status[3] & 0xFFFF) -
+ AAC_NUM_MGT_FIB;
+
dev->max_num_aif = status[4] & 0xFFFF;
- /*
- * NOTE:
- * All these overrides are based on a fixed internal
- * knowledge and understanding of existing adapters,
- * acbsize should be set with caution.
- */
- if (acbsize == 512) {
- host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
- dev->max_fib_size = 512;
- dev->sg_tablesize = host->sg_tablesize
- = (512 - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgentry))
- / sizeof(struct sgentry);
- host->can_queue = AAC_NUM_IO_FIB;
- } else if (acbsize == 2048) {
- host->max_sectors = 512;
- dev->max_fib_size = 2048;
- host->sg_tablesize = 65;
- dev->sg_tablesize = 81;
- host->can_queue = 512 - AAC_NUM_MGT_FIB;
- } else if (acbsize == 4096) {
- host->max_sectors = 1024;
- dev->max_fib_size = 4096;
- host->sg_tablesize = 129;
- dev->sg_tablesize = 166;
- host->can_queue = 256 - AAC_NUM_MGT_FIB;
- } else if (acbsize == 8192) {
- host->max_sectors = 2048;
- dev->max_fib_size = 8192;
- host->sg_tablesize = 257;
- dev->sg_tablesize = 337;
- host->can_queue = 128 - AAC_NUM_MGT_FIB;
- } else if (acbsize > 0) {
- printk("Illegal acbsize=%d ignored\n", acbsize);
- }
}
- {
-
- if (numacb > 0) {
- if (numacb < host->can_queue)
- host->can_queue = numacb;
- else
- printk("numacb=%d ignored\n", numacb);
- }
+ if (numacb > 0) {
+ if (numacb < host->can_queue)
+ host->can_queue = numacb;
+ else
+ pr_warn("numacb=%d ignored\n", numacb);
}
- if (host->can_queue > AAC_NUM_IO_FIB)
- host->can_queue = AAC_NUM_IO_FIB;
-
if (dev->pdev->device == PMC_DEVICE_S6 ||
dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 9e7551fe4b19..969727b67cdd 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -43,6 +44,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
+#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
@@ -60,12 +62,22 @@
static int fib_map_alloc(struct aac_dev *dev)
{
+ if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
+ dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
+ else
+ dev->max_cmd_size = dev->max_fib_size;
+
dprintk((KERN_INFO
"allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
- dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
+ dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
- (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+ (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
&dev->hw_fib_pa);
if (dev->hw_fib_va == NULL)
@@ -83,9 +95,9 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- if (dev->hw_fib_va && dev->max_fib_size) {
+ if (dev->hw_fib_va && dev->max_cmd_size) {
pci_free_consistent(dev->pdev,
- (dev->max_fib_size *
+ (dev->max_cmd_size *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
dev->hw_fib_va, dev->hw_fib_pa);
}
@@ -129,11 +141,14 @@ int aac_fib_setup(struct aac_dev * dev)
struct hw_fib *hw_fib;
dma_addr_t hw_fib_pa;
int i;
+ u32 max_cmds;
while (((i = fib_map_alloc(dev)) == -ENOMEM)
&& (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
- dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
- dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
+ max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
+ dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+ dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
}
if (i<0)
return -ENOMEM;
@@ -144,7 +159,7 @@ int aac_fib_setup(struct aac_dev * dev)
(hw_fib_pa - dev->hw_fib_pa));
dev->hw_fib_pa = hw_fib_pa;
memset(dev->hw_fib_va, 0,
- (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+ (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/* add Xport header */
@@ -170,12 +185,22 @@ int aac_fib_setup(struct aac_dev * dev)
sema_init(&fibptr->event_wait, 0);
spin_lock_init(&fibptr->event_lock);
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
- hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
+ hw_fib->header.SenderSize =
+ cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
fibptr->hw_fib_pa = hw_fib_pa;
+ fibptr->hw_sgl_pa = hw_fib_pa +
+ offsetof(struct aac_hba_cmd_req, sge[2]);
+ /*
+ * one element is for the ptr to the separate sg list,
+ * second element for 32 byte alignment
+ */
+ fibptr->hw_error_pa = hw_fib_pa +
+ offsetof(struct aac_native_hba, resp.resp_bytes[0]);
+
hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
- dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+ dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
hw_fib_pa = hw_fib_pa +
- dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
}
/*
@@ -273,7 +298,8 @@ void aac_fib_free(struct fib *fibptr)
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
aac_config.fib_timeouts++;
- if (fibptr->hw_fib_va->header.XferState != 0) {
+ if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+ fibptr->hw_fib_va->header.XferState != 0) {
printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
(void*)fibptr,
le32_to_cpu(fibptr->hw_fib_va->header.XferState));
@@ -501,8 +527,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* Map the fib into 32bits by using the fib number
*/
- hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
- hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
+ hw_fib->header.SenderFibAddress =
+ cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
+
+ /* use the same shifted value for handle to be compatible
+ * with the new native hba command handle
+ */
+ hw_fib->header.Handle =
+ cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+
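Shifting the fib index left by two before adding one keeps bit 1 of the handle clear, which is what the native HBA request_id path requires, while bit 0 still marks the handle as in use; the interrupt handler reverses the encoding (see the src.c hunk near the end of this patch). A standalone sketch of the arithmetic, using plain integers rather than the driver's structures:

#include <assert.h>
#include <stdint.h>

static uint32_t encode_handle(uint32_t fib_index)
{
        return (fib_index << 2) + 1;            /* bit 0 set, bit 1 always clear */
}

static uint32_t decode_handle(uint32_t rrq_entry)
{
        uint32_t handle = rrq_entry & 0x0000ffff;

        return handle >> 2;                     /* back to the fib index */
}

int main(void)
{
        assert(decode_handle(encode_handle(37)) == 37);
        assert((encode_handle(37) & 0x2) == 0); /* bit 1 of request_id stays 0 */
        return 0;
}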
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
@@ -670,6 +703,82 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return 0;
}
+int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ void *callback_data)
+{
+ struct aac_dev *dev = fibptr->dev;
+ int wait;
+ unsigned long flags = 0;
+ unsigned long mflags = 0;
+
+ fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
+ if (callback) {
+ wait = 0;
+ fibptr->callback = callback;
+ fibptr->callback_data = callback_data;
+ } else
+ wait = 1;
+
+
+ if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+ struct aac_hba_cmd_req *hbacmd =
+ (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+
+ hbacmd->iu_type = command;
+ /* bit1 of request_id must be 0 */
+ hbacmd->request_id =
+ cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+ } else
+ return -EINVAL;
+
+
+ if (wait) {
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ return -EBUSY;
+ }
+ dev->management_fib_count++;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ }
+
+ if (aac_adapter_deliver(fibptr) != 0) {
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ }
+ return -EBUSY;
+ }
+ FIB_COUNTER_INCREMENT(aac_config.NativeSent);
+
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ /* Only set for first known interruptible command */

+ if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->done = 2;
+ up(&fibptr->event_wait);
+ }
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ if ((fibptr->done == 0) || (fibptr->done == 2)) {
+ fibptr->done = 2; /* Tell interrupt we aborted */
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ return -ERESTARTSYS;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ WARN_ON(fibptr->done == 0);
+
+ if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+ return -ETIMEDOUT;
+
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
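aac_hba_send() has two success modes: with a callback it returns -EINPROGRESS and completion arrives through the interrupt path; without one it sleeps on the fib's event semaphore and returns 0 (or -ERESTARTSYS/-ETIMEDOUT). A hedged caller sketch — the callback shape is inferred from fib_callback, and issue_native_cmd() is a hypothetical helper, not part of the driver:

/* Hypothetical completion handler, for illustration only. */
static void my_hba_done(void *context, struct fib *fibptr)
{
        struct scsi_cmnd *cmd = context;

        /* finish the SCSI command here, then release the fib as appropriate */
        (void)cmd;
}

static int issue_native_cmd(struct fib *fib, struct scsi_cmnd *cmd)
{
        int rc = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib, my_hba_done, cmd);

        return (rc == -EINPROGRESS) ? 0 : rc;   /* async submit is the success case */
}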
/**
* aac_consumer_get - get the top of the queue
* @dev: Adapter
@@ -761,7 +870,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
unsigned long qflags;
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
- dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
kfree(hw_fib);
return 0;
}
@@ -827,11 +937,17 @@ int aac_fib_complete(struct fib *fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
+ if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+ fib_dealloc(fibptr);
+ return 0;
+ }
+
/*
- * Check for a fib which has already been completed
+ * Check for a fib which has already been completed or with a
+ * status wait timeout
*/
- if (hw_fib->header.XferState == 0)
+ if (hw_fib->header.XferState == 0 || fibptr->done == 2)
return 0;
/*
* If we plan to do anything check the structure type first.
@@ -984,20 +1100,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
lun = (container >> 16) & 0xFF;
container = (u32)-1;
channel = aac_phys_to_logical(channel);
- device_config_needed =
- (((__le32 *)aifcmd->data)[0] ==
- cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
-
- if (device_config_needed == ADD) {
- device = scsi_device_lookup(
- dev->scsi_host_ptr,
- channel, id, lun);
- if (device) {
- scsi_remove_device(device);
- scsi_device_put(device);
- }
- }
+ device_config_needed = DELETE;
break;
+
/*
* Morph or Expand complete
*/
@@ -1351,7 +1456,7 @@ retry_next:
}
}
-static int _aac_reset_adapter(struct aac_dev *aac, int forced)
+static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
int retval;
@@ -1360,6 +1465,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
int jafo = 0;
+ int bled;
/*
* Assumptions:
@@ -1384,7 +1490,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
* If a positive health, means in a known DEAD PANIC
* state and the adapter could be reset to `try again'.
*/
- retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
+ bled = forced ? 0 : aac_adapter_check_health(aac);
+ retval = aac_adapter_restart(aac, bled, reset_type);
if (retval)
goto out;
@@ -1494,11 +1601,12 @@ out:
return retval;
}
-int aac_reset_adapter(struct aac_dev * aac, int forced)
+int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
int retval;
struct Scsi_Host * host;
+ int bled;
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
return -EBUSY;
@@ -1547,7 +1655,9 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
if (forced < 2)
aac_send_shutdown(aac);
spin_lock_irqsave(host->host_lock, flagv);
- retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
+ bled = forced ? forced :
+ (aac_check_reset != 0 && aac_check_reset != 1);
+ retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
if ((forced < 2) && (retval == -ENODEV)) {
@@ -1593,6 +1703,7 @@ int aac_check_health(struct aac_dev * aac)
unsigned long time_now, flagv = 0;
struct list_head * entry;
struct Scsi_Host * host;
+ int bled;
/* Extending the scope of fib_lock slightly to protect aac->in_reset */
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1710,7 +1821,8 @@ int aac_check_health(struct aac_dev * aac)
host = aac->scsi_host_ptr;
if (aac->thread->pid != current->pid)
spin_lock_irqsave(host->host_lock, flagv);
- BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
+ bled = aac_check_reset != 1 ? 1 : 0;
+ _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
if (aac->thread->pid != current->pid)
spin_unlock_irqrestore(host->host_lock, flagv);
return BlinkLED;
@@ -1721,6 +1833,552 @@ out:
}
+static void aac_resolve_luns(struct aac_dev *dev)
+{
+ int bus, target, channel;
+ struct scsi_device *sdev;
+ u8 devtype;
+ u8 new_devtype;
+
+ for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+ for (target = 0; target < AAC_MAX_TARGETS; target++) {
+
+ if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL)
+ continue;
+
+ if (bus == CONTAINER_CHANNEL)
+ channel = CONTAINER_CHANNEL;
+ else
+ channel = aac_phys_to_logical(bus);
+
+ devtype = dev->hba_map[bus][target].devtype;
+ new_devtype = dev->hba_map[bus][target].new_devtype;
+
+ sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
+ target, 0);
+
+ if (!sdev && devtype)
+ scsi_add_device(dev->scsi_host_ptr, channel,
+ target, 0);
+ else if (sdev && new_devtype != devtype)
+ scsi_remove_device(sdev);
+ else if (sdev && new_devtype == devtype)
+ scsi_rescan_device(&sdev->sdev_gendev);
+
+ if (sdev)
+ scsi_device_put(sdev);
+
+ dev->hba_map[bus][target].devtype = new_devtype;
+ }
+ }
+}
+
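aac_resolve_luns() reconciles what the SCSI midlayer currently exposes against the freshly reported hba_map: targets known in the map but not exposed are added, type changes trigger removal, and unchanged entries just get a rescan. A condensed decision helper, for illustration only (the original open-codes this inline, as above):

enum lun_action { LUN_NONE, LUN_ADD, LUN_REMOVE, LUN_RESCAN };

/* Illustrative helper; mirrors the if/else chain in aac_resolve_luns(). */
static enum lun_action reconcile(int have_sdev, unsigned char devtype,
                                 unsigned char new_devtype)
{
        if (!have_sdev && devtype)
                return LUN_ADD;                 /* known, not yet exposed */
        if (have_sdev && new_devtype != devtype)
                return LUN_REMOVE;              /* type changed or device gone */
        if (have_sdev)
                return LUN_RESCAN;              /* unchanged, refresh it */
        return LUN_NONE;
}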
+/**
+ * aac_handle_sa_aif - Handle a message from the firmware
+ * @dev: Which adapter this fib is from
+ * @fibptr: Pointer to fibptr from adapter
+ *
+ * This routine handles a driver notify fib from the adapter and
+ * dispatches it to the appropriate routine for handling.
+ */
+static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
+{
+ int i, bus, target, container, rcode = 0;
+ u32 events = 0;
+ struct fib *fib;
+ struct scsi_device *sdev;
+
+ if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
+ events = SA_AIF_HOTPLUG;
+ else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
+ events = SA_AIF_HARDWARE;
+ else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
+ events = SA_AIF_PDEV_CHANGE;
+ else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
+ events = SA_AIF_LDEV_CHANGE;
+ else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
+ events = SA_AIF_BPSTAT_CHANGE;
+ else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
+ events = SA_AIF_BPCFG_CHANGE;
+
+ switch (events) {
+ case SA_AIF_HOTPLUG:
+ case SA_AIF_HARDWARE:
+ case SA_AIF_PDEV_CHANGE:
+ case SA_AIF_LDEV_CHANGE:
+ case SA_AIF_BPCFG_CHANGE:
+
+ fib = aac_fib_alloc(dev);
+ if (!fib) {
+ pr_err("aac_handle_sa_aif: out of memory\n");
+ return;
+ }
+ for (bus = 0; bus < AAC_MAX_BUSES; bus++)
+ for (target = 0; target < AAC_MAX_TARGETS; target++)
+ dev->hba_map[bus][target].new_devtype = 0;
+
+ rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
+
+ if (rcode != -ERESTARTSYS)
+ aac_fib_free(fib);
+
+ aac_resolve_luns(dev);
+
+ if (events == SA_AIF_LDEV_CHANGE ||
+ events == SA_AIF_BPCFG_CHANGE) {
+ aac_get_containers(dev);
+ for (container = 0; container <
+ dev->maximum_num_containers; ++container) {
+ sdev = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_CHANNEL,
+ container, 0);
+ if (dev->fsa_dev[container].valid && !sdev) {
+ scsi_add_device(dev->scsi_host_ptr,
+ CONTAINER_CHANNEL,
+ container, 0);
+ } else if (!dev->fsa_dev[container].valid &&
+ sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else if (sdev) {
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ }
+ }
+ }
+ break;
+
+ case SA_AIF_BPSTAT_CHANGE:
+ /* currently do nothing */
+ break;
+ }
+
+ for (i = 1; i <= 10; ++i) {
+ events = src_readl(dev, MUnit.IDR);
+ if (events & (1<<23)) {
+ pr_warn(" AIF not cleared by firmware - %d/%d)\n",
+ i, 10);
+ ssleep(1);
+ }
+ }
+}
+
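For sa_firmware adapters the raw event word is handed in through fib->hbacmd_size (see the aac_intr_normal() change later in this patch), and the else-if chain above selects exactly one event in priority order even when several bits are set. A standalone sketch with assumed single-bit flag values — the real SA_AIF_* definitions are not part of this hunk:

#include <stdint.h>

/* Assumed single-bit flags, ordered as the chain above tests them. */
enum {
        EV_HOTPLUG = 1u << 0, EV_HARDWARE = 1u << 1, EV_PDEV = 1u << 2,
        EV_LDEV    = 1u << 3, EV_BPSTAT   = 1u << 4, EV_BPCFG = 1u << 5,
};

static uint32_t pick_event(uint32_t raw)
{
        static const uint32_t order[] = {
                EV_HOTPLUG, EV_HARDWARE, EV_PDEV, EV_LDEV, EV_BPSTAT, EV_BPCFG,
        };
        unsigned int i;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++)
                if (raw & order[i])
                        return order[i];        /* first match wins */
        return 0;
}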
+static int get_fib_count(struct aac_dev *dev)
+{
+ unsigned int num = 0;
+ struct list_head *entry;
+ unsigned long flagv;
+
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock. We take the estimate
+ * and pre-allocate a set of fibs outside the
+ * lock.
+ */
+ num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
+ / sizeof(struct hw_fib); /* some extra */
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ entry = entry->next;
+ ++num;
+ }
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+ return num;
+}
+
+static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
+ struct fib **fib_pool,
+ unsigned int num)
+{
+ struct hw_fib **hw_fib_p;
+ struct fib **fib_p;
+ int rcode = 1;
+
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
+ if (!(*(hw_fib_p++))) {
+ --hw_fib_p;
+ break;
+ }
+
+ *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
+ if (!(*(fib_p++))) {
+ kfree(*(--hw_fib_p));
+ break;
+ }
+ }
+
+ num = hw_fib_p - hw_fib_pool;
+ if (!num)
+ rcode = 0;
+
+ return rcode;
+}
+
+static void wakeup_fibctx_threads(struct aac_dev *dev,
+ struct hw_fib **hw_fib_pool,
+ struct fib **fib_pool,
+ struct fib *fib,
+ struct hw_fib *hw_fib,
+ unsigned int num)
+{
+ unsigned long flagv;
+ struct list_head *entry;
+ struct hw_fib **hw_fib_p;
+ struct fib **fib_p;
+ u32 time_now, time_last;
+ struct hw_fib *hw_newfib;
+ struct fib *newfib;
+ struct aac_fib_context *fibctx;
+
+ time_now = jiffies/HZ;
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ /*
+ * For each Context that is on the
+ * fibctxList, make a copy of the
+ * fib, and then set the event to wake up the
+ * thread that is waiting for it.
+ */
+
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (entry != &dev->fib_list) {
+ /*
+ * Extract the fibctx
+ */
+ fibctx = list_entry(entry, struct aac_fib_context,
+ next);
+ /*
+ * Check if the queue is getting
+ * backlogged
+ */
+ if (fibctx->count > 20) {
+ /*
+ * It's *not* jiffies folks,
+ * but jiffies / HZ so do not
+ * panic ...
+ */
+ time_last = fibctx->jiffies;
+ /*
+ * Has it been > 2 minutes
+ * since the last read off
+ * the queue?
+ */
+ if ((time_now - time_last) > aif_timeout) {
+ entry = entry->next;
+ aac_close_fib_context(dev, fibctx);
+ continue;
+ }
+ }
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock
+ */
+ if (hw_fib_p >= &hw_fib_pool[num]) {
+ pr_warn("aifd: didn't allocate NewFib\n");
+ entry = entry->next;
+ continue;
+ }
+
+ hw_newfib = *hw_fib_p;
+ *(hw_fib_p++) = NULL;
+ newfib = *fib_p;
+ *(fib_p++) = NULL;
+ /*
+ * Make the copy of the FIB
+ */
+ memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+ memcpy(newfib, fib, sizeof(struct fib));
+ newfib->hw_fib_va = hw_newfib;
+ /*
+ * Put the FIB onto the
+ * fibctx's fibs
+ */
+ list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+ fibctx->count++;
+ /*
+ * Set the event to wake up the
+ * thread that is waiting.
+ */
+ up(&fibctx->wait_sem);
+
+ entry = entry->next;
+ }
+ /*
+ * Set the status of this FIB
+ */
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, sizeof(u32));
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+}
+
+static void aac_process_events(struct aac_dev *dev)
+{
+ struct hw_fib *hw_fib;
+ struct fib *fib;
+ unsigned long flags;
+ spinlock_t *t_lock;
+ unsigned int rcode;
+
+ t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+ spin_lock_irqsave(t_lock, flags);
+
+ while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
+ struct list_head *entry;
+ struct aac_aifcmd *aifcmd;
+ unsigned int num;
+ struct hw_fib **hw_fib_pool, **hw_fib_p;
+ struct fib **fib_pool, **fib_p;
+
+ set_current_state(TASK_RUNNING);
+
+ entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
+ list_del(entry);
+
+ t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+ spin_unlock_irqrestore(t_lock, flags);
+
+ fib = list_entry(entry, struct fib, fiblink);
+ hw_fib = fib->hw_fib_va;
+ if (dev->sa_firmware) {
+ /* Thor AIF */
+ aac_handle_sa_aif(dev, fib);
+ aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+ continue;
+ }
+ /*
+ * We will process the FIB here or pass it to a
+ * worker thread that is TBD. We Really can't
+ * do anything at this point since we don't have
+ * anything defined for this thread to do.
+ */
+ memset(fib, 0, sizeof(struct fib));
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+ /*
+ * We only handle AifRequest fibs from the adapter.
+ */
+
+ aifcmd = (struct aac_aifcmd *) hw_fib->data;
+ if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+ /* Handle Driver Notify Events */
+ aac_handle_aif(dev, fib);
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+ goto free_fib;
+ }
+ /*
+ * The u32 here is important and intended. We are using
+ * 32bit wrapping time to fit the adapter field
+ */
+
+ /* Sniff events */
+ if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
+ || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
+ aac_handle_aif(dev, fib);
+ }
+
+ /*
+ * get number of fibs to process
+ */
+ num = get_fib_count(dev);
+ if (!num)
+ goto free_fib;
+
+ hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
+ GFP_KERNEL);
+ if (!hw_fib_pool)
+ goto free_fib;
+
+ fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
+ if (!fib_pool)
+ goto free_hw_fib_pool;
+
+ /*
+ * Fill up fib pointer pools with actual fibs
+ * and hw_fibs
+ */
+ rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+ if (!rcode)
+ goto free_mem;
+
+ /*
+ * wakeup the thread that is waiting for
+ * the response from fw (ioctl)
+ */
+ wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
+ fib, hw_fib, num);
+
+free_mem:
+ /* Free up the remaining resources */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ kfree(*hw_fib_p);
+ kfree(*fib_p);
+ ++fib_p;
+ ++hw_fib_p;
+ }
+ kfree(fib_pool);
+free_hw_fib_pool:
+ kfree(hw_fib_pool);
+free_fib:
+ kfree(fib);
+ t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+ spin_lock_irqsave(t_lock, flags);
+ }
+ /*
+ * There are no more AIF's
+ */
+ t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+ spin_unlock_irqrestore(t_lock, flags);
+}
+
+static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
+ u32 datasize)
+{
+ struct aac_srb *srbcmd;
+ struct sgmap64 *sg64;
+ dma_addr_t addr;
+ char *dma_buf;
+ struct fib *fibptr;
+ int ret = -ENOMEM;
+ u32 vbus, vid;
+
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ goto out;
+
+ dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
+ if (!dma_buf)
+ goto fib_free_out;
+
+ aac_fib_init(fibptr);
+
+ vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
+ vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+
+ srbcmd = (struct aac_srb *)fib_data(fibptr);
+
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srbcmd->channel = cpu_to_le32(vbus);
+ srbcmd->id = cpu_to_le32(vid);
+ srbcmd->lun = 0;
+ srbcmd->flags = cpu_to_le32(SRB_DataOut);
+ srbcmd->timeout = cpu_to_le32(10);
+ srbcmd->retry_limit = 0;
+ srbcmd->cdb_size = cpu_to_le32(12);
+ srbcmd->count = cpu_to_le32(datasize);
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ srbcmd->cdb[0] = BMIC_OUT;
+ srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
+ memcpy(dma_buf, (char *)wellness_str, datasize);
+
+ sg64 = (struct sgmap64 *)&srbcmd->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+ sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+ sg64->sg[0].count = cpu_to_le32(datasize);
+
+ ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
+ FsaNormal, 1, 1, NULL, NULL);
+
+ pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);
+
+ /*
+ * Do not set XferState to zero unless
+ * receives a response from F/W
+ */
+ if (ret >= 0)
+ aac_fib_complete(fibptr);
+
+ /*
+ * FIB should be freed only after
+ * getting the response from the F/W
+ */
+ if (ret != -ERESTARTSYS)
+ goto fib_free_out;
+
+out:
+ return ret;
+fib_free_out:
+ aac_fib_free(fibptr);
+ goto out;
+}
+
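The two-step (addr >> 16) >> 16 used when filling the high dword of the scatter entry is the usual kernel idiom for splitting a DMA address: it stays well defined even when dma_addr_t is only 32 bits wide, where a single >> 32 would be undefined behaviour. A self-contained illustration:

#include <assert.h>
#include <stdint.h>

static void split_dma_addr(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
        *hi = (uint32_t)((addr >> 16) >> 16);   /* high dword, shift-safe */
        *lo = (uint32_t)(addr & 0xffffffff);    /* low dword */
}

int main(void)
{
        uint32_t hi, lo;

        split_dma_addr(0x0000000123456789ULL, &hi, &lo);
        assert(hi == 0x1 && lo == 0x23456789);
        return 0;
}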
+int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
+{
+ struct tm cur_tm;
+ char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
+ u32 datasize = sizeof(wellness_str);
+ unsigned long local_time;
+ int ret = -ENODEV;
+
+ if (!dev->sa_firmware)
+ goto out;
+
+ local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
+ time_to_tm(local_time, 0, &cur_tm);
+ cur_tm.tm_mon += 1;
+ cur_tm.tm_year += 1900;
+ wellness_str[8] = bin2bcd(cur_tm.tm_hour);
+ wellness_str[9] = bin2bcd(cur_tm.tm_min);
+ wellness_str[10] = bin2bcd(cur_tm.tm_sec);
+ wellness_str[12] = bin2bcd(cur_tm.tm_mon);
+ wellness_str[13] = bin2bcd(cur_tm.tm_mday);
+ wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
+ wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
+
+ ret = aac_send_wellness_command(dev, wellness_str, datasize);
+
+out:
+ return ret;
+}
+
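Bytes 8-15 of the wellness string carry the local time as packed BCD: hour, minute, second, a zero pad at offset 11, then month, day, century and two-digit year. A standalone sketch of the packing, where to_bcd() is a plain-C stand-in for the kernel's bin2bcd():

#include <stdint.h>
#include <stdio.h>

static uint8_t to_bcd(unsigned int v)
{
        return (uint8_t)(((v / 10) << 4) | (v % 10));
}

int main(void)
{
        /* e.g. 2017-02-03 14:25:36 local time */
        uint8_t td[8] = {
                to_bcd(14), to_bcd(25), to_bcd(36), 0,
                to_bcd(2),  to_bcd(3),
                to_bcd(2017 / 100), to_bcd(2017 % 100),
        };
        unsigned int i;

        for (i = 0; i < 8; i++)
                printf("%02X ", td[i]);         /* 14 25 36 00 02 03 20 17 */
        printf("\n");
        return 0;
}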
+int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
+{
+ int ret = -ENOMEM;
+ struct fib *fibptr;
+ __le32 *info;
+
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ goto out;
+
+ aac_fib_init(fibptr);
+ info = (__le32 *)fib_data(fibptr);
+ *info = cpu_to_le32(now->tv_sec);
+ ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
+ 1, 1, NULL, NULL);
+
+ /*
+ * Do not set XferState to zero unless
+ * receives a response from F/W
+ */
+ if (ret >= 0)
+ aac_fib_complete(fibptr);
+
+ /*
+ * FIB should be freed only after
+ * getting the response from the F/W
+ */
+ if (ret != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+
+out:
+ return ret;
+}
+
/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor
@@ -1734,10 +2392,6 @@ out:
int aac_command_thread(void *data)
{
struct aac_dev *dev = data;
- struct hw_fib *hw_fib, *hw_newfib;
- struct fib *fib, *newfib;
- struct aac_fib_context *fibctx;
- unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
unsigned long next_jiffies = jiffies + HZ;
unsigned long next_check_jiffies = next_jiffies;
@@ -1757,196 +2411,8 @@ int aac_command_thread(void *data)
set_current_state(TASK_INTERRUPTIBLE);
dprintk ((KERN_INFO "aac_command_thread start\n"));
while (1) {
- spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
- while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
- struct list_head *entry;
- struct aac_aifcmd * aifcmd;
-
- set_current_state(TASK_RUNNING);
- entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
- list_del(entry);
-
- spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
- fib = list_entry(entry, struct fib, fiblink);
- /*
- * We will process the FIB here or pass it to a
- * worker thread that is TBD. We Really can't
- * do anything at this point since we don't have
- * anything defined for this thread to do.
- */
- hw_fib = fib->hw_fib_va;
- memset(fib, 0, sizeof(struct fib));
- fib->type = FSAFS_NTC_FIB_CONTEXT;
- fib->size = sizeof(struct fib);
- fib->hw_fib_va = hw_fib;
- fib->data = hw_fib->data;
- fib->dev = dev;
- /*
- * We only handle AifRequest fibs from the adapter.
- */
- aifcmd = (struct aac_aifcmd *) hw_fib->data;
- if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
- /* Handle Driver Notify Events */
- aac_handle_aif(dev, fib);
- *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
- aac_fib_adapter_complete(fib, (u16)sizeof(u32));
- } else {
- /* The u32 here is important and intended. We are using
- 32bit wrapping time to fit the adapter field */
-
- u32 time_now, time_last;
- unsigned long flagv;
- unsigned num;
- struct hw_fib ** hw_fib_pool, ** hw_fib_p;
- struct fib ** fib_pool, ** fib_p;
-
- /* Sniff events */
- if ((aifcmd->command ==
- cpu_to_le32(AifCmdEventNotify)) ||
- (aifcmd->command ==
- cpu_to_le32(AifCmdJobProgress))) {
- aac_handle_aif(dev, fib);
- }
-
- time_now = jiffies/HZ;
-
- /*
- * Warning: no sleep allowed while
- * holding spinlock. We take the estimate
- * and pre-allocate a set of fibs outside the
- * lock.
- */
- num = le32_to_cpu(dev->init->AdapterFibsSize)
- / sizeof(struct hw_fib); /* some extra */
- spin_lock_irqsave(&dev->fib_lock, flagv);
- entry = dev->fib_list.next;
- while (entry != &dev->fib_list) {
- entry = entry->next;
- ++num;
- }
- spin_unlock_irqrestore(&dev->fib_lock, flagv);
- hw_fib_pool = NULL;
- fib_pool = NULL;
- if (num
- && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
- && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
- hw_fib_p = hw_fib_pool;
- fib_p = fib_pool;
- while (hw_fib_p < &hw_fib_pool[num]) {
- if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
- --hw_fib_p;
- break;
- }
- if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
- kfree(*(--hw_fib_p));
- break;
- }
- }
- if ((num = hw_fib_p - hw_fib_pool) == 0) {
- kfree(fib_pool);
- fib_pool = NULL;
- kfree(hw_fib_pool);
- hw_fib_pool = NULL;
- }
- } else {
- kfree(hw_fib_pool);
- hw_fib_pool = NULL;
- }
- spin_lock_irqsave(&dev->fib_lock, flagv);
- entry = dev->fib_list.next;
- /*
- * For each Context that is on the
- * fibctxList, make a copy of the
- * fib, and then set the event to wake up the
- * thread that is waiting for it.
- */
- hw_fib_p = hw_fib_pool;
- fib_p = fib_pool;
- while (entry != &dev->fib_list) {
- /*
- * Extract the fibctx
- */
- fibctx = list_entry(entry, struct aac_fib_context, next);
- /*
- * Check if the queue is getting
- * backlogged
- */
- if (fibctx->count > 20)
- {
- /*
- * It's *not* jiffies folks,
- * but jiffies / HZ so do not
- * panic ...
- */
- time_last = fibctx->jiffies;
- /*
- * Has it been > 2 minutes
- * since the last read off
- * the queue?
- */
- if ((time_now - time_last) > aif_timeout) {
- entry = entry->next;
- aac_close_fib_context(dev, fibctx);
- continue;
- }
- }
- /*
- * Warning: no sleep allowed while
- * holding spinlock
- */
- if (hw_fib_p < &hw_fib_pool[num]) {
- hw_newfib = *hw_fib_p;
- *(hw_fib_p++) = NULL;
- newfib = *fib_p;
- *(fib_p++) = NULL;
- /*
- * Make the copy of the FIB
- */
- memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
- memcpy(newfib, fib, sizeof(struct fib));
- newfib->hw_fib_va = hw_newfib;
- /*
- * Put the FIB onto the
- * fibctx's fibs
- */
- list_add_tail(&newfib->fiblink, &fibctx->fib_list);
- fibctx->count++;
- /*
- * Set the event to wake up the
- * thread that is waiting.
- */
- up(&fibctx->wait_sem);
- } else {
- printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
- }
- entry = entry->next;
- }
- /*
- * Set the status of this FIB
- */
- *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
- aac_fib_adapter_complete(fib, sizeof(u32));
- spin_unlock_irqrestore(&dev->fib_lock, flagv);
- /* Free up the remaining resources */
- hw_fib_p = hw_fib_pool;
- fib_p = fib_pool;
- while (hw_fib_p < &hw_fib_pool[num]) {
- kfree(*hw_fib_p);
- kfree(*fib_p);
- ++fib_p;
- ++hw_fib_p;
- }
- kfree(hw_fib_pool);
- kfree(fib_pool);
- }
- kfree(fib);
- spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
- }
- /*
- * There are no more AIF's
- */
- spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ aac_process_events(dev);
/*
* Background activity
@@ -1968,7 +2434,7 @@ int aac_command_thread(void *data)
/* Don't even try to talk to adapter if it's sick */
ret = aac_check_health(dev);
- if (!ret && !dev->queues)
+ if (!dev->queues)
break;
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
@@ -1981,36 +2447,16 @@ int aac_command_thread(void *data)
difference = (((1000000 - now.tv_usec) * HZ)
+ 500000) / 1000000;
else if (ret == 0) {
- struct fib *fibptr;
-
- if ((fibptr = aac_fib_alloc(dev))) {
- int status;
- __le32 *info;
-
- aac_fib_init(fibptr);
-
- info = (__le32 *) fib_data(fibptr);
- if (now.tv_usec > 500000)
- ++now.tv_sec;
-
- *info = cpu_to_le32(now.tv_sec);
-
- status = aac_fib_send(SendHostTime,
- fibptr,
- sizeof(*info),
- FsaNormal,
- 1, 1,
- NULL,
- NULL);
- /* Do not set XferState to zero unless
- * receives a response from F/W */
- if (status >= 0)
- aac_fib_complete(fibptr);
- /* FIB should be freed only after
- * getting the response from the F/W */
- if (status != -ERESTARTSYS)
- aac_fib_free(fibptr);
- }
+
+ if (now.tv_usec > 500000)
+ ++now.tv_sec;
+
+ if (dev->sa_firmware)
+ ret =
+ aac_send_safw_hostttime(dev, &now);
+ else
+ ret = aac_send_hosttime(dev, &now);
+
difference = (long)(unsigned)update_interval*HZ;
} else {
/* retry shortly */
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 7e836205aef1..417ba349e10e 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -122,7 +123,6 @@ unsigned int aac_response_normal(struct aac_queue * q)
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
- fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
@@ -251,8 +251,9 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
BUG_ON(fibptr == NULL);
dev = fibptr->dev;
- if (fibptr->hw_fib_va->header.XferState &
- cpu_to_le32(NoMoreAifDataAvailable)) {
+ if ((fibptr->hw_fib_va->header.XferState &
+ cpu_to_le32(NoMoreAifDataAvailable)) ||
+ dev->sa_firmware) {
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
return;
@@ -282,8 +283,8 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting.
*/
-unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
- int isAif, int isFastResponse, struct hw_fib *aif_fib)
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
+ int isFastResponse, struct hw_fib *aif_fib)
{
unsigned long mflags;
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
@@ -305,12 +306,14 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
kfree (fib);
return 1;
}
- if (aif_fib != NULL) {
+ if (dev->sa_firmware) {
+ fib->hbacmd_size = index; /* store event type */
+ } else if (aif_fib != NULL) {
memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
} else {
- memcpy(hw_fib,
- (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
- index), sizeof(struct hw_fib));
+ memcpy(hw_fib, (struct hw_fib *)
+ (((uintptr_t)(dev->regs.sa)) + index),
+ sizeof(struct hw_fib));
}
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
@@ -344,7 +347,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
(fib_callback)aac_aif_callback, fibctx);
} else {
struct fib *fib = &dev->fibs[index];
- struct hw_fib * hwfib = fib->hw_fib_va;
+ int start_callback = 0;
/*
* Remove this fib from the Outstanding I/O queue.
@@ -362,60 +365,104 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
return 0;
}
- if (isFastResponse) {
- /*
- * Doctor the fib
- */
- *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
- hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
- fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
- }
-
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
- if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
- {
- __le32 *pstatus = (__le32 *)hwfib->data;
- if (*pstatus & cpu_to_le32(0xffff0000))
- *pstatus = cpu_to_le32(ST_OK);
- }
- if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
- {
- if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
- FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
- else
- FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
- /*
- * NOTE: we cannot touch the fib after this
- * call, because it may have been deallocated.
- */
- if (likely(fib->callback && fib->callback_data)) {
- fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
- fib->callback(fib->callback_data, fib);
- } else
- dev_info(&dev->pdev->dev,
- "Invalid callback_fib[%d] (*%p)(%p)\n",
- index, fib->callback, fib->callback_data);
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+
+ if (isFastResponse)
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+
+ if (fib->callback) {
+ start_callback = 1;
+ } else {
+ unsigned long flagv;
+ int complete = 0;
+
+ dprintk((KERN_INFO "event_wait up\n"));
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (fib->done == 2) {
+ fib->done = 1;
+ complete = 1;
+ } else {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock,
+ mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
+ if (complete)
+ aac_fib_complete(fib);
+ }
} else {
- unsigned long flagv;
- dprintk((KERN_INFO "event_wait up\n"));
- spin_lock_irqsave(&fib->event_lock, flagv);
- if (!fib->done) {
- fib->done = 1;
- up(&fib->event_wait);
+ struct hw_fib *hwfib = fib->hw_fib_va;
+
+ if (isFastResponse) {
+ /* Doctor the fib */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |=
+ cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
}
- spin_unlock_irqrestore(&fib->event_lock, flagv);
- spin_lock_irqsave(&dev->manage_lock, mflags);
- dev->management_fib_count--;
- spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ if (hwfib->header.Command ==
+ cpu_to_le16(NuFileSystem)) {
+ __le32 *pstatus = (__le32 *)hwfib->data;
- FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
- if (fib->done == 2) {
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState &
+ cpu_to_le32(NoResponseExpected | Async)) {
+ if (hwfib->header.XferState & cpu_to_le32(
+ NoResponseExpected))
+ FIB_COUNTER_INCREMENT(
+ aac_config.NoResponseRecved);
+ else
+ FIB_COUNTER_INCREMENT(
+ aac_config.AsyncRecved);
+ start_callback = 1;
+ } else {
+ unsigned long flagv;
+ int complete = 0;
+
+ dprintk((KERN_INFO "event_wait up\n"));
spin_lock_irqsave(&fib->event_lock, flagv);
- fib->done = 0;
+ if (fib->done == 2) {
+ fib->done = 1;
+ complete = 1;
+ } else {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock,
+ mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (complete)
+ aac_fib_complete(fib);
+ }
+ }
+
+
+ if (start_callback) {
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ if (likely(fib->callback && fib->callback_data)) {
+ fib->callback(fib->callback_data, fib);
+ } else {
aac_fib_complete(fib);
+ aac_fib_free(fib);
}
}
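Both the native and legacy synchronous paths now share a small three-state protocol in fib->done: 0 means still outstanding, 1 means the interrupt side completed it, and 2 means the waiter gave up (signal or abort) and wants the interrupt side to finish the fib on its behalf. A compressed sketch of the two halves, with locking and error handling elided — not a literal copy of either function:

/* Waiter side (cf. aac_hba_send / aac_fib_send), simplified. */
if (down_interruptible(&fib->event_wait)) {
        fib->done = 2;                  /* signal: ask the IRQ path to clean up */
        up(&fib->event_wait);
}

/* Interrupt side (cf. aac_intr_normal), simplified. */
if (fib->done == 2) {
        fib->done = 1;
        aac_fib_complete(fib);          /* nobody is waiting any more */
} else {
        fib->done = 1;
        up(&fib->event_wait);           /* wake the sleeping waiter */
}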
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3ecbf20ca29f..137d22d3a005 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,7 +58,7 @@
#include "aacraid.h"
-#define AAC_DRIVER_VERSION "1.2-1"
+#define AAC_DRIVER_VERSION "1.2.1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@@ -401,61 +402,89 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+ int chn, tid;
+ unsigned int depth = 0;
+ unsigned int set_timeout = 0;
+
+ chn = aac_logical_to_phys(sdev_channel(sdev));
+ tid = sdev_id(sdev);
+ if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+ aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+ depth = aac->hba_map[chn][tid].qd_limit;
+ set_timeout = 1;
+ goto common_config;
+ }
+
+
if (aac->jbod && (sdev->type == TYPE_DISK))
sdev->removable = 1;
- if ((sdev->type == TYPE_DISK) &&
- (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
- (!aac->jbod || sdev->inq_periph_qual) &&
- (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
+ if (sdev->type == TYPE_DISK
+ && sdev_channel(sdev) != CONTAINER_CHANNEL
+ && (!aac->jbod || sdev->inq_periph_qual)
+ && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
if (expose_physicals == 0)
return -ENXIO;
+
if (expose_physicals < 0)
sdev->no_uld_attach = 1;
}
- if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
- (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
- !sdev->no_uld_attach) {
+
+ if (sdev->tagged_supported
+ && sdev->type == TYPE_DISK
+ && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+ && !sdev->no_uld_attach) {
+
struct scsi_device * dev;
struct Scsi_Host *host = sdev->host;
unsigned num_lsu = 0;
unsigned num_one = 0;
- unsigned depth;
unsigned cid;
- /*
- * Firmware has an individual device recovery time typically
- * of 35 seconds, give us a margin.
- */
- if (sdev->request_queue->rq_timeout < (45 * HZ))
- blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ set_timeout = 1;
+
for (cid = 0; cid < aac->maximum_num_containers; ++cid)
if (aac->fsa_dev[cid].valid)
++num_lsu;
+
__shost_for_each_device(dev, host) {
- if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
- (!aac->raid_scsi_mode ||
- (sdev_channel(sdev) != 2)) &&
- !dev->no_uld_attach) {
+ if (dev->tagged_supported
+ && dev->type == TYPE_DISK
+ && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+ && !dev->no_uld_attach) {
if ((sdev_channel(dev) != CONTAINER_CHANNEL)
- || !aac->fsa_dev[sdev_id(dev)].valid)
+ || !aac->fsa_dev[sdev_id(dev)].valid) {
++num_lsu;
- } else
+ }
+ } else {
++num_one;
+ }
}
+
if (num_lsu == 0)
++num_lsu;
- depth = (host->can_queue - num_one) / num_lsu;
- if (depth > 256)
- depth = 256;
- else if (depth < 2)
- depth = 2;
- scsi_change_queue_depth(sdev, depth);
- } else {
- scsi_change_queue_depth(sdev, 1);
- sdev->tagged_supported = 1;
+ depth = (host->can_queue - num_one) / num_lsu;
}
+common_config:
+ /*
+ * Firmware has an individual device recovery time typically
+ * of 35 seconds, give us a margin.
+ */
+ if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
+ blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 1)
+ depth = 1;
+
+ scsi_change_queue_depth(sdev, depth);
+
+ sdev->tagged_supported = 1;
+
return 0;
}
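For array devices the queue depth is whatever is left of host->can_queue after reserving one slot per untagged device, split evenly across the logical units and then clamped to the 1..256 range at common_config. A small worked helper with made-up numbers:

/* Illustrative only: e.g. can_queue = 512, 2 untagged devices, 4 LSUs. */
static unsigned int lsu_depth(unsigned int can_queue, unsigned int num_one,
                              unsigned int num_lsu)
{
        unsigned int depth;

        if (num_lsu == 0)
                num_lsu = 1;
        depth = (can_queue - num_one) / num_lsu;        /* (512 - 2) / 4 = 127 */
        if (depth > 256)
                depth = 256;
        else if (depth < 1)
                depth = 1;
        return depth;
}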
@@ -470,6 +499,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+ int chn, tid, is_native_device = 0;
+
+ chn = aac_logical_to_phys(sdev_channel(sdev));
+ tid = sdev_id(sdev);
+ if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+ aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
+ is_native_device = 1;
+
if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
(sdev_channel(sdev) == CONTAINER_CHANNEL)) {
struct scsi_device * dev;
@@ -491,9 +529,12 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
else if (depth < 2)
depth = 2;
return scsi_change_queue_depth(sdev, depth);
+ } else if (is_native_device) {
+ scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
+ } else {
+ scsi_change_queue_depth(sdev, 1);
}
-
- return scsi_change_queue_depth(sdev, 1);
+ return sdev->queue_depth;
}
static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -516,8 +557,39 @@ static struct device_attribute aac_raid_level_attr = {
.show = aac_show_raid_level
};
+static ssize_t aac_show_unique_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+ unsigned char sn[16];
+
+ memset(sn, 0, sizeof(sn));
+
+ if (sdev_channel(sdev) == CONTAINER_CHANNEL)
+ memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
+
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+
+static struct device_attribute aac_unique_id_attr = {
+ .attr = {
+ .name = "unique_id",
+ .mode = 0444,
+ },
+ .show = aac_show_unique_id
+};
+
+
+
static struct device_attribute *aac_dev_attrs[] = {
&aac_raid_level_attr,
+ &aac_unique_id_attr,
NULL,
};
@@ -534,46 +606,136 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
- int count;
+ int count, found;
+ u32 bus, cid;
int ret = FAILED;
- printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
- AAC_DRIVERNAME,
- host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
- switch (cmd->cmnd[0]) {
- case SERVICE_ACTION_IN_16:
- if (!(aac->raw_io_interface) ||
- !(aac->raw_io_64) ||
- ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
- break;
- case INQUIRY:
- case READ_CAPACITY:
- /* Mark associated FIB to not complete, eh handler does this */
+ bus = aac_logical_to_phys(scmd_channel(cmd));
+ cid = scmd_id(cmd);
+ if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+ struct fib *fib;
+ struct aac_hba_tm_req *tmf;
+ int status;
+ u64 address;
+ __le32 managed_request_id;
+
+ pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
+ AAC_DRIVERNAME,
+ host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
+
+ found = 0;
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
- struct fib * fib = &aac->fibs[count];
- if (fib->hw_fib_va->header.XferState &&
- (fib->flags & FIB_CONTEXT_FLAG) &&
- (fib->callback_data == cmd)) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
- cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ fib = &aac->fibs[count];
+ if (*(u8 *)fib->hw_fib_va != 0 &&
+ (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+ (fib->callback_data == cmd)) {
+ found = 1;
+ managed_request_id = ((struct aac_hba_cmd_req *)
+ fib->hw_fib_va)->request_id;
+ break;
+ }
+ }
+ if (!found)
+ return ret;
+
+ /* start a HBA_TMF_ABORT_TASK TMF request */
+ fib = aac_fib_alloc(aac);
+ if (!fib)
+ return ret;
+
+ tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->tmf = HBA_TMF_ABORT_TASK;
+ tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+ tmf->lun[1] = cmd->device->lun;
+
+ address = (u64)fib->hw_error_pa;
+ tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+ tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+ tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+ fib->hbacmd_size = sizeof(*tmf);
+ cmd->SCp.sent_command = 0;
+
+ status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
+ (fib_callback) aac_hba_callback,
+ (void *) cmd);
+
+ /* Wait up to 2 minutes for completion */
+ for (count = 0; count < 120; ++count) {
+ if (cmd->SCp.sent_command) {
ret = SUCCESS;
+ break;
}
+ msleep(1000);
}
- break;
- case TEST_UNIT_READY:
- /* Mark associated FIB to not complete, eh handler does this */
- for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
- struct scsi_cmnd * command;
- struct fib * fib = &aac->fibs[count];
- if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
- (fib->flags & FIB_CONTEXT_FLAG) &&
- ((command = fib->callback_data)) &&
- (command->device == cmd->device)) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
- command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
- if (command == cmd)
+
+ if (ret != SUCCESS)
+ pr_err("%s: Host adapter abort request timed out\n",
+ AAC_DRIVERNAME);
+ } else {
+ pr_err(
+ "%s: Host adapter abort request.\n"
+ "%s: Outstanding commands on (%d,%d,%d,%d):\n",
+ AAC_DRIVERNAME, AAC_DRIVERNAME,
+ host->host_no, sdev_channel(dev), sdev_id(dev),
+ (int)dev->lun);
+ switch (cmd->cmnd[0]) {
+ case SERVICE_ACTION_IN_16:
+ if (!(aac->raw_io_interface) ||
+ !(aac->raw_io_64) ||
+ ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ case INQUIRY:
+ case READ_CAPACITY:
+ /*
+ * Mark associated FIB to not complete,
+ * eh handler does this
+ */
+ for (count = 0;
+ count < (host->can_queue + AAC_NUM_MGT_FIB);
+ ++count) {
+ struct fib *fib = &aac->fibs[count];
+
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |=
+ FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase =
+ AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
+ }
+ }
+ break;
+ case TEST_UNIT_READY:
+ /*
+ * Mark associated FIB to not complete,
+ * eh handler does this
+ */
+ for (count = 0;
+ count < (host->can_queue + AAC_NUM_MGT_FIB);
+ ++count) {
+ struct scsi_cmnd *command;
+ struct fib *fib = &aac->fibs[count];
+
+ command = fib->callback_data;
+
+ if ((fib->hw_fib_va->header.XferState &
+ cpu_to_le32
+ (Async | NoResponseExpected)) &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ ((command)) &&
+ (command->device == cmd->device)) {
+ fib->flags |=
+ FIB_CONTEXT_FLAG_TIMED_OUT;
+ command->SCp.phase =
+ AAC_OWNER_ERROR_HANDLER;
+ if (command == cmd)
+ ret = SUCCESS;
+ }
}
+ break;
}
}
return ret;
@@ -588,70 +750,165 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
- struct scsi_cmnd * command;
- int count;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
- unsigned long flags;
-
- /* Mark the associated FIB to not complete, eh handler does this */
- for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
- struct fib * fib = &aac->fibs[count];
- if (fib->hw_fib_va->header.XferState &&
- (fib->flags & FIB_CONTEXT_FLAG) &&
- (fib->callback_data == cmd)) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
- cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ int count;
+ u32 bus, cid;
+ int ret = FAILED;
+
+ bus = aac_logical_to_phys(scmd_channel(cmd));
+ cid = scmd_id(cmd);
+ if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+ aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+ struct fib *fib;
+ int status;
+ u64 address;
+ u8 command;
+
+ pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ AAC_DRIVERNAME);
+
+ fib = aac_fib_alloc(aac);
+ if (!fib)
+ return ret;
+
+
+ if (aac->hba_map[bus][cid].reset_state == 0) {
+ struct aac_hba_tm_req *tmf;
+
+ /* start a HBA_TMF_LUN_RESET TMF request */
+ tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->tmf = HBA_TMF_LUN_RESET;
+ tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+ tmf->lun[1] = cmd->device->lun;
+
+ address = (u64)fib->hw_error_pa;
+ tmf->error_ptr_hi = cpu_to_le32
+ ((u32)(address >> 32));
+ tmf->error_ptr_lo = cpu_to_le32
+ ((u32)(address & 0xffffffff));
+ tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+ fib->hbacmd_size = sizeof(*tmf);
+
+ command = HBA_IU_TYPE_SCSI_TM_REQ;
+ aac->hba_map[bus][cid].reset_state++;
+ } else if (aac->hba_map[bus][cid].reset_state >= 1) {
+ struct aac_hba_reset_req *rst;
+
+ /* already tried, start a hard reset now */
+ rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
+ memset(rst, 0, sizeof(*rst));
+ /* reset_type is already zero... */
+ rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+
+ address = (u64)fib->hw_error_pa;
+ rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+ rst->error_ptr_lo = cpu_to_le32
+ ((u32)(address & 0xffffffff));
+ rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+ fib->hbacmd_size = sizeof(*rst);
+
+ command = HBA_IU_TYPE_SATA_REQ;
+ aac->hba_map[bus][cid].reset_state = 0;
}
- }
- printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
- AAC_DRIVERNAME);
+ cmd->SCp.sent_command = 0;
- if ((count = aac_check_health(aac)))
- return count;
- /*
- * Wait for all commands to complete to this specific
- * target (block maximum 60 seconds).
- */
- for (count = 60; count; --count) {
- int active = aac->in_reset;
+ status = aac_hba_send(command, fib,
+ (fib_callback) aac_hba_callback,
+ (void *) cmd);
- if (active == 0)
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if ((command != cmd) &&
- (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- if (active)
+ /* Wait up to 2 minutes for completion */
+ for (count = 0; count < 120; ++count) {
+ if (cmd->SCp.sent_command) {
+ ret = SUCCESS;
break;
+ }
+ msleep(1000);
+ }
+ if (ret != SUCCESS)
+ pr_err("%s: Host adapter reset request timed out\n",
+ AAC_DRIVERNAME);
+ } else {
+ struct scsi_cmnd *command;
+ unsigned long flags;
+
+ /* Mark the assoc. FIB to not complete, eh handler does this */
+ for (count = 0;
+ count < (host->can_queue + AAC_NUM_MGT_FIB);
+ ++count) {
+ struct fib *fib = &aac->fibs[count];
+
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ }
}
+
+ pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ AAC_DRIVERNAME);
+
+ count = aac_check_health(aac);
+ if (count)
+ return count;
/*
- * We can exit If all the commands are complete
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds).
*/
- if (active == 0)
- return SUCCESS;
- ssleep(1);
+ for (count = 60; count; --count) {
+ int active = aac->in_reset;
+
+ if (active == 0)
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_for_each_entry(command, &dev->cmd_list,
+ list) {
+ if ((command != cmd) &&
+ (command->SCp.phase ==
+ AAC_OWNER_FIRMWARE)) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ return SUCCESS;
+ ssleep(1);
+ }
+ pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+
+ /*
+ * This adapter needs a blind reset, only do so for
+ * Adapters that support a register, instead of a commanded,
+ * reset.
+ */
+ if (((aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) ||
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) &&
+ aac_check_reset &&
+ ((aac_check_reset != 1) ||
+ !(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET))) {
+ /* Bypass wait for command quiesce */
+ aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+ }
+ ret = SUCCESS;
}
- printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
/*
- * This adapter needs a blind reset, only do so for Adapters that
- * support a register, instead of a commanded, reset.
+ * Cause an immediate retry of the command with a ten second delay
+ * after successful tur
*/
- if (((aac->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_MU_RESET) ||
- (aac->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_DOORBELL_RESET)) &&
- aac_check_reset &&
- ((aac_check_reset != 1) ||
- !(aac->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_IGNORE_RESET)))
- aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
- return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
+ return ret;
}
/**
@@ -911,10 +1168,16 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
+ int bled = 0;
+ struct aac_dev *aac;
+
if (!capable(CAP_SYS_ADMIN))
return retval;
- retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
+
+ aac = (struct aac_dev *)class_to_shost(device)->hostdata;
+ bled = buf[0] == '!' ? 1:0;
+ retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
return retval;
@@ -1070,6 +1333,7 @@ static void __aac_shutdown(struct aac_dev * aac)
{
int i;
+ aac->adapter_shutdown = 1;
aac_send_shutdown(aac);
if (aac->aif_thread) {
@@ -1285,7 +1549,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
shost->this_id = shost->max_id;
- if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
aac_intr_normal(aac, 0, 2, 0, NULL);
/*
@@ -1327,35 +1591,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
static void aac_release_resources(struct aac_dev *aac)
{
- int i;
-
aac_adapter_disable_int(aac);
- if (aac->pdev->device == PMC_DEVICE_S6 ||
- aac->pdev->device == PMC_DEVICE_S7 ||
- aac->pdev->device == PMC_DEVICE_S8 ||
- aac->pdev->device == PMC_DEVICE_S9) {
- if (aac->max_msix > 1) {
- for (i = 0; i < aac->max_msix; i++)
- free_irq(pci_irq_vector(aac->pdev, i),
- &(aac->aac_msix[i]));
- } else {
- free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
- }
- } else {
- free_irq(aac->pdev->irq, aac);
- }
- if (aac->msi)
- pci_disable_msi(aac->pdev);
- else if (aac->max_msix > 1)
- pci_disable_msix(aac->pdev);
-
+ aac_free_irq(aac);
}
static int aac_acquire_resources(struct aac_dev *dev)
{
- int i, j;
- int instance = dev->id;
- const char *name = dev->name;
unsigned long status;
/*
* First clear out all interrupts. Then enable the one's that we
@@ -1377,37 +1618,8 @@ static int aac_acquire_resources(struct aac_dev *dev)
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
- if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
- for (i = 0; i < dev->max_msix; i++) {
- dev->aac_msix[i].vector_no = i;
- dev->aac_msix[i].dev = dev;
-
- if (request_irq(pci_irq_vector(dev->pdev, i),
- dev->a_ops.adapter_intr,
- 0, "aacraid", &(dev->aac_msix[i]))) {
- printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
- name, instance, i);
- for (j = 0 ; j < i ; j++)
- free_irq(pci_irq_vector(dev->pdev, j),
- &(dev->aac_msix[j]));
- pci_disable_msix(dev->pdev);
- goto error_iounmap;
- }
- }
- } else {
- dev->aac_msix[0].vector_no = 0;
- dev->aac_msix[0].dev = dev;
-
- if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED, "aacraid",
- &(dev->aac_msix[0])) < 0) {
- if (dev->msi)
- pci_disable_msi(dev->pdev);
- printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
- name, instance);
- goto error_iounmap;
- }
- }
+ if (aac_acquire_irq(dev))
+ goto error_iounmap;
aac_adapter_enable_int(dev);
@@ -1420,7 +1632,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
/* After EEH recovery or suspend resume, max_msix count
* may change, therefore updating in init as well.
*/
- dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
aac_adapter_start(dev);
}
return 0;
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index 6c53b1d8b2ba..c59074e782d6 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -5,7 +5,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 7d8013feedde..a1bc5bbf7a34 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,7 +61,7 @@ static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
* case warrants this half baked, but convenient, check here.
*/
if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
- dev->init->MaxIoCommands =
+ dev->init->r7.max_io_commands =
cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ac1638069335..0e69a80c3275 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -315,10 +316,10 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
static void aac_rx_start_adapter(struct aac_dev *dev)
{
- struct aac_init *init;
+ union aac_init *init;
init = dev->init;
- init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -470,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
return 0;
}
-static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
+static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
u32 var = 0;
@@ -559,7 +560,7 @@ int _aac_rx_init(struct aac_dev *dev)
dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
- !aac_rx_restart_adapter(dev, 0))
+ !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
/* Make sure the Hardware FIFO is empty */
while ((++restart < 512) &&
(rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
@@ -568,7 +569,8 @@ int _aac_rx_init(struct aac_dev *dev)
*/
status = rx_readl(dev, MUnit.OMRx[0]);
if (status & KERNEL_PANIC) {
- if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
+ if (aac_rx_restart_adapter(dev,
+ aac_rx_check_health(dev), IOP_HWSOFT_RESET))
goto error_iounmap;
++restart;
}
@@ -606,7 +608,8 @@ int _aac_rx_init(struct aac_dev *dev)
((startup_timeout > 60)
? (startup_timeout - 60)
: (startup_timeout / 2))))) {
- if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
+ if (likely(!aac_rx_restart_adapter(dev,
+ aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
start = jiffies;
++restart;
}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 869aea23c041..553922fed524 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -245,19 +246,19 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
static void aac_sa_start_adapter(struct aac_dev *dev)
{
- struct aac_init *init;
+ union aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
- init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
-static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
return -EINVAL;
}
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 0c453880f214..8e4e2ddbafd7 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -6,7 +6,8 @@
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
- * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -135,8 +136,16 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
if (mode & AAC_INT_MODE_AIF) {
/* handle AIF */
- if (dev->aif_thread && dev->fsa_dev)
- aac_intr_normal(dev, 0, 2, 0, NULL);
+ if (dev->sa_firmware) {
+ u32 events = src_readl(dev, MUnit.SCR0);
+
+ aac_intr_normal(dev, events, 1, 0, NULL);
+ writel(events, &dev->IndexRegs->Mailbox[0]);
+ src_writel(dev, MUnit.IDR, 1 << 23);
+ } else {
+ if (dev->aif_thread && dev->fsa_dev)
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ }
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
mode = 0;
@@ -148,17 +157,19 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
for (;;) {
isFastResponse = 0;
/* remove toggle bit (31) */
- handle = (dev->host_rrq[index] & 0x7fffffff);
- /* check fast response bit (30) */
+ handle = le32_to_cpu((dev->host_rrq[index])
+ & 0x7fffffff);
+ /* check fast response bits (30, 1) */
if (handle & 0x40000000)
isFastResponse = 1;
handle &= 0x0000ffff;
if (handle == 0)
break;
+ handle >>= 2;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
+ aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -384,7 +395,7 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
static void aac_src_start_adapter(struct aac_dev *dev)
{
- struct aac_init *init;
+ union aac_init *init;
int i;
/* reset host_rrq_idx first */
@@ -392,14 +403,26 @@ static void aac_src_start_adapter(struct aac_dev *dev)
dev->host_rrq_idx[i] = i * dev->vector_cap;
atomic_set(&dev->rrq_outstanding[i], 0);
}
+ atomic_set(&dev->msix_counter, 0);
dev->fibs_pushed_no = 0;
init = dev->init;
- init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+ init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+ lower_32_bits(dev->init_pa),
+ upper_32_bits(dev->init_pa),
+ sizeof(struct _r8) +
+ (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+ 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+ } else {
+ init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ // We can only use a 32 bit address here
+ src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+ (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
- /* We can only use a 32 bit address here */
- src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
- 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
/**
@@ -435,6 +458,11 @@ static int aac_src_check_health(struct aac_dev *dev)
return 0;
}
+static inline u32 aac_get_vector(struct aac_dev *dev)
+{
+ return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
+}
+
/**
* aac_src_deliver_message
* @fib: fib to issue
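The hunk above adds aac_get_vector(), a round-robin selector over the MSI-X reply queues driven by an atomic counter; the deliver-message hunk that follows folds the chosen vector into the upper 16 bits of the request id so the controller completes the command on that queue. A small user-space analogue of both steps (names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint msix_counter;

/* Round-robin over max_msix reply queues, like aac_get_vector(). */
static uint32_t pick_vector(uint32_t max_msix)
{
	return (atomic_fetch_add(&msix_counter, 1) + 1) % max_msix;
}

/* Fold the chosen vector into the request id, mirroring
 * "request_id += (vector_no << 16)" in the deliver path. */
static uint32_t tag_request(uint32_t request_id, uint32_t vector_no)
{
	return request_id + (vector_no << 16);
}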
@@ -448,66 +476,125 @@ static int aac_src_deliver_message(struct fib *fib)
u32 fibsize;
dma_addr_t address;
struct aac_fib_xporthdr *pFibX;
+ int native_hba;
#if !defined(writeq)
unsigned long flags;
#endif
- u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
u16 vector_no;
atomic_inc(&q->numpending);
- if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
- dev->max_msix > 1) {
- vector_no = fib->vector_no;
- fib->hw_fib_va->header.Handle += (vector_no << 16);
+ native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
+
+
+ if (dev->msi_enabled && dev->max_msix > 1 &&
+ (native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
+
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ && dev->sa_firmware)
+ vector_no = aac_get_vector(dev);
+ else
+ vector_no = fib->vector_no;
+
+ if (native_hba) {
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+ struct aac_hba_tm_req *tm_req;
+
+ tm_req = (struct aac_hba_tm_req *)
+ fib->hw_fib_va;
+ if (tm_req->iu_type ==
+ HBA_IU_TYPE_SCSI_TM_REQ) {
+ ((struct aac_hba_tm_req *)
+ fib->hw_fib_va)->reply_qid
+ = vector_no;
+ ((struct aac_hba_tm_req *)
+ fib->hw_fib_va)->request_id
+ += (vector_no << 16);
+ } else {
+ ((struct aac_hba_reset_req *)
+ fib->hw_fib_va)->reply_qid
+ = vector_no;
+ ((struct aac_hba_reset_req *)
+ fib->hw_fib_va)->request_id
+ += (vector_no << 16);
+ }
+ } else {
+ ((struct aac_hba_cmd_req *)
+ fib->hw_fib_va)->reply_qid
+ = vector_no;
+ ((struct aac_hba_cmd_req *)
+ fib->hw_fib_va)->request_id
+ += (vector_no << 16);
+ }
+ } else {
+ fib->hw_fib_va->header.Handle += (vector_no << 16);
+ }
} else {
vector_no = 0;
}
atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
- /* Calculate the amount to the fibsize bits */
- fibsize = (hdr_size + 127) / 128 - 1;
- if (fibsize > (ALIGN32 - 1))
- return -EMSGSIZE;
- /* New FIB header, 32-bit */
+ if (native_hba) {
address = fib->hw_fib_pa;
- fib->hw_fib_va->header.StructType = FIB_MAGIC2;
- fib->hw_fib_va->header.SenderFibAddress = (u32)address;
- fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON(upper_32_bits(address) != 0L);
+ fibsize = (fib->hbacmd_size + 127) / 128 - 1;
+ if (fibsize > 31)
+ fibsize = 31;
address |= fibsize;
+#if defined(writeq)
+ src_writeq(dev, MUnit.IQN_L, (u64)address);
+#else
+ spin_lock_irqsave(&fib->dev->iq_lock, flags);
+ src_writel(dev, MUnit.IQN_H,
+ upper_32_bits(address) & 0xffffffff);
+ src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
+ spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+#endif
} else {
- /* Calculate the amount to the fibsize bits */
- fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
- if (fibsize > (ALIGN32 - 1))
- return -EMSGSIZE;
-
- /* Fill XPORT header */
- pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
- pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
- pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
- pFibX->Size = cpu_to_le32(hdr_size);
-
- /*
- * The xport header has been 32-byte aligned for us so that fibsize
- * can be masked out of this address by hardware. -- BenC
- */
- address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
- if (address & (ALIGN32 - 1))
- return -EINVAL;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
+ + 127) / 128 - 1;
+ /* New FIB header, 32-bit */
+ address = fib->hw_fib_pa;
+ fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+ fib->hw_fib_va->header.SenderFibAddress =
+ cpu_to_le32((u32)address);
+ fib->hw_fib_va->header.u.TimeStamp = 0;
+ WARN_ON(upper_32_bits(address) != 0L);
+ } else {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (sizeof(struct aac_fib_xporthdr) +
+ le16_to_cpu(fib->hw_fib_va->header.Size)
+ + 127) / 128 - 1;
+ /* Fill XPORT header */
+ pFibX = (struct aac_fib_xporthdr *)
+ ((unsigned char *)fib->hw_fib_va -
+ sizeof(struct aac_fib_xporthdr));
+ pFibX->Handle = fib->hw_fib_va->header.Handle;
+ pFibX->HostAddress =
+ cpu_to_le64((u64)fib->hw_fib_pa);
+ pFibX->Size = cpu_to_le32(
+ le16_to_cpu(fib->hw_fib_va->header.Size));
+ address = fib->hw_fib_pa -
+ (u64)sizeof(struct aac_fib_xporthdr);
+ }
+ if (fibsize > 31)
+ fibsize = 31;
address |= fibsize;
- }
+
#if defined(writeq)
- src_writeq(dev, MUnit.IQ_L, (u64)address);
+ src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
- spin_lock_irqsave(&fib->dev->iq_lock, flags);
- src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
- src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
- spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+ spin_lock_irqsave(&fib->dev->iq_lock, flags);
+ src_writel(dev, MUnit.IQ_H,
+ upper_32_bits(address) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+ spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
+ }
return 0;
}
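In the non-native path above, the FIB size is encoded into the low bits of the FIB's DMA address before it is written to the inbound queue: size in 128-byte units minus one, capped at 31, which is why the address must remain at least 32-byte aligned. A compact restatement of that encoding (illustrative helper, not a driver function; assumes fib_bytes > 0):

#include <stdint.h>

static uint64_t encode_iq_entry(uint64_t fib_pa, uint16_t fib_bytes)
{
	/* size in 128-byte units, minus one, clamped to 5 bits */
	uint32_t fibsize = (fib_bytes + 127u) / 128u - 1;

	if (fibsize > 31)
		fibsize = 31;
	return fib_pa | fibsize;	/* fib_pa must be at least 32-byte aligned */
}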
@@ -553,52 +640,117 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
dev->base = dev->regs.src.bar0 = NULL;
return 0;
}
+
+ dev->regs.src.bar1 =
+ ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
+ dev->base = NULL;
+ if (dev->regs.src.bar1 == NULL)
+ return -1;
dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
- if (dev->base == NULL)
+ if (dev->base == NULL) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
return -1;
+ }
dev->IndexRegs = &((struct src_registers __iomem *)
dev->base)->u.denali.IndexRegs;
return 0;
}
-static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+static void aac_set_intx_mode(struct aac_dev *dev)
+{
+ if (dev->msi_enabled) {
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ dev->msi_enabled = 0;
+ msleep(5000); /* Delay 5 seconds */
+ }
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev, int bled)
{
u32 var, reset_mask;
- if (bled >= 0) {
- if (bled)
- printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var,
+ &reset_mask, NULL, NULL, NULL);
+
+ if ((bled || var != 0x00000001) && !dev->doorbell_mask)
+ bled = -EINVAL;
+ else if (dev->doorbell_mask) {
+ reset_mask = dev->doorbell_mask;
+ bled = 0;
+ var = 0x00000001;
+ }
+
+ aac_set_intx_mode(dev);
+
+ if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ } else {
+ src_writel(dev, MUnit.IDR, 0x100);
+ }
+ msleep(30000);
+}
+
+static void aac_send_hardware_soft_reset(struct aac_dev *dev)
+{
+ u_int32_t val;
+
+ val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
+ val |= 0x01;
+ writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
+ msleep_interruptible(20000);
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
+{
+ unsigned long status, start;
+
+ if (bled < 0)
+ goto invalid_out;
+
+ if (bled)
+ pr_err("%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
- dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
- 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
- if ((bled || (var != 0x00000001)) &&
- !dev->doorbell_mask)
- return -EINVAL;
- else if (dev->doorbell_mask) {
- reset_mask = dev->doorbell_mask;
- bled = 0;
- var = 0x00000001;
- }
- if ((dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
- aac_src_access_devreg(dev, AAC_ENABLE_INTX);
- dev->msi_enabled = 0;
- msleep(5000); /* Delay 5 seconds */
- }
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_DOORBELL_RESET)) {
- src_writel(dev, MUnit.IDR, reset_mask);
- ssleep(45);
- } else {
- src_writel(dev, MUnit.IDR, 0x100);
- ssleep(45);
+ switch (reset_type) {
+ case IOP_HWSOFT_RESET:
+ aac_send_iop_reset(dev, bled);
+ /*
+ * Wait for the adapter to report KERNEL_UP_AND_RUNNING.
+ * If it does not come up within SOFT_RESET_TIME seconds,
+ * issue a HW soft reset and keep waiting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (dev->sa_firmware
+ && !(status & KERNEL_UP_AND_RUNNING)) {
+ start = jiffies;
+ do {
+ status = src_readl(dev, MUnit.OMR);
+ if (time_after(jiffies,
+ start+HZ*SOFT_RESET_TIME)) {
+ aac_send_hardware_soft_reset(dev);
+ start = jiffies;
+ }
+ } while (!(status & KERNEL_UP_AND_RUNNING));
}
+ break;
+ case HW_SOFT_RESET:
+ if (dev->sa_firmware) {
+ aac_send_hardware_soft_reset(dev);
+ aac_set_intx_mode(dev);
+ }
+ break;
+ default:
+ aac_send_iop_reset(dev, bled);
+ break;
}
+invalid_out:
+
if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
return -ENODEV;
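The IOP_HWSOFT_RESET branch above waits for the firmware to report KERNEL_UP_AND_RUNNING and, on SA firmware, re-issues a hardware soft reset whenever SOFT_RESET_TIME seconds elapse without it. The same shape in stand-alone form (a sketch with callback placeholders, not driver code):

#include <stdbool.h>
#include <time.h>

static void wait_until_ready(bool (*ready)(void), void (*nudge)(void),
			     unsigned int soft_reset_time)
{
	time_t start = time(NULL);

	while (!ready()) {
		/* no progress for soft_reset_time seconds: nudge the device */
		if (difftime(time(NULL), start) >= soft_reset_time) {
			nudge();
			start = time(NULL);
		}
	}
}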
@@ -653,14 +805,15 @@ int aac_src_init(struct aac_dev *dev)
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
if ((aac_reset_devices || reset_devices) &&
- !aac_src_restart_adapter(dev, 0))
+ !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
++restart;
/*
* Check to see if the board panic'd while booting.
*/
status = src_readl(dev, MUnit.OMR);
if (status & KERNEL_PANIC) {
- if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ if (aac_src_restart_adapter(dev,
+ aac_src_check_health(dev), IOP_HWSOFT_RESET))
goto error_iounmap;
++restart;
}
@@ -701,7 +854,7 @@ int aac_src_init(struct aac_dev *dev)
? (startup_timeout - 60)
: (startup_timeout / 2))))) {
if (likely(!aac_src_restart_adapter(dev,
- aac_src_check_health(dev))))
+ aac_src_check_health(dev), IOP_HWSOFT_RESET)))
start = jiffies;
++restart;
}
@@ -798,7 +951,7 @@ int aac_srcv_init(struct aac_dev *dev)
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
if ((aac_reset_devices || reset_devices) &&
- !aac_src_restart_adapter(dev, 0))
+ !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
++restart;
/*
* Check to see if flash update is running.
@@ -827,7 +980,8 @@ int aac_srcv_init(struct aac_dev *dev)
*/
status = src_readl(dev, MUnit.OMR);
if (status & KERNEL_PANIC) {
- if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ if (aac_src_restart_adapter(dev,
+ aac_src_check_health(dev), IOP_HWSOFT_RESET))
goto error_iounmap;
++restart;
}
@@ -866,7 +1020,8 @@ int aac_srcv_init(struct aac_dev *dev)
((startup_timeout > 60)
? (startup_timeout - 60)
: (startup_timeout / 2))))) {
- if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+ if (likely(!aac_src_restart_adapter(dev,
+ aac_src_check_health(dev), IOP_HWSOFT_RESET)))
start = jiffies;
++restart;
}
@@ -897,7 +1052,8 @@ int aac_srcv_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
- if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
+ if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
+ (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
goto error_iounmap;
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
@@ -905,9 +1061,9 @@ int aac_srcv_init(struct aac_dev *dev)
if (aac_acquire_irq(dev))
goto error_iounmap;
- dev->dbg_base = dev->base_start;
- dev->dbg_base_mapped = dev->base;
- dev->dbg_size = dev->base_size;
+ dev->dbg_base = pci_resource_start(dev->pdev, 2);
+ dev->dbg_base_mapped = dev->regs.src.bar1;
+ dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
aac_adapter_enable_int(dev);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 105b35393ce9..f792420c533e 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -178,37 +178,6 @@ static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
}
-#if 0
-/* Dead code... wasn't called anyway :-) and causes some trouble, because at
- * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has
- * to clear the DMA int pending bit before it allows other level 6 interrupts.
- */
-static void scsi_dma_buserr(int irq, void *dummy)
-{
- unsigned char dma_stat = tt_scsi_dma.dma_ctrl;
-
- /* Don't do anything if a NCR interrupt is pending. Probably it's just
- * masked... */
- if (atari_irq_pending(IRQ_TT_MFP_SCSI))
- return;
-
- printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n",
- SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt));
- if (dma_stat & 0x80) {
- if (!scsi_dma_is_ignored_buserr(dma_stat))
- printk("SCSI DMA bus error -- bad DMA programming!\n");
- } else {
- /* Under normal circumstances we never should get to this point,
- * since both interrupts are triggered simultaneously and the 5380
- * int has higher priority. When this irq is handled, that DMA
- * interrupt is cleared. So a warning message is printed here.
- */
- printk("SCSI DMA intr ?? -- this shouldn't happen!\n");
- }
-}
-#endif
-
-
static irqreturn_t scsi_tt_intr(int irq, void *dev)
{
struct Scsi_Host *instance = dev;
@@ -713,7 +682,8 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
if (IS_A_TT()) {
tt_scsi_dma.dma_ctrl = 0;
} else {
- st_dma.dma_mode_status = 0x90;
+ if (stdma_is_locked_by(scsi_falcon_intr))
+ st_dma.dma_mode_status = 0x90;
atari_dma_active = 0;
atari_dma_orig_addr = NULL;
}
@@ -813,7 +783,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
return -ENOMEM;
}
atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
- atari_dma_orig_addr = 0;
+ atari_dma_orig_addr = NULL;
}
instance = scsi_host_alloc(&atari_scsi_template,
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index b1d0fdc5d5e1..ca9440fb2325 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -84,7 +84,6 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
@@ -94,8 +93,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
};
struct be_eq_obj {
- bool todo_mcc_cq;
- bool todo_cq;
u32 cq_count;
struct be_queue_info q;
struct beiscsi_hba *phba;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index be65da2988fb..5d59e2630ce6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -676,10 +676,10 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
bool embedded, u8 sge_cnt)
{
if (embedded)
- wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
else
- wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
- MCC_WRB_SGE_CNT_SHIFT;
+ wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
be_dws_cpu_to_le(wrb, 8);
}
@@ -1599,7 +1599,7 @@ int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ struct be_post_sgl_pages_req *req;
int status;
mutex_lock(&ctrl->mbox_lock);
@@ -1700,31 +1700,34 @@ int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
struct be_ctrl_info *ctrl = &phba->ctrl;
struct iscsi_cleanup_req_v1 *req_v1;
struct iscsi_cleanup_req *req;
+ u16 hdr_ring_id, data_ring_id;
struct be_mcc_wrb *wrb;
int status;
mutex_lock(&ctrl->mbox_lock);
wrb = wrb_from_mbox(&ctrl->mbox_mem);
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
- OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
- /**
- * TODO: Check with FW folks the chute value to be set.
- * For now, use the ULP_MASK as the chute value.
- */
+ hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
+ data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
if (is_chip_be2_be3r(phba)) {
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
req->chute = (1 << ulp);
- req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
- req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
+ /* BE2/BE3 FW creates 8-bit ring id */
+ req->hdr_ring_id = hdr_ring_id;
+ req->data_ring_id = data_ring_id;
} else {
- req_v1 = (struct iscsi_cleanup_req_v1 *)req;
+ req_v1 = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
+ be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CLEANUP,
+ sizeof(*req_v1));
req_v1->hdr.version = 1;
- req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
- ulp));
- req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
- ulp));
+ req_v1->chute = (1 << ulp);
+ req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
+ req_v1->data_ring_id = cpu_to_le16(data_ring_id);
}
status = be_mbox_notify(ctrl);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 328fb5b973cd..1d40e83b0790 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -31,10 +31,16 @@ struct be_sge {
__le32 len;
};
-#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
-#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
struct be_mcc_wrb {
- u32 embedded; /* dword 0 */
+ u32 emb_sgecnt_special; /* dword 0 */
+ /* bits 0 - embedded */
+ /* bits 1 - 2 reserved */
+ /* bits 3 - 7 sge count */
+ /* bits 8 - 23 reserved */
+ /* bits 24 - 31 special */
+#define MCC_WRB_EMBEDDED_MASK 1
+#define MCC_WRB_SGE_CNT_SHIFT 3
+#define MCC_WRB_SGE_CNT_MASK 0x1F
u32 payload_length; /* dword 1 */
u32 tag0; /* dword 2 */
u32 tag1; /* dword 3 */
@@ -1133,11 +1139,6 @@ struct tcp_connect_and_offload_out {
} __packed;
-struct be_mcc_wrb_context {
- struct MCC_WRB *wrb;
- int *users_final_status;
-} __packed;
-
#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */
#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */
#define DB_DEF_PDU_REARM_SHIFT 14
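The be_cmds.h hunk above renames dword 0 of the MCC WRB to emb_sgecnt_special and documents its bit layout; be_wrb_hdr_prepare() in be_cmds.c packs it accordingly. A self-contained restatement of that packing (constants copied from the hunk, helper name is illustrative):

#include <stdint.h>

#define MCC_WRB_EMBEDDED_MASK	1
#define MCC_WRB_SGE_CNT_SHIFT	3
#define MCC_WRB_SGE_CNT_MASK	0x1F

/* bit 0 = embedded payload; bits 3..7 = SGE count for non-embedded WRBs */
static uint32_t pack_wrb_dword0(int embedded, uint8_t sge_cnt)
{
	if (embedded)
		return MCC_WRB_EMBEDDED_MASK;
	return (uint32_t)(sge_cnt & MCC_WRB_SGE_CNT_MASK) << MCC_WRB_SGE_CNT_SHIFT;
}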
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ba258217614e..a4844578e357 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -166,33 +166,6 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
}
/**
- * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
- * @beiscsi_conn: The pointer to beiscsi_conn structure
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
- struct beiscsi_conn *beiscsi_conn,
- unsigned int cid)
-{
- uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
- if (phba->conn_table[cri_index]) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Connection table already occupied. Detected clash\n");
-
- return -EINVAL;
- } else {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
- cri_index, beiscsi_conn);
-
- phba->conn_table[cri_index] = beiscsi_conn;
- }
- return 0;
-}
-
-/**
* beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
* @cls_session: pointer to iscsi cls session
* @cls_conn: pointer to iscsi cls conn
@@ -212,6 +185,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct hwi_wrb_context *pwrb_context;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
+ uint16_t cri_index;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -229,20 +203,34 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST;
}
-
- pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
- beiscsi_ep->ep_cid)];
+ cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+ if (phba->conn_table[cri_index]) {
+ if (beiscsi_conn != phba->conn_table[cri_index] ||
+ beiscsi_ep != phba->conn_table[cri_index]->ep) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n",
+ cri_index,
+ beiscsi_ep->ep_cid,
+ beiscsi_conn,
+ phba->conn_table[cri_index]);
+ return -EINVAL;
+ }
+ }
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
+ /**
+ * Each connection is associated with a WRBQ kept in wrb_context.
+ * Store doorbell offset for transmit path.
+ */
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
-
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
- beiscsi_conn, conn, beiscsi_ep->ep_cid);
-
- return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
+ "BS_%d : cid %d phba->conn_table[%u]=%p\n",
+ beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
+ phba->conn_table[cri_index] = beiscsi_conn;
+ return 0;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
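beiscsi_conn_bind() above now performs the connection-table check inline: a slot may be (re)used only if it is empty or already holds this very conn/endpoint pair, otherwise the bind is refused. Reduced to its essence (simplified sketch; the real check also compares the endpoint):

#include <errno.h>

static int bind_slot(void **table, unsigned int idx, void *conn)
{
	if (table[idx] && table[idx] != conn)
		return -EINVAL;	/* stale or foreign entry: refuse the bind */
	table[idx] = conn;
	return 0;
}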
@@ -973,9 +961,9 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
*/
static int beiscsi_get_cid(struct beiscsi_hba *phba)
{
- unsigned short cid = 0xFFFF, cid_from_ulp;
- struct ulp_cid_info *cid_info = NULL;
uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+ unsigned short cid, cid_from_ulp;
+ struct ulp_cid_info *cid_info;
/* Find the ULP which has more CID available */
cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
@@ -984,20 +972,27 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
BEISCSI_ULP1_AVLBL_CID(phba) : 0;
cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
BEISCSI_ULP0 : BEISCSI_ULP1;
-
- if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
- cid_info = phba->cid_array_info[cid_from_ulp];
- if (!cid_info->avlbl_cids)
- return cid;
-
- cid = cid_info->cid_array[cid_info->cid_alloc++];
-
- if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
- phba, cid_from_ulp))
- cid_info->cid_alloc = 0;
-
- cid_info->avlbl_cids--;
+ /**
+ * If the iSCSI protocol is loaded only on ULP 0 and cid_avlbl_ulp
+ * is zero for both ULPs, ULP 1 is still selected above, so check
+ * that the ULP is actually loaded before handing out a new CID.
+ */
+ if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported))
+ return BE_INVALID_CID;
+
+ cid_info = phba->cid_array_info[cid_from_ulp];
+ cid = cid_info->cid_array[cid_info->cid_alloc];
+ if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : failed to get cid: available %u:%u\n",
+ cid_info->avlbl_cids, cid_info->cid_free);
+ return BE_INVALID_CID;
}
+ /* empty the slot */
+ cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID;
+ if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp))
+ cid_info->cid_alloc = 0;
+ cid_info->avlbl_cids--;
return cid;
}
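beiscsi_get_cid() above, together with the beiscsi_put_cid() hunk that follows, turns the per-ULP CID array into a circular buffer with BE_INVALID_CID as a sentinel: allocation empties the slot and freeing refuses to overwrite a slot that is still occupied, so leaks and double puts become detectable. A stand-alone model of that scheme (names shortened, error logging omitted):

#include <stdint.h>

#define INVALID_CID	0xffff	/* mirrors BE_INVALID_CID */

struct cid_pool {
	uint16_t *cid_array;	/* ring of CIDs, INVALID_CID marks empty slots */
	uint16_t count;		/* ring size */
	uint16_t alloc;		/* next slot to take from */
	uint16_t free;		/* next slot to return to */
	uint16_t avlbl;		/* number of usable entries */
};

static uint16_t cid_alloc(struct cid_pool *p)
{
	uint16_t cid = p->cid_array[p->alloc];

	if (!p->avlbl || cid == INVALID_CID)
		return INVALID_CID;
	p->cid_array[p->alloc] = INVALID_CID;	/* empty the slot */
	if (++p->alloc == p->count)
		p->alloc = 0;
	p->avlbl--;
	return cid;
}

static void cid_free(struct cid_pool *p, uint16_t cid)
{
	if (p->cid_array[p->free] != INVALID_CID)
		return;		/* slot still occupied: refuse the put */
	p->cid_array[p->free] = cid;
	if (++p->free == p->count)
		p->free = 0;
	p->avlbl++;
}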
@@ -1008,22 +1003,28 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
*/
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{
- uint16_t cid_post_ulp;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_wrb_context *pwrb_context;
- struct ulp_cid_info *cid_info = NULL;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct ulp_cid_info *cid_info;
+ uint16_t cid_post_ulp;
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
cid_post_ulp = pwrb_context->ulp_num;
cid_info = phba->cid_array_info[cid_post_ulp];
- cid_info->avlbl_cids++;
-
+ /* fill only in empty slot */
+ if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : failed to put cid %u: available %u:%u\n",
+ cid, cid_info->avlbl_cids, cid_info->cid_free);
+ return;
+ }
cid_info->cid_array[cid_info->cid_free++] = cid;
if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
cid_info->cid_free = 0;
+ cid_info->avlbl_cids++;
}
/**
@@ -1037,8 +1038,8 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
- phba->ep_array[BE_GET_CRI_FROM_CID
- (beiscsi_ep->ep_cid)] = NULL;
+ /* clear this to track freeing in beiscsi_ep_disconnect */
+ phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL;
/**
* Check if any connection resource allocated by driver
@@ -1049,6 +1050,11 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
return;
beiscsi_conn = beiscsi_ep->conn;
+ /**
+ * Break ep->conn link here so that completions after
+ * this are ignored.
+ */
+ beiscsi_ep->conn = NULL;
if (beiscsi_conn->login_in_progress) {
beiscsi_free_mgmt_task_handles(beiscsi_conn,
beiscsi_conn->task);
@@ -1079,7 +1085,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : In beiscsi_open_conn\n");
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
- if (beiscsi_ep->ep_cid == 0xFFFF) {
+ if (beiscsi_ep->ep_cid == BE_INVALID_CID) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : No free cid available\n");
return ret;
@@ -1114,7 +1120,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
nonemb_cmd.size = req_memsize;
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
- if (tag <= 0) {
+ if (!tag) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
@@ -1285,26 +1291,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
}
/**
- * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
- unsigned int cid)
-{
- uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
- if (phba->conn_table[cri_index])
- phba->conn_table[cri_index] = NULL;
- else {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : Connection table Not occupied.\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/**
* beiscsi_ep_disconnect - Tears down the TCP connection
* @ep: endpoint to be used
*
@@ -1318,13 +1304,23 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
unsigned int tag;
uint8_t mgmt_invalidate_flag, tcp_upload_flag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
+ uint16_t cri_index;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+ "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n",
beiscsi_ep->ep_cid);
+ cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+ if (!phba->ep_array[cri_index]) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : ep_array at %u cid %u empty\n",
+ cri_index,
+ beiscsi_ep->ep_cid);
+ return;
+ }
+
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
@@ -1356,7 +1352,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
free_ep:
msleep(BEISCSI_LOGOUT_SYNC_DELAY);
beiscsi_free_ep(beiscsi_ep);
- beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
+ if (!phba->conn_table[cri_index])
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : conn_table empty at %u: cid %u\n",
+ cri_index,
+ beiscsi_ep->ep_cid);
+ phba->conn_table[cri_index] = NULL;
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index b5112d6d7e73..32b2713cec93 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -67,8 +67,6 @@ beiscsi_##_name##_disp(struct device *dev,\
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct beiscsi_hba *phba = iscsi_host_priv(shost); \
- uint32_t param_val = 0; \
- param_val = phba->attr_##_name;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
phba->attr_##_name);\
}
@@ -218,160 +216,156 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
+ struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
struct iscsi_cls_session *cls_session;
- struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
- struct beiscsi_io_task *aborted_io_task;
- struct iscsi_conn *conn;
+ struct beiscsi_io_task *abrt_io_task;
struct beiscsi_conn *beiscsi_conn;
- struct beiscsi_hba *phba;
struct iscsi_session *session;
- struct invalidate_command_table *inv_tbl;
- struct be_dma_mem nonemb_cmd;
- unsigned int cid, tag, num_invalidate;
+ struct invldt_cmd_tbl inv_tbl;
+ struct beiscsi_hba *phba;
+ struct iscsi_conn *conn;
int rc;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->frwd_lock);
- if (!aborted_task || !aborted_task->sc) {
- /* we raced */
- spin_unlock_bh(&session->frwd_lock);
- return SUCCESS;
- }
-
- aborted_io_task = aborted_task->dd_data;
- if (!aborted_io_task->scsi_cmnd) {
- /* raced or invalid command */
- spin_unlock_bh(&session->frwd_lock);
+ /* check if we raced, task just got cleaned up under us */
+ spin_lock_bh(&session->back_lock);
+ if (!abrt_task || !abrt_task->sc) {
+ spin_unlock_bh(&session->back_lock);
return SUCCESS;
}
- spin_unlock_bh(&session->frwd_lock);
- /* Invalidate WRB Posted for this Task */
- AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
- aborted_io_task->pwrb_handle->pwrb,
- 1);
-
- conn = aborted_task->conn;
+ /* get a task ref till FW processes the req for the ICD used */
+ __iscsi_get_task(abrt_task);
+ abrt_io_task = abrt_task->dd_data;
+ conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba;
-
- /* invalidate iocb */
- cid = beiscsi_conn->beiscsi_conn_cid;
- inv_tbl = phba->inv_tbl;
- memset(inv_tbl, 0x0, sizeof(*inv_tbl));
- inv_tbl->cid = cid;
- inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
- num_invalidate = 1;
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
- sizeof(struct invalidate_commands_params_in),
- &nonemb_cmd.dma);
- if (nonemb_cmd.va == NULL) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
- "BM_%d : Failed to allocate memory for"
- "mgmt_invalidate_icds\n");
- return FAILED;
+ /* mark the WRB invalid if FW has not processed it yet */
+ if (is_chip_be2_be3r(phba)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ abrt_io_task->pwrb_handle->pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
+ abrt_io_task->pwrb_handle->pwrb, 1);
}
- nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
+ inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
+ spin_unlock_bh(&session->back_lock);
- tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
- cid, &nonemb_cmd);
- if (!tag) {
+ rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
+ iscsi_put_task(abrt_task);
+ if (rc) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
- "BM_%d : mgmt_invalidate_icds could not be"
- "submitted\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
-
+ "BM_%d : sc %p invalidation failed %d\n",
+ sc, rc);
return FAILED;
}
- rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
- if (rc != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
-
return iscsi_eh_abort(sc);
}
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
- struct iscsi_task *abrt_task;
- struct beiscsi_io_task *abrt_io_task;
- struct iscsi_conn *conn;
+ struct beiscsi_invldt_cmd_tbl {
+ struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
+ struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
+ } *inv_tbl;
+ struct iscsi_cls_session *cls_session;
struct beiscsi_conn *beiscsi_conn;
- struct beiscsi_hba *phba;
+ struct beiscsi_io_task *io_task;
struct iscsi_session *session;
- struct iscsi_cls_session *cls_session;
- struct invalidate_command_table *inv_tbl;
- struct be_dma_mem nonemb_cmd;
- unsigned int cid, tag, i, num_invalidate;
- int rc;
+ struct beiscsi_hba *phba;
+ struct iscsi_conn *conn;
+ struct iscsi_task *task;
+ unsigned int i, nents;
+ int rc, more = 0;
- /* invalidate iocbs */
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+
spin_lock_bh(&session->frwd_lock);
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
spin_unlock_bh(&session->frwd_lock);
return FAILED;
}
+
conn = session->leadconn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba;
- cid = beiscsi_conn->beiscsi_conn_cid;
- inv_tbl = phba->inv_tbl;
- memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
- num_invalidate = 0;
+
+ inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
+ if (!inv_tbl) {
+ spin_unlock_bh(&session->frwd_lock);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : invldt_cmd_tbl alloc failed\n");
+ return FAILED;
+ }
+ nents = 0;
+ /* take back_lock to prevent task from getting cleaned up under us */
+ spin_lock(&session->back_lock);
for (i = 0; i < conn->session->cmds_max; i++) {
- abrt_task = conn->session->cmds[i];
- abrt_io_task = abrt_task->dd_data;
- if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+ task = conn->session->cmds[i];
+ if (!task->sc)
continue;
- if (sc->device->lun != abrt_task->sc->device->lun)
+ if (sc->device->lun != task->sc->device->lun)
continue;
+ /**
+ * Can't fit any more commands? Normally this won't happen because
+ * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
+ */
+ if (nents == BE_INVLDT_CMD_TBL_SZ) {
+ more = 1;
+ break;
+ }
- /* Invalidate WRB Posted for this Task */
- AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
- abrt_io_task->pwrb_handle->pwrb,
- 1);
+ /* get a task ref till FW processes the req for the ICD used */
+ __iscsi_get_task(task);
+ io_task = task->dd_data;
+ /* mark the WRB invalid if FW has not processed it yet */
+ if (is_chip_be2_be3r(phba)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ io_task->pwrb_handle->pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
+ io_task->pwrb_handle->pwrb, 1);
+ }
- inv_tbl->cid = cid;
- inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
- num_invalidate++;
- inv_tbl++;
+ inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
+ inv_tbl->task[nents] = task;
+ nents++;
}
+ spin_unlock_bh(&session->back_lock);
spin_unlock_bh(&session->frwd_lock);
- inv_tbl = phba->inv_tbl;
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
- sizeof(struct invalidate_commands_params_in),
- &nonemb_cmd.dma);
- if (nonemb_cmd.va == NULL) {
+ rc = SUCCESS;
+ if (!nents)
+ goto end_reset;
+
+ if (more) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
- "BM_%d : Failed to allocate memory for"
- "mgmt_invalidate_icds\n");
- return FAILED;
+ "BM_%d : number of cmds exceeds size of invalidation table\n");
+ rc = FAILED;
+ goto end_reset;
}
- nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
- memset(nonemb_cmd.va, 0, nonemb_cmd.size);
- tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
- cid, &nonemb_cmd);
- if (!tag) {
+
+ if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
- "BM_%d : mgmt_invalidate_icds could not be"
- " submitted\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
- return FAILED;
+ "BM_%d : cid %u scmds invalidation failed\n",
+ beiscsi_conn->beiscsi_conn_cid);
+ rc = FAILED;
}
- rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
- if (rc != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
- return iscsi_eh_device_reset(sc);
+end_reset:
+ for (i = 0; i < nents; i++)
+ iscsi_put_task(inv_tbl->task[i]);
+ kfree(inv_tbl);
+
+ if (rc == SUCCESS)
+ rc = iscsi_eh_device_reset(sc);
+ return rc;
}
/*------------------- PCI Driver operations and data ----------------- */
@@ -395,6 +389,7 @@ static struct scsi_host_template beiscsi_sht = {
.change_queue_depth = scsi_change_queue_depth,
.slave_configure = beiscsi_slave_configure,
.target_alloc = iscsi_target_alloc,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = beiscsi_eh_abort,
.eh_device_reset_handler = beiscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_session_reset,
@@ -646,7 +641,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
- phba->params.eq_timer = 64;
phba->params.num_eq_entries = 1024;
phba->params.num_cq_entries = 1024;
phba->params.wrbs_per_cxn = 256;
@@ -964,6 +958,10 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
unsigned long flags;
spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
+ if (!pwrb_context->wrb_handles_available) {
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
+ return NULL;
+ }
pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
@@ -1014,6 +1012,7 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
pwrb_context->free_index = 0;
else
pwrb_context->free_index++;
+ pwrb_handle->pio_handle = NULL;
spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
@@ -1224,6 +1223,7 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
uint16_t wrb_index, cid, cri_index;
struct hwi_controller *phwi_ctrlr;
struct wrb_handle *pwrb_handle;
+ struct iscsi_session *session;
struct iscsi_task *task;
phwi_ctrlr = phba->phwi_ctrlr;
@@ -1242,8 +1242,12 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
cri_index = BE_GET_CRI_FROM_CID(cid);
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
+ session = beiscsi_conn->conn->session;
+ spin_lock_bh(&session->back_lock);
task = pwrb_handle->pio_handle;
- iscsi_put_task(task);
+ if (task)
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
}
static void
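Both completion paths above now take session->back_lock before touching pwrb_handle->pio_handle and bail out when the handle has already been cleared by beiscsi_put_wrb_handle(), closing the race between completions and task teardown. The pattern in miniature (pthread analogue of the spinlock; names are placeholders):

#include <pthread.h>

static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER;

/* task_slot plays the role of pwrb_handle->pio_handle. */
static void complete_one(void **task_slot, void (*put_task)(void *))
{
	void *task;

	pthread_mutex_lock(&back_lock);
	task = *task_slot;
	if (task)
		put_task(task);	/* complete only if the handle is still live */
	pthread_mutex_unlock(&back_lock);
}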
@@ -1323,16 +1327,15 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol)
{
- struct hwi_wrb_context *pwrb_context;
- struct wrb_handle *pwrb_handle;
- struct iscsi_wrb *pwrb = NULL;
- struct hwi_controller *phwi_ctrlr;
- struct iscsi_task *task;
- unsigned int type;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
struct common_sol_cqe csol_cqe = {0};
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct wrb_handle *pwrb_handle;
+ struct iscsi_task *task;
uint16_t cri_index = 0;
+ uint8_t type;
phwi_ctrlr = phba->phwi_ctrlr;
@@ -1345,11 +1348,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
pwrb_handle = pwrb_context->pwrb_handle_basestd[
csol_cqe.wrb_index];
+ spin_lock_bh(&session->back_lock);
task = pwrb_handle->pio_handle;
- pwrb = pwrb_handle->pwrb;
+ if (!task) {
+ spin_unlock_bh(&session->back_lock);
+ return;
+ }
type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
- spin_lock_bh(&session->back_lock);
switch (type) {
case HWH_TYPE_IO:
case HWH_TYPE_IO_RD:
@@ -1711,13 +1717,12 @@ beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
struct list_head *hfree_list;
struct phys_addr *pasync_sge;
u32 ring_id, doorbell = 0;
- u16 index, num_entries;
u32 doorbell_offset;
u16 prod = 0, cons;
+ u16 index;
phwi_ctrlr = phba->phwi_ctrlr;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
- num_entries = pasync_ctx->num_entries;
if (header) {
cons = pasync_ctx->async_header.free_entries;
hfree_list = &pasync_ctx->async_header.free_list;
@@ -2374,13 +2379,10 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
uint8_t mem_descr_index, ulp_num;
- unsigned int num_cq_pages, num_async_pdu_buf_pages;
+ unsigned int num_async_pdu_buf_pages;
unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
- num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
- sizeof(struct sol_cqe));
-
phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
@@ -2737,7 +2739,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
- /* get async_ctx for each ULP */
+ /* get async_ctx for each ULP */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
@@ -3367,7 +3369,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context,
struct hwi_controller *phwi_ctrlr)
{
- unsigned int wrb_mem_index, offset, size, num_wrb_rings;
+ unsigned int num_wrb_rings;
u64 pa_addr_lo;
unsigned int idx, num, i, ulp_num;
struct mem_array *pwrb_arr;
@@ -3432,10 +3434,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
}
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
- wrb_mem_index = 0;
- offset = 0;
- size = 0;
-
if (ulp_count > 1) {
ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
@@ -3663,7 +3661,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
struct be_ctrl_info *ctrl = &phba->ctrl;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- struct hd_async_context *pasync_ctx;
int i, eq_for_mcc, ulp_num;
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
@@ -3700,8 +3697,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
q = &phwi_context->be_def_dataq[ulp_num];
if (q->created)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
}
}
@@ -3804,7 +3799,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
/**
* Now that the default PDU rings have been created,
* let EP know about it.
- * Call beiscsi_cmd_iscsi_cleanup before posting?
*/
beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
ulp_num);
@@ -3850,14 +3844,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_ctrlr->wrb_context[cri].cid] =
async_arr_idx++;
}
- /**
- * Now that the default PDU rings have been created,
- * let EP know about it.
- */
- beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
- ulp_num);
- beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
- ulp_num);
}
}
@@ -3934,31 +3920,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
kfree(phba->phwi_ctrlr);
}
-static int beiscsi_init_controller(struct beiscsi_hba *phba)
-{
- int ret = -ENOMEM;
-
- ret = beiscsi_get_memory(phba);
- if (ret < 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_dev_probe -"
- "Failed in beiscsi_alloc_memory\n");
- return ret;
- }
-
- ret = hwi_init_controller(phba);
- if (ret)
- goto free_init;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : Return success from beiscsi_init_controller");
-
- return 0;
-
-free_init:
- beiscsi_free_mem(phba);
- return ret;
-}
-
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
@@ -4089,9 +4050,10 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
}
/* Allocate memory for CID array */
- ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
- BEISCSI_GET_CID_COUNT(phba,
- ulp_num), GFP_KERNEL);
+ ptr_cid_info->cid_array =
+ kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
+ sizeof(*ptr_cid_info->cid_array),
+ GFP_KERNEL);
if (!ptr_cid_info->cid_array) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory"
@@ -4231,33 +4193,30 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
{
int ret;
- ret = beiscsi_init_controller(phba);
+ ret = hwi_init_controller(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_dev_probe - Failed in"
- "beiscsi_init_controller\n");
+ "BM_%d : init controller failed\n");
return ret;
}
ret = beiscsi_init_sgl_handle(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_dev_probe - Failed in"
- "beiscsi_init_sgl_handle\n");
- goto do_cleanup_ctrlr;
+ "BM_%d : init sgl handles failed\n");
+ goto cleanup_port;
}
ret = hba_setup_cid_tbls(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed in hba_setup_cid_tbls\n");
+ "BM_%d : setup CID table failed\n");
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
- goto do_cleanup_ctrlr;
+ goto cleanup_port;
}
-
return ret;
-do_cleanup_ctrlr:
+cleanup_port:
hwi_cleanup_port(phba);
return ret;
}
@@ -5417,10 +5376,10 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->can_queue = phba->params.ios_per_ctrl;
- ret = hwi_init_controller(phba);
- if (ret) {
+ ret = beiscsi_init_port(phba);
+ if (ret < 0) {
__beiscsi_log(phba, KERN_ERR,
- "BM_%d : init controller failed %d\n", ret);
+ "BM_%d : init port failed\n");
goto disable_msix;
}
@@ -5526,6 +5485,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
cancel_work_sync(&pbe_eq->mcc_work);
}
hwi_cleanup_port(phba);
+ beiscsi_cleanup_port(phba);
}
static void beiscsi_sess_work(struct work_struct *work)
@@ -5638,11 +5598,12 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
static int beiscsi_dev_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
- struct beiscsi_hba *phba = NULL;
- struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct beiscsi_hba *phba = NULL;
struct be_eq_obj *pbe_eq;
unsigned int s_handle;
+ char wq_name[20];
int ret, i;
ret = beiscsi_enable_pci(pcidev);
@@ -5680,6 +5641,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
case OC_DEVICE_ID2:
phba->generation = BE_GEN2;
phba->iotask_fn = beiscsi_iotask;
+ dev_warn(&pcidev->dev,
+ "Obsolete/Unsupported BE2 Adapter Family\n");
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
@@ -5735,11 +5698,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = beiscsi_get_memory(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : alloc host mem failed\n");
+ goto free_port;
+ }
+
ret = beiscsi_init_port(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_dev_probe-"
- "Failed in beiscsi_init_port\n");
+ "BM_%d : init port failed\n");
+ beiscsi_free_mem(phba);
goto free_port;
}
@@ -5754,9 +5724,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
+ snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
phba->shost->host_no);
- phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
+ phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
@@ -5881,7 +5851,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
/* free all resources */
destroy_workqueue(phba->wq);
- beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba);
/* ctrl uninit */
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 6376657e45f7..218857926566 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "11.2.0.0"
+#define BUILD_STR "11.2.1.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -57,7 +57,6 @@
#define BE2_IO_DEPTH 1024
#define BE2_MAX_SESSIONS 256
-#define BE2_CMDS_PER_CXN 128
#define BE2_TMFS 16
#define BE2_NOPOUT_REQ 16
#define BE2_SGE 32
@@ -72,8 +71,13 @@
#define BEISCSI_SGLIST_ELEMENTS 30
-#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
+/**
+ * BE_INVLDT_CMD_TBL_SZ is 128, the total number of commands that can
+ * be invalidated at a time; keep this in mind before changing
+ * BEISCSI_CMD_PER_LUN.
+ */
+#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
@@ -239,19 +243,7 @@ struct hba_parameters {
unsigned int num_cq_entries;
unsigned int num_eq_entries;
unsigned int wrbs_per_cxn;
- unsigned int crashmode;
- unsigned int hba_num;
-
- unsigned int mgmt_ws_sz;
unsigned int hwi_ws_sz;
-
- unsigned int eto;
- unsigned int ldto;
-
- unsigned int dbg_flags;
- unsigned int num_cxn;
-
- unsigned int eq_timer;
/**
* These are calculated from other params. They're here
* for debug purposes
@@ -272,11 +264,6 @@ struct hba_parameters {
unsigned int num_sge;
};
-struct invalidate_command_table {
- unsigned short icd;
- unsigned short cid;
-} __packed;
-
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
@@ -334,7 +321,6 @@ struct beiscsi_hba {
struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
- unsigned short asic_revision;
unsigned int num_cpus;
unsigned int nxt_cqid;
struct msix_entry msix_entries[MAX_CPUS];
@@ -355,9 +341,9 @@ struct beiscsi_hba {
spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock;
spinlock_t async_pdu_lock;
- unsigned int age;
struct list_head hba_queue;
#define BE_MAX_SESSION 2048
+#define BE_INVALID_CID 0xffff
#define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
@@ -425,12 +411,10 @@ struct beiscsi_hba {
u8 port_name;
u8 port_speed;
char fw_ver_str[BEISCSI_VER_STRLEN];
- char wq_name[20];
struct workqueue_struct *wq; /* The actuak work queue */
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int interface_handle;
- struct invalidate_command_table inv_tbl[128];
struct be_aic_obj aic_obj[MAX_CPUS];
unsigned int attr_log_enable;
@@ -525,10 +509,6 @@ struct beiscsi_io_task {
struct scsi_cmnd *scsi_cmnd;
int num_sg;
struct hwi_wrb_context *pwrb_context;
- unsigned int cmd_sn;
- unsigned int flags;
- unsigned short cid;
- unsigned short header_len;
itt_t libiscsi_itt;
struct be_cmd_bhs *cmd_bhs;
struct be_bus_address bhs_pa;
@@ -842,7 +822,7 @@ struct amap_iscsi_wrb_v2 {
u8 diff_enbl; /* DWORD 11 */
u8 u_run; /* DWORD 11 */
u8 o_run; /* DWORD 11 */
- u8 invalid; /* DWORD 11 */
+ u8 invld; /* DWORD 11 */
u8 dsp; /* DWORD 11 */
u8 dmsg; /* DWORD 11 */
u8 rsvd4; /* DWORD 11 */
@@ -1042,10 +1022,8 @@ struct hwi_controller {
struct list_head io_sgl_list;
struct list_head eh_sgl_list;
struct sgl_handle *psgl_handle_base;
- unsigned int wrb_mem_index;
struct hwi_wrb_context *wrb_context;
- struct mcc_wrb *pmcc_wrb_base;
struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
struct hwi_context_memory *phwi_ctxt;
@@ -1062,9 +1040,7 @@ enum hwh_type_enum {
};
struct wrb_handle {
- enum hwh_type_enum type;
unsigned short wrb_index;
-
struct iscsi_task *pio_handle;
struct iscsi_wrb *pwrb;
};
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ac05317bba7f..2f6d5c2ac329 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -66,7 +66,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct bsg_job *job,
struct be_dma_mem *nonemb_cmd)
{
- struct be_cmd_resp_hdr *resp;
struct be_mcc_wrb *wrb;
struct be_sge *mcc_sge;
unsigned int tag = 0;
@@ -76,7 +75,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
nonemb_cmd->size = job->request_payload.payload_len;
memset(nonemb_cmd->va, 0, nonemb_cmd->size);
- resp = nonemb_cmd->va;
region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
@@ -128,50 +126,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
-unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
- struct invalidate_command_table *inv_tbl,
- unsigned int num_invalidate, unsigned int cid,
- struct be_dma_mem *nonemb_cmd)
-
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_sge *sge;
- struct invalidate_commands_params_in *req;
- unsigned int i, tag;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = nonemb_cmd->va;
- memset(req, 0, sizeof(*req));
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
- OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
- sizeof(*req));
- req->ref_handle = 0;
- req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
- for (i = 0; i < num_invalidate; i++) {
- req->table[i].icd = inv_tbl->icd;
- req->table[i].cid = inv_tbl->cid;
- req->icd_count++;
- inv_tbl++;
- }
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
unsigned short cid,
@@ -1066,7 +1020,6 @@ unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_cmd_get_session_resp *resp;
struct be_cmd_get_session_req *req;
struct be_dma_mem *nonemb_cmd;
struct be_mcc_wrb *wrb;
@@ -1081,7 +1034,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
}
nonemb_cmd = &phba->boot_struct.nonemb_cmd;
- nonemb_cmd->size = sizeof(*resp);
+ nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
nonemb_cmd->size,
&nonemb_cmd->dma);
@@ -1096,7 +1049,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
- sizeof(*resp));
+ sizeof(struct be_cmd_get_session_resp));
req->session_handle = phba->boot_struct.s_handle;
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
@@ -1309,7 +1262,8 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
case OC_DEVICE_ID2:
- return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+ return snprintf(buf, PAGE_SIZE,
+ "Obsolete/Unsupported BE2 Adapter Family\n");
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
@@ -1341,7 +1295,7 @@ beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+ return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n",
phba->fw_config.phys_port);
}
@@ -1494,3 +1448,64 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
(params->dw[offsetof(struct amap_beiscsi_offload_params,
exp_statsn) / 32] + 1));
}
+
+int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ struct invldt_cmd_tbl *inv_tbl,
+ unsigned int nents)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct invldt_cmds_params_in *req;
+ struct be_dma_mem nonemb_cmd;
+ struct be_mcc_wrb *wrb;
+ unsigned int i, tag;
+ struct be_sge *sge;
+ int rc;
+
+ if (!nents || nents > BE_INVLDT_CMD_TBL_SZ)
+ return -EINVAL;
+
+ nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
+ nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.size,
+ &nonemb_cmd.dma);
+ if (!nonemb_cmd.va) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : invldt_cmds_params alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return -ENOMEM;
+ }
+
+ req = nonemb_cmd.va;
+ be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
+ sizeof(*req));
+ req->ref_handle = 0;
+ req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
+ for (i = 0; i < nents; i++) {
+ req->table[i].icd = inv_tbl[i].icd;
+ req->table[i].cid = inv_tbl[i].cid;
+ req->icd_count++;
+ }
+ sge = nonembedded_sgl(wrb);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+ sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma));
+ sge->len = cpu_to_le32(nonemb_cmd.size);
+
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return rc;
+}
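
The new beiscsi_mgmt_invalidate_icds() above batches up to BE_INVLDT_CMD_TBL_SZ ICD/CID pairs into a single non-embedded MCC request and waits synchronously for the completion, freeing the DMA buffer unless the wait returns -EBUSY. The standalone userspace sketch below models only the bounded-table packing step; struct invldt_req_model and build_invldt_req() are illustrative names, not driver symbols.

/*
 * Userspace model (not driver code) of the bounded invalidation table:
 * up to BE_INVLDT_CMD_TBL_SZ ICD/CID pairs are copied into one request.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BE_INVLDT_CMD_TBL_SZ 128

struct invldt_cmd_tbl {
	unsigned short icd;
	unsigned short cid;
};

struct invldt_req_model {
	unsigned int ref_handle;
	unsigned int icd_count;
	struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ];
};

static int build_invldt_req(struct invldt_req_model *req,
			    const struct invldt_cmd_tbl *inv_tbl,
			    unsigned int nents)
{
	unsigned int i;

	if (!nents || nents > BE_INVLDT_CMD_TBL_SZ)
		return -EINVAL;

	memset(req, 0, sizeof(*req));
	for (i = 0; i < nents; i++) {
		req->table[i] = inv_tbl[i];
		req->icd_count++;
	}
	return 0;
}

int main(void)
{
	struct invldt_cmd_tbl tbl[2] = { { .icd = 5, .cid = 1 },
					 { .icd = 9, .cid = 1 } };
	struct invldt_req_model req;

	if (build_invldt_req(&req, tbl, 2) == 0)
		printf("icd_count=%u first icd=%u\n",
		       req.icd_count, req.table[0].icd);
	return 0;
}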
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index b897cfd57c72..308f1472f98a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -36,66 +36,6 @@
#define PCICFG_UE_STATUS_MASK_LOW 0xA8
#define PCICFG_UE_STATUS_MASK_HI 0xAC
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_sge {
- u8 pa_lo[32]; /* dword 0 */
- u8 pa_hi[32]; /* dword 1 */
- u8 length[32]; /* DWORD 2 */
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb_payload {
- union {
- struct amap_mcc_sge sgl[19];
- u8 embedded[59 * 32]; /* DWORDS 57 to 115 */
- } u;
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb {
- u8 embedded; /* DWORD 0 */
- u8 rsvd0[2]; /* DWORD 0 */
- u8 sge_count[5]; /* DWORD 0 */
- u8 rsvd1[16]; /* DWORD 0 */
- u8 special[8]; /* DWORD 0 */
- u8 payload_length[32];
- u8 tag[64]; /* DWORD 2 */
- u8 rsvd2[32]; /* DWORD 4 */
- struct amap_mcc_wrb_payload payload;
-};
-
-struct mcc_sge {
- u32 pa_lo; /* dword 0 */
- u32 pa_hi; /* dword 1 */
- u32 length; /* DWORD 2 */
-} __packed;
-
-struct mcc_wrb_payload {
- union {
- struct mcc_sge sgl[19];
- u32 embedded[59]; /* DWORDS 57 to 115 */
- } u;
-} __packed;
-
-#define MCC_WRB_EMBEDDED_MASK 0x00000001
-
-struct mcc_wrb {
- u32 dw[0]; /* DWORD 0 */
- u32 payload_length;
- u32 tag[2]; /* DWORD 2 */
- u32 rsvd2[1]; /* DWORD 4 */
- struct mcc_wrb_payload payload;
-};
-
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep,
@@ -104,10 +44,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid,
unsigned int upload_flag);
-unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
- struct invalidate_command_table *inv_tbl,
- unsigned int num_invalidate, unsigned int cid,
- struct be_dma_mem *nonemb_cmd);
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
struct bsg_job *job,
@@ -134,24 +70,31 @@ union iscsi_invalidate_connection_params {
struct iscsi_invalidate_connection_params_out response;
} __packed;
-struct invalidate_commands_params_in {
+#define BE_INVLDT_CMD_TBL_SZ 128
+struct invldt_cmd_tbl {
+ unsigned short icd;
+ unsigned short cid;
+} __packed;
+
+struct invldt_cmds_params_in {
struct be_cmd_req_hdr hdr;
unsigned int ref_handle;
unsigned int icd_count;
- struct invalidate_command_table table[128];
+ struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ];
unsigned short cleanup_type;
unsigned short unused;
} __packed;
-struct invalidate_commands_params_out {
+struct invldt_cmds_params_out {
+ struct be_cmd_resp_hdr hdr;
unsigned int ref_handle;
unsigned int icd_count;
- unsigned int icd_status[128];
+ unsigned int icd_status[BE_INVLDT_CMD_TBL_SZ];
} __packed;
-union invalidate_commands_params {
- struct invalidate_commands_params_in request;
- struct invalidate_commands_params_out response;
+union be_invldt_cmds_params {
+ struct invldt_cmds_params_in request;
+ struct invldt_cmds_params_out response;
} __packed;
struct mgmt_hba_attributes {
@@ -231,16 +174,6 @@ struct be_bsg_vendor_cmd {
#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
-/* MGMT CMD flags */
-
-#define MGMT_CMDH_FREE (1<<0)
-
-/* --- MGMT_ERROR_CODES --- */
-/* Error Codes returned in the status field of the CMD response header */
-#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */
-#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */
- /* the CMD_RESPONSE_HEADER */
-
#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
bus_address.u.a32.address_lo; \
@@ -270,6 +203,9 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short cid,
unsigned short issue_reset,
unsigned short savecfg_flag);
+int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ struct invldt_cmd_tbl *inv_tbl,
+ unsigned int nents);
int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 1e7e139d71ea..4aa61e20e82d 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -28,24 +28,6 @@
BFA_TRC_FILE(FCS, FCS);
/*
- * FCS sub-modules
- */
-struct bfa_fcs_mod_s {
- void (*attach) (struct bfa_fcs_s *fcs);
- void (*modinit) (struct bfa_fcs_s *fcs);
- void (*modexit) (struct bfa_fcs_s *fcs);
-};
-
-#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
-
-static struct bfa_fcs_mod_s fcs_modules[] = {
- { bfa_fcs_port_attach, NULL, NULL },
- { bfa_fcs_uf_attach, NULL, NULL },
- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
- bfa_fcs_fabric_modexit },
-};
-
-/*
* fcs_api BFA FCS API
*/
@@ -58,52 +40,19 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
complete(&bfad->comp);
}
-
-
/*
- * fcs_api BFA FCS API
- */
-
-/*
- * fcs attach -- called once to initialize data structures at driver attach time
+ * fcs initialization, called once after bfa initialization is complete
*/
void
-bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
- bfa_boolean_t min_cfg)
+bfa_fcs_init(struct bfa_fcs_s *fcs)
{
- int i;
- struct bfa_fcs_mod_s *mod;
-
- fcs->bfa = bfa;
- fcs->bfad = bfad;
- fcs->min_cfg = min_cfg;
- fcs->num_rport_logins = 0;
-
- bfa->fcs = BFA_TRUE;
- fcbuild_init();
-
- for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
- mod = &fcs_modules[i];
- if (mod->attach)
- mod->attach(fcs);
- }
+ bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+ bfa_trc(fcs, 0);
}
/*
- * fcs initialization, called once after bfa initialization is complete
+ * fcs_api BFA FCS API
*/
-void
-bfa_fcs_init(struct bfa_fcs_s *fcs)
-{
- int i;
- struct bfa_fcs_mod_s *mod;
-
- for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
- mod = &fcs_modules[i];
- if (mod->modinit)
- mod->modinit(fcs);
- }
-}
/*
* FCS update cfg - reset the pwwn/nwwn of fabric base logical port
@@ -180,26 +129,14 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
void
bfa_fcs_exit(struct bfa_fcs_s *fcs)
{
- struct bfa_fcs_mod_s *mod;
- int nmods, i;
-
bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
-
- nmods = ARRAY_SIZE(fcs_modules);
-
- for (i = 0; i < nmods; i++) {
-
- mod = &fcs_modules[i];
- if (mod->modexit) {
- bfa_wc_up(&fcs->wc);
- mod->modexit(fcs);
- }
- }
-
+ bfa_wc_up(&fcs->wc);
+ bfa_trc(fcs, 0);
+ bfa_lps_delete(fcs->fabric.lps);
+ bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE);
bfa_wc_wait(&fcs->wc);
}
-
/*
* Fabric module implementation.
*/
@@ -1128,62 +1065,6 @@ bfa_fcs_fabric_stop_comp(void *cbarg)
*/
/*
- * Attach time initialization.
- */
-void
-bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
-{
- struct bfa_fcs_fabric_s *fabric;
-
- fabric = &fcs->fabric;
- memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
-
- /*
- * Initialize base fabric.
- */
- fabric->fcs = fcs;
- INIT_LIST_HEAD(&fabric->vport_q);
- INIT_LIST_HEAD(&fabric->vf_q);
- fabric->lps = bfa_lps_alloc(fcs->bfa);
- WARN_ON(!fabric->lps);
-
- /*
- * Initialize fabric delete completion handler. Fabric deletion is
- * complete when the last vport delete is complete.
- */
- bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
- bfa_wc_up(&fabric->wc); /* For the base port */
-
- bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
- bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
-}
-
-void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
-{
- bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
- bfa_trc(fcs, 0);
-}
-
-/*
- * Module cleanup
- */
-void
-bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
-{
- struct bfa_fcs_fabric_s *fabric;
-
- bfa_trc(fcs, 0);
-
- /*
- * Cleanup base fabric.
- */
- fabric = &fcs->fabric;
- bfa_lps_delete(fabric->lps);
- bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
-}
-
-/*
* Fabric module stop -- stop FCS actions
*/
void
@@ -1633,12 +1514,6 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
}
}
-void
-bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
-{
- bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
-}
-
/*
* BFA FCS UF ( Unsolicited Frames)
*/
@@ -1706,8 +1581,44 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
bfa_uf_free(uf);
}
+/*
+ * fcs attach -- called once to initialize data structures at driver attach time
+ */
void
-bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+ bfa_boolean_t min_cfg)
{
+ struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+
+ fcs->bfa = bfa;
+ fcs->bfad = bfad;
+ fcs->min_cfg = min_cfg;
+ fcs->num_rport_logins = 0;
+
+ bfa->fcs = BFA_TRUE;
+ fcbuild_init();
+
+ bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+
+ memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+ /*
+ * Initialize base fabric.
+ */
+ fabric->fcs = fcs;
+ INIT_LIST_HEAD(&fabric->vport_q);
+ INIT_LIST_HEAD(&fabric->vf_q);
+ fabric->lps = bfa_lps_alloc(fcs->bfa);
+ WARN_ON(!fabric->lps);
+
+ /*
+ * Initialize fabric delete completion handler. Fabric deletion is
+ * complete when the last vport delete is complete.
+ */
+ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+ bfa_wc_up(&fabric->wc); /* For the base port */
+
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+ bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
}
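
With the fcs_modules dispatch table gone, bfa_fcs_exit() drives teardown directly: it arms the waiting counter (bfa_wc_init/bfa_wc_up), posts the fabric DELETE event, and blocks in bfa_wc_wait() until the completion callback fires. A minimal userspace model of that waiting-counter pattern is sketched below; the wc_* names here are illustrative, not the bfa implementation.

/*
 * Userspace model (illustrative only) of a waiting-counter teardown:
 * each outstanding sub-object holds one count; the done callback runs
 * when the last one completes.
 */
#include <stdio.h>

struct wc_model {
	int count;
	void (*done)(void *arg);
	void *arg;
};

static void wc_init(struct wc_model *wc, void (*done)(void *), void *arg)
{
	wc->count = 1;		/* initial reference held by the caller */
	wc->done = done;
	wc->arg = arg;
}

static void wc_up(struct wc_model *wc)
{
	wc->count++;
}

static void wc_down(struct wc_model *wc)
{
	if (--wc->count == 0)
		wc->done(wc->arg);
}

static void exit_complete(void *arg)
{
	printf("%s: all sub-objects deleted\n", (const char *)arg);
}

int main(void)
{
	struct wc_model wc;

	wc_init(&wc, exit_complete, "fcs_exit");
	wc_up(&wc);		/* one outstanding fabric delete */
	wc_down(&wc);		/* fabric delete completed */
	wc_down(&wc);		/* drop the caller's initial reference */
	return 0;
}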
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 0f797a55d504..e60f72b766ea 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -808,9 +808,7 @@ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
/*
* fabric protected interface functions
*/
-void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
@@ -827,8 +825,6 @@ void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
wwn_t fabric_name);
u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
-void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
-void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a00169ad91..b2e8c0dfc79c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- struct fc_bsg_request *bsg_request = bsg_request;
+ struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 02d806012fa1..7eb0eef18fdd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -813,6 +813,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_bus_reset_handler = bfad_im_reset_bus_handler,
@@ -835,6 +836,7 @@ struct scsi_host_template bfad_im_vport_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_bus_reset_handler = bfad_im_reset_bus_handler,
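
The bfad templates (and the bnx2fc, bnx2i, csiostor and cxgbi templates later in this series) gain an .eh_timed_out hook so the transport's generic handler decides whether a timed-out command should be retried or escalated. The sketch below models that dispatch with plain function pointers; scsi_ops_model and the handler names are illustrative, not the midlayer API.

/*
 * Userspace model (illustrative) of plugging a transport-provided
 * timeout handler into a host template of function pointers.
 */
#include <stdio.h>

enum timeout_action { MODEL_EH_RESET_TIMER, MODEL_EH_NOT_HANDLED };

struct scsi_ops_model {
	const char *name;
	enum timeout_action (*eh_timed_out)(int cmd_id);
};

/* Stand-in for the generic FC transport handler. */
static enum timeout_action fc_eh_timed_out_model(int cmd_id)
{
	printf("cmd %d: port blocked, restart the timer\n", cmd_id);
	return MODEL_EH_RESET_TIMER;
}

static enum timeout_action handle_timeout(const struct scsi_ops_model *ops,
					  int cmd_id)
{
	/* Fall back to full error recovery when no hook is provided. */
	if (!ops->eh_timed_out)
		return MODEL_EH_NOT_HANDLED;
	return ops->eh_timed_out(cmd_id);
}

int main(void)
{
	struct scsi_ops_model ops = {
		.name = "bfad_im_model",
		.eh_timed_out = fc_eh_timed_out_model,
	};

	handle_timeout(&ops, 42);
	return 0;
}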
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c639d5a02656..93b5a0012417 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2282,7 +2282,7 @@ static int _bnx2fc_create(struct net_device *netdev,
}
/* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(netdev))
phys_dev = vlan_dev_real_dev(netdev);
/* verify if the physical device is a netxtreme2 device */
@@ -2320,7 +2320,7 @@ static int _bnx2fc_create(struct net_device *netdev,
goto ifput_err;
}
- if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(netdev)) {
vlan_id = vlan_dev_vlan_id(netdev);
interface->vlan_enabled = 1;
}
@@ -2538,7 +2538,7 @@ static bool bnx2fc_match(struct net_device *netdev)
struct net_device *phys_dev = netdev;
mutex_lock(&bnx2fc_dev_lock);
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(netdev))
phys_dev = vlan_dev_real_dev(netdev);
if (bnx2fc_hba_lookup(phys_dev)) {
@@ -2947,6 +2947,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.module = THIS_MODULE,
.name = "QLogic Offload FCoE Initiator",
.queuecommand = bnx2fc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bnx2fc_eh_abort, /* abts */
.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index f501095f91ac..898461b146cc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -74,7 +74,7 @@ static void bnx2fc_cmd_timeout(struct work_struct *work)
&io_req->req_flags)) {
/* Handle internally generated ABTS timeout */
BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
- io_req->refcount.refcount.counter);
+ kref_read(&io_req->refcount));
if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
&io_req->req_flags))) {
/*
@@ -1141,7 +1141,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
return SUCCESS;
}
BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
- io_req->refcount.refcount.counter);
+ kref_read(&io_req->refcount));
/* Hold IO request across abort processing */
kref_get(&io_req->refcount);
@@ -1299,7 +1299,7 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
{
BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
"refcnt = %d, cmd_type = %d\n",
- io_req->refcount.refcount.counter, io_req->cmd_type);
+ kref_read(&io_req->refcount), io_req->cmd_type);
bnx2fc_scsi_done(io_req, DID_ERROR);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_comp)
@@ -1318,7 +1318,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
"refcnt = %d, cmd_type = %d\n",
io_req->xid,
- io_req->refcount.refcount.counter, io_req->cmd_type);
+ kref_read(&io_req->refcount), io_req->cmd_type);
if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
&io_req->req_flags)) {
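
The bnx2fc debug prints stop poking io_req->refcount.refcount.counter directly and go through kref_read(), which hides the refcount representation behind an accessor. A standalone model of that accessor pattern, using C11 atomics in place of the kernel's refcount_t, is sketched below; kref_model and the *_model helpers are illustrative names.

/*
 * Userspace model of reading a reference count through an accessor
 * instead of reaching into its internal counter field.
 */
#include <stdatomic.h>
#include <stdio.h>

struct kref_model {
	atomic_int refcount;
};

static void kref_init_model(struct kref_model *k)
{
	atomic_store(&k->refcount, 1);
}

static void kref_get_model(struct kref_model *k)
{
	atomic_fetch_add(&k->refcount, 1);
}

/* Debug-only snapshot of the current count. */
static int kref_read_model(const struct kref_model *k)
{
	return atomic_load(&k->refcount);
}

int main(void)
{
	struct kref_model ref;

	kref_init_model(&ref);
	kref_get_model(&ref);
	printf("refcnt = %d\n", kref_read_model(&ref));
	return 0;
}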
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 133901fd3e35..f32a66f89d25 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2259,6 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = {
.name = "QLogic Offload iSCSI Initiator",
.proc_name = "bnx2i",
.queuecommand = iscsi_queuecommand,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 89a52b941ea8..a1ff75f1384f 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2270,6 +2270,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
.slave_alloc = csio_slave_alloc,
@@ -2289,6 +2290,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
.slave_alloc = csio_slave_alloc,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 33e83464e091..1880eb6c68f7 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -90,6 +90,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9a2fdc305cf2..3fb3f5708ff7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -103,6 +103,7 @@ static struct scsi_host_template cxgb4i_host_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 9167bcd9fffe..bd7d39ecbd24 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -223,7 +223,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
struct cxgbi_device *cdev, *tmp;
int i;
- if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(ndev)) {
vdev = ndev;
ndev = vlan_dev_real_dev(ndev);
log_debug(1 << CXGBI_DBG_DEV,
@@ -256,7 +256,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
struct cxgbi_device *cdev;
int i;
- if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(ndev)) {
vdev = ndev;
ndev = vlan_dev_real_dev(ndev);
pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
@@ -290,7 +290,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
struct cxgbi_device *cdev, *tmp;
int i;
- if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ if (is_vlan_dev(ndev)) {
vdev = ndev;
ndev = vlan_dev_real_dev(ndev);
pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 95ba99044c3e..18e0ea83d361 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -301,7 +301,7 @@ static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
log_debug(1 << CXGBI_DBG_SOCK,
"%s, put csk 0x%p, ref %u-1.\n",
- fn, csk, atomic_read(&csk->refcnt.refcount));
+ fn, csk, kref_read(&csk->refcnt));
kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk)
@@ -310,7 +310,7 @@ static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
log_debug(1 << CXGBI_DBG_SOCK,
"%s, get csk 0x%p, ref %u+1.\n",
- fn, csk, atomic_read(&csk->refcnt.refcount));
+ fn, csk, kref_read(&csk->refcnt));
kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk)
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 0e9de5d62da2..d11dcc59ff46 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -54,6 +54,9 @@ extern const struct file_operations cxlflash_cxl_fops;
/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
+/* SQ for master issued cmds */
+#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
+
static inline void check_sizes(void)
{
@@ -155,8 +158,8 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
struct afu {
/* Stuff requiring alignment go first. */
-
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
+ struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/* Beware of alignment till here. Preferably introduce new
* fields after this point
@@ -171,9 +174,13 @@ struct afu {
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
- struct kref mapcount;
-
ctx_hndl_t ctx_hndl; /* master's context handle */
+
+ atomic_t hsq_credits;
+ spinlock_t hsq_slock;
+ struct sisl_ioarcb *hsq_start;
+ struct sisl_ioarcb *hsq_end;
+ struct sisl_ioarcb *hsq_curr;
u64 *hrrq_start;
u64 *hrrq_end;
u64 *hrrq_curr;
@@ -191,6 +198,23 @@ struct afu {
};
+static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
+{
+ u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
+
+ return afu_cap & cmd_mode;
+}
+
+static inline bool afu_is_sq_cmd_mode(struct afu *afu)
+{
+ return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
+}
+
+static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
+{
+ return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
+}
+
static inline u64 lun_to_lunid(u64 lun)
{
__be64 lun_id;
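
afu_is_cmd_mode() above derives the command mode from the capability field packed into the upper bits of interface_version, and the two wrappers test the SQ and IOARRIN capability bits. The standalone sketch below mirrors that decode; the shift and bit values used here are placeholders, since the real SISL_INTVER_CAP_* definitions live outside this hunk.

/*
 * Userspace model of testing capability bits packed into the upper part
 * of an interface-version register. Shift and bit values are placeholders.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_CAP_SHIFT		16	/* placeholder, not the SISL value */
#define MODEL_CAP_SQ_MODE	(1ULL << 0)
#define MODEL_CAP_IOARRIN_MODE	(1ULL << 1)

static bool model_is_cmd_mode(uint64_t interface_version, uint64_t cmd_mode)
{
	uint64_t cap = interface_version >> MODEL_CAP_SHIFT;

	return (cap & cmd_mode) != 0;
}

int main(void)
{
	/* Version word advertising only the SQ command mode. */
	uint64_t ifver = MODEL_CAP_SQ_MODE << MODEL_CAP_SHIFT;

	printf("sq=%d ioarrin=%d\n",
	       model_is_cmd_mode(ifver, MODEL_CAP_SQ_MODE),
	       model_is_cmd_mode(ifver, MODEL_CAP_IOARRIN_MODE));
	return 0;
}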
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index 6c318db90c85..0efed177cc8b 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -32,11 +32,13 @@
*/
static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
{
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct llun_info *lli = NULL;
lli = kzalloc(sizeof(*lli), GFP_KERNEL);
if (unlikely(!lli)) {
- pr_err("%s: could not allocate lli\n", __func__);
+ dev_err(dev, "%s: could not allocate lli\n", __func__);
goto out;
}
@@ -58,11 +60,13 @@ out:
*/
static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
{
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct glun_info *gli = NULL;
gli = kzalloc(sizeof(*gli), GFP_KERNEL);
if (unlikely(!gli)) {
- pr_err("%s: could not allocate gli\n", __func__);
+ dev_err(dev, "%s: could not allocate gli\n", __func__);
goto out;
}
@@ -129,10 +133,10 @@ static struct glun_info *lookup_global(u8 *wwid)
*/
static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
{
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct llun_info *lli = NULL;
struct glun_info *gli = NULL;
- struct Scsi_Host *shost = sdev->host;
- struct cxlflash_cfg *cfg = shost_priv(shost);
if (unlikely(!wwid))
goto out;
@@ -165,7 +169,7 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
list_add(&gli->list, &global.gluns);
out:
- pr_debug("%s: returning %p\n", __func__, lli);
+ dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
return lli;
}
@@ -225,17 +229,18 @@ void cxlflash_term_global_luns(void)
int cxlflash_manage_lun(struct scsi_device *sdev,
struct dk_cxlflash_manage_lun *manage)
{
- int rc = 0;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct llun_info *lli = NULL;
+ int rc = 0;
u64 flags = manage->hdr.flags;
u32 chan = sdev->channel;
mutex_lock(&global.mutex);
lli = find_and_create_lun(sdev, manage->wwid);
- pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
- __func__, get_unaligned_be64(&manage->wwid[0]),
- get_unaligned_be64(&manage->wwid[8]),
- manage->hdr.flags, lli);
+ dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
+ __func__, get_unaligned_be64(&manage->wwid[0]),
+ get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
if (unlikely(!lli)) {
rc = -ENOMEM;
goto out;
@@ -265,11 +270,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
}
}
- pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__,
- lli->port_sel, chan, lli->lun_id[chan]);
+ dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
+ __func__, lli->port_sel, chan, lli->lun_id[chan]);
out:
mutex_unlock(&global.mutex);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b17ebf6d0a7e..7069639e92bc 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -43,6 +43,9 @@ MODULE_LICENSE("GPL");
*/
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
+ struct afu *afu = cmd->parent;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
u32 resid;
@@ -56,21 +59,20 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
resid = ioasa->resid;
scsi_set_resid(scp, resid);
- pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
- __func__, cmd, scp, resid);
+ dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
+ __func__, cmd, scp, resid);
}
if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
- pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
+ dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
+ __func__, cmd, scp);
scp->result = (DID_ERROR << 16);
}
- pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
- "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
- __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
- ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
- ioasa->fc_extra);
+ dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
+ "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
+ ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
+ ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
if (ioasa->rc.scsi_rc) {
/* We have a SCSI status */
@@ -159,6 +161,7 @@ static void cmd_complete(struct afu_cmd *cmd)
ulong lock_flags;
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
bool cmd_is_tmf;
if (cmd->scp) {
@@ -170,9 +173,8 @@ static void cmd_complete(struct afu_cmd *cmd)
cmd_is_tmf = cmd->cmd_tmf;
- pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
- "ioasc=%d\n", __func__, scp, scp->result,
- cmd->sa.ioasc);
+ dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
+ __func__, scp, scp->result, cmd->sa.ioasc);
scsi_dma_unmap(scp);
scp->scsi_done(scp);
@@ -188,10 +190,11 @@ static void cmd_complete(struct afu_cmd *cmd)
}
/**
- * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * context_reset() - reset command owner context via specified register
* @cmd: AFU command that timed out.
+ * @reset_reg: MMIO register to perform reset.
*/
-static void context_reset_ioarrin(struct afu_cmd *cmd)
+static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
int nretry = 0;
u64 rrin = 0x1;
@@ -199,22 +202,44 @@ static void context_reset_ioarrin(struct afu_cmd *cmd)
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- pr_debug("%s: cmd=%p\n", __func__, cmd);
+ dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
- writeq_be(rrin, &afu->host_map->ioarrin);
+ writeq_be(rrin, reset_reg);
do {
- rrin = readq_be(&afu->host_map->ioarrin);
+ rrin = readq_be(reset_reg);
if (rrin != 0x1)
break;
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
- dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+ dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
__func__, rrin, nretry);
}
/**
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * @cmd: AFU command that timed out.
+ */
+static void context_reset_ioarrin(struct afu_cmd *cmd)
+{
+ struct afu *afu = cmd->parent;
+
+ context_reset(cmd, &afu->host_map->ioarrin);
+}
+
+/**
+ * context_reset_sq() - reset command owner context w/ SQ Context Reset register
+ * @cmd: AFU command that timed out.
+ */
+static void context_reset_sq(struct afu_cmd *cmd)
+{
+ struct afu *afu = cmd->parent;
+
+ context_reset(cmd, &afu->host_map->sq_ctx_reset);
+}
+
+/**
* send_cmd_ioarrin() - sends an AFU command via IOARRIN register
* @afu: AFU associated with the host.
* @cmd: AFU command to send.
@@ -251,8 +276,51 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
- pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
- cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+ dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
+ cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
+ return rc;
+}
+
+/**
+ * send_cmd_sq() - sends an AFU command via SQ ring
+ * @afu: AFU associated with the host.
+ * @cmd: AFU command to send.
+ *
+ * Return:
+ * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ */
+static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
+{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+ int newval;
+ ulong lock_flags;
+
+ newval = atomic_dec_if_positive(&afu->hsq_credits);
+ if (newval <= 0) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.ioasa = &cmd->sa;
+
+ spin_lock_irqsave(&afu->hsq_slock, lock_flags);
+
+ *afu->hsq_curr = cmd->rcb;
+ if (afu->hsq_curr < afu->hsq_end)
+ afu->hsq_curr++;
+ else
+ afu->hsq_curr = afu->hsq_start;
+ writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);
+
+ spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
+out:
+ dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
+ "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
+ cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
+ readq_be(&afu->host_map->sq_head),
+ readq_be(&afu->host_map->sq_tail));
return rc;
}
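
send_cmd_sq() above is the new submission path: take a credit, copy the request into the current SQ slot, advance (wrapping at the end of the array), and ring the tail register; credits are handed back in the RRQ interrupt handler further down. A self-contained userspace model of that credit-gated ring is sketched below; sq_model and its fields are illustrative names, and the MMIO tail write is replaced by a counter.

/*
 * Userspace model of a credit-gated submission ring: a credit is taken
 * before an entry is queued and handed back when its completion is seen.
 */
#include <errno.h>
#include <stdio.h>

#define MODEL_SQ_ENTRIES 4

struct sq_model {
	int entries[MODEL_SQ_ENTRIES];
	int *start, *end, *curr;
	int credits;
	unsigned int tail_writes;	/* stands in for the MMIO tail register */
};

static void sq_init(struct sq_model *sq)
{
	sq->start = &sq->entries[0];
	sq->end = &sq->entries[MODEL_SQ_ENTRIES - 1];
	sq->curr = sq->start;
	sq->credits = MODEL_SQ_ENTRIES - 1;	/* one slot kept free */
	sq->tail_writes = 0;
}

static int sq_send(struct sq_model *sq, int cmd)
{
	if (sq->credits <= 0)
		return -EBUSY;
	sq->credits--;

	*sq->curr = cmd;
	if (sq->curr < sq->end)
		sq->curr++;
	else
		sq->curr = sq->start;	/* wrap */
	sq->tail_writes++;
	return 0;
}

static void sq_complete(struct sq_model *sq)
{
	sq->credits++;			/* completion returns the credit */
}

int main(void)
{
	struct sq_model sq;
	int i, rc;

	sq_init(&sq);
	for (i = 0; i < MODEL_SQ_ENTRIES; i++) {
		rc = sq_send(&sq, i);
		printf("send %d -> %d\n", i, rc);
	}
	sq_complete(&sq);
	printf("after completion, send -> %d\n", sq_send(&sq, 99));
	return 0;
}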
@@ -266,6 +334,8 @@ out:
*/
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
@@ -276,10 +346,8 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
}
if (unlikely(cmd->sa.ioasc != 0)) {
- pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
- "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
- cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
- cmd->sa.rc.fc_rc);
+ dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
+ __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
rc = -1;
}
@@ -298,8 +366,7 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
u32 port_sel = scp->device->channel + 1;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
ulong lock_flags;
@@ -344,7 +411,7 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
to);
if (!to) {
cfg->tmf_active = false;
- dev_err(dev, "%s: TMF timed out!\n", __func__);
+ dev_err(dev, "%s: TMF timed out\n", __func__);
rc = -1;
}
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -352,16 +419,6 @@ out:
return rc;
}
-static void afu_unmap(struct kref *ref)
-{
- struct afu *afu = container_of(ref, struct afu, mapcount);
-
- if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
- afu->afu_map = NULL;
- }
-}
-
/**
* cxlflash_driver_info() - information handler for this host driver
* @host: SCSI host associated with device.
@@ -382,7 +439,7 @@ static const char *cxlflash_driver_info(struct Scsi_Host *host)
*/
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(host);
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = sc_to_afucz(scp);
@@ -392,10 +449,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
ulong lock_flags;
int nseg = 0;
int rc = 0;
- int kref_got = 0;
dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08X-%08X-%08X-%08X)\n",
+ "cdb=(%08x-%08x-%08x-%08x)\n",
__func__, scp, host->host_no, scp->device->channel,
scp->device->id, scp->device->lun,
get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
@@ -417,11 +473,11 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
switch (cfg->state) {
case STATE_RESET:
- dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
+ dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
case STATE_FAILTERM:
- dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
+ dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
scp->result = (DID_NO_CONNECT << 16);
scp->scsi_done(scp);
rc = 0;
@@ -430,13 +486,10 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
break;
}
- kref_get(&cfg->afu->mapcount);
- kref_got = 1;
-
if (likely(sg)) {
nseg = scsi_dma_map(scp);
if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map!\n", __func__);
+ dev_err(dev, "%s: Fail DMA map\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -463,9 +516,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
if (unlikely(rc))
scsi_dma_unmap(scp);
out:
- if (kref_got)
- kref_put(&afu->mapcount, afu_unmap);
- pr_devel("%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -503,13 +553,15 @@ static void free_mem(struct cxlflash_cfg *cfg)
*
* Safe to call with AFU in a partially allocated/initialized state.
*
- * Waits for any active internal AFU commands to timeout and then unmaps
- * the MMIO space.
+ * Cancels scheduled worker threads, waits for any active internal AFU
+ * commands to timeout and then unmaps the MMIO space.
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
+ cancel_work_sync(&cfg->work_q);
+
if (likely(afu)) {
while (atomic_read(&afu->cmds_active))
ssleep(1);
@@ -517,7 +569,6 @@ static void stop_afu(struct cxlflash_cfg *cfg)
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
}
- kref_put(&afu->mapcount, afu_unmap);
}
}
@@ -585,6 +636,8 @@ static void term_mc(struct cxlflash_cfg *cfg)
*/
static void term_afu(struct cxlflash_cfg *cfg)
{
+ struct device *dev = &cfg->dev->dev;
+
/*
* Tear down is carefully orchestrated to ensure
* no interrupts can come in when the problem state
@@ -600,7 +653,7 @@ static void term_afu(struct cxlflash_cfg *cfg)
term_mc(cfg);
- pr_debug("%s: returning\n", __func__);
+ dev_dbg(dev, "%s: returning\n", __func__);
}
/**
@@ -627,8 +680,7 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
return;
if (!afu || !afu->afu_map) {
- dev_dbg(dev, "%s: The problem state area is not mapped\n",
- __func__);
+ dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
return;
}
@@ -670,10 +722,11 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
static void cxlflash_remove(struct pci_dev *pdev)
{
struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
ulong lock_flags;
if (!pci_is_enabled(pdev)) {
- pr_debug("%s: Device is disabled\n", __func__);
+ dev_dbg(dev, "%s: Device is disabled\n", __func__);
return;
}
@@ -699,7 +752,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
scsi_remove_host(cfg->host);
/* fall through */
case INIT_STATE_AFU:
- cancel_work_sync(&cfg->work_q);
term_afu(cfg);
case INIT_STATE_PCI:
pci_disable_device(pdev);
@@ -709,7 +761,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
break;
}
- pr_debug("%s: returning\n", __func__);
+ dev_dbg(dev, "%s: returning\n", __func__);
}
/**
@@ -727,7 +779,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
int rc = 0;
struct device *dev = &cfg->dev->dev;
- /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
+ /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(sizeof(struct afu)));
if (unlikely(!cfg->afu)) {
@@ -751,6 +803,7 @@ out:
static int init_pci(struct cxlflash_cfg *cfg)
{
struct pci_dev *pdev = cfg->dev;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
rc = pci_enable_device(pdev);
@@ -761,15 +814,14 @@ static int init_pci(struct cxlflash_cfg *cfg)
}
if (rc) {
- dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
- __func__);
+ dev_err(dev, "%s: Cannot enable adapter\n", __func__);
cxlflash_wait_for_pci_err_recovery(cfg);
goto out;
}
}
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -782,19 +834,19 @@ out:
static int init_scsi(struct cxlflash_cfg *cfg)
{
struct pci_dev *pdev = cfg->dev;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
rc = scsi_add_host(cfg->host, &pdev->dev);
if (rc) {
- dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
- __func__, rc);
+ dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
goto out;
}
scsi_scan_host(cfg->host);
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -844,16 +896,12 @@ static void set_port_offline(__be64 __iomem *fc_regs)
* Return:
* TRUE (1) when the specified port is online
* FALSE (0) when the specified port fails to come online after timeout
- * -EINVAL when @delay_us is less than 1000
*/
-static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
u64 status;
- if (delay_us < 1000) {
- pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
- return -EINVAL;
- }
+ WARN_ON(delay_us < 1000);
do {
msleep(delay_us / 1000);
@@ -877,16 +925,12 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
* Return:
* TRUE (1) when the specified port is offline
* FALSE (0) when the specified port fails to go offline after timeout
- * -EINVAL when @delay_us is less than 1000
*/
-static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
u64 status;
- if (delay_us < 1000) {
- pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
- return -EINVAL;
- }
+ WARN_ON(delay_us < 1000);
do {
msleep(delay_us / 1000);
@@ -915,11 +959,14 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
u64 wwpn)
{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
+
set_port_offline(fc_regs);
if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT)) {
- pr_debug("%s: wait on port %d to go offline timed out\n",
- __func__, port);
+ dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
+ __func__, port);
}
writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
@@ -927,8 +974,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
set_port_online(fc_regs);
if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT)) {
- pr_debug("%s: wait on port %d to go online timed out\n",
- __func__, port);
+ dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
+ __func__, port);
}
}
@@ -947,6 +994,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
*/
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
u64 port_sel;
/* first switch the AFU to the other links, if any */
@@ -958,21 +1007,21 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
set_port_offline(fc_regs);
if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT))
- pr_err("%s: wait on port %d to go offline timed out\n",
- __func__, port);
+ dev_err(dev, "%s: wait on port %d to go offline timed out\n",
+ __func__, port);
set_port_online(fc_regs);
if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT))
- pr_err("%s: wait on port %d to go online timed out\n",
- __func__, port);
+ dev_err(dev, "%s: wait on port %d to go online timed out\n",
+ __func__, port);
/* switch back to include this port */
port_sel |= (1ULL << port);
writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
- pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
+ dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/*
@@ -1082,6 +1131,8 @@ static void afu_err_intr_init(struct afu *afu)
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
struct afu *afu = (struct afu *)data;
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
u64 reg;
u64 reg_unmasked;
@@ -1089,18 +1140,17 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
if (reg_unmasked == 0UL) {
- pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
- __func__, (u64)afu, reg);
+ dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
+ __func__, reg);
goto cxlflash_sync_err_irq_exit;
}
- pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
- __func__, (u64)afu, reg);
+ dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
+ __func__, reg);
writeq_be(reg_unmasked, &afu->host_map->intr_clear);
cxlflash_sync_err_irq_exit:
- pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
return IRQ_HANDLED;
}
@@ -1115,6 +1165,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
struct afu *afu = (struct afu *)data;
struct afu_cmd *cmd;
+ struct sisl_ioasa *ioasa;
+ struct sisl_ioarcb *ioarcb;
bool toggle = afu->toggle;
u64 entry,
*hrrq_start = afu->hrrq_start,
@@ -1128,7 +1180,16 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
break;
- cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+ entry &= ~SISL_RESP_HANDLE_T_BIT;
+
+ if (afu_is_sq_cmd_mode(afu)) {
+ ioasa = (struct sisl_ioasa *)entry;
+ cmd = container_of(ioasa, struct afu_cmd, sa);
+ } else {
+ ioarcb = (struct sisl_ioarcb *)entry;
+ cmd = container_of(ioarcb, struct afu_cmd, rcb);
+ }
+
cmd_complete(cmd);
/* Advance to next entry or wrap and flip the toggle bit */
@@ -1138,6 +1199,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
hrrq_curr = hrrq_start;
toggle ^= SISL_RESP_HANDLE_T_BIT;
}
+
+ atomic_inc(&afu->hsq_credits);
}
afu->hrrq_curr = hrrq_curr;
@@ -1169,7 +1232,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
if (reg_unmasked == 0) {
- dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
+ dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
__func__, reg);
goto out;
}
@@ -1185,7 +1248,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
port = info->port;
- dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+ dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
__func__, port, info->desc,
readq_be(&global->fc_regs[port][FC_STATUS / 8]));
@@ -1198,7 +1261,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
__func__, port);
cfg->lr_state = LINK_RESET_REQUIRED;
cfg->lr_port = port;
- kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
@@ -1210,7 +1272,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
* should be the same and tracing one is sufficient.
*/
- dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
+ dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
__func__, port, reg);
writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
@@ -1219,13 +1281,11 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
if (info->action & SCAN_HOST) {
atomic_inc(&cfg->scan_host_needed);
- kref_get(&cfg->afu->mapcount);
schedule_work(&cfg->work_q);
}
}
out:
- dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
return IRQ_HANDLED;
}
@@ -1237,13 +1297,14 @@ out:
*/
static int start_context(struct cxlflash_cfg *cfg)
{
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
rc = cxl_start_context(cfg->mcctx,
cfg->afu->work.work_element_descriptor,
NULL);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1256,7 +1317,8 @@ static int start_context(struct cxlflash_cfg *cfg)
*/
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
- struct pci_dev *dev = cfg->dev;
+ struct device *dev = &cfg->dev->dev;
+ struct pci_dev *pdev = cfg->dev;
int rc = 0;
int ro_start, ro_size, i, j, k;
ssize_t vpd_size;
@@ -1265,10 +1327,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
+ vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
- dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
- __func__, vpd_size);
+ dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
+ __func__, vpd_size);
rc = -ENODEV;
goto out;
}
@@ -1277,8 +1339,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
PCI_VPD_LRDT_RO_DATA);
if (unlikely(ro_start < 0)) {
- dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
- __func__);
+ dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -1288,8 +1349,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
j = ro_size;
i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (unlikely((i + j) > vpd_size)) {
- pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
- __func__, (i + j), vpd_size);
+ dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
+ __func__, (i + j), vpd_size);
ro_size = vpd_size - i;
}
@@ -1307,8 +1368,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
if (unlikely(i < 0)) {
- dev_err(&dev->dev, "%s: Port %d WWPN not found "
- "in VPD\n", __func__, k);
+ dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
+ __func__, k);
rc = -ENODEV;
goto out;
}
@@ -1316,9 +1377,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
j = pci_vpd_info_field_size(&vpd_data[i]);
i += PCI_VPD_INFO_FLD_HDR_SIZE;
if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
- dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
- "VPD corrupt\n",
- __func__, k);
+ dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
+ __func__, k);
rc = -ENODEV;
goto out;
}
@@ -1326,15 +1386,15 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
if (unlikely(rc)) {
- dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
- "to integer\n", __func__, k);
+ dev_err(dev, "%s: WWPN conversion failed for port %d\n",
+ __func__, k);
rc = -ENODEV;
goto out;
}
}
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1388,12 +1448,18 @@ static int init_global(struct cxlflash_cfg *cfg)
goto out;
}
- pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
+ dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
+ __func__, wwpn[0], wwpn[1]);
- /* Set up RRQ in AFU for master issued cmds */
+ /* Set up RRQ and SQ in AFU for master issued cmds */
writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
+ if (afu_is_sq_cmd_mode(afu)) {
+ writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
+ writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
+ }
+
/* AFU configuration */
reg = readq_be(&afu->afu_map->global.regs.afu_config);
reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
@@ -1443,7 +1509,6 @@ static int init_global(struct cxlflash_cfg *cfg)
&afu->ctrl_map->ctx_cap);
/* Initialize heartbeat */
afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-
out:
return rc;
}
@@ -1455,6 +1520,7 @@ out:
static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
init_pcr(cfg);
@@ -1468,9 +1534,20 @@ static int start_afu(struct cxlflash_cfg *cfg)
afu->hrrq_curr = afu->hrrq_start;
afu->toggle = 1;
+ /* Initialize SQ */
+ if (afu_is_sq_cmd_mode(afu)) {
+ memset(&afu->sq, 0, sizeof(afu->sq));
+ afu->hsq_start = &afu->sq[0];
+ afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
+ afu->hsq_curr = afu->hsq_start;
+
+ spin_lock_init(&afu->hsq_slock);
+ atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
+ }
+
rc = init_global(cfg);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1490,7 +1567,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
rc = cxl_allocate_afu_irqs(ctx, 3);
if (unlikely(rc)) {
- dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
+ dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
level = UNDO_NOOP;
goto out;
@@ -1499,8 +1576,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
"SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
- __func__);
+ dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
@@ -1508,8 +1584,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
"SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
- __func__);
+ dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
goto out;
}
@@ -1517,8 +1592,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
"SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
- __func__);
+ dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
goto out;
}
@@ -1552,15 +1626,13 @@ static int init_mc(struct cxlflash_cfg *cfg)
/* During initialization reset the AFU to start from a clean slate */
rc = cxl_afu_reset(cfg->mcctx);
if (unlikely(rc)) {
- dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
- __func__, rc);
+ dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
goto ret;
}
level = init_intr(cfg, ctx);
if (unlikely(level)) {
- dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
- __func__, rc);
+ dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
goto out;
}
@@ -1575,7 +1647,7 @@ static int init_mc(struct cxlflash_cfg *cfg)
goto out;
}
ret:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
out:
term_intr(cfg, level);
@@ -1602,7 +1674,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
rc = init_mc(cfg);
if (rc) {
- dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
+ dev_err(dev, "%s: init_mc failed rc=%d\n",
__func__, rc);
goto out;
}
@@ -1610,11 +1682,10 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU */
afu->afu_map = cxl_psa_map(cfg->mcctx);
if (!afu->afu_map) {
- dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+ dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
- kref_init(&afu->mapcount);
/* No byte reverse on reading afu_version or string will be backwards */
reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1622,24 +1693,28 @@ static int init_afu(struct cxlflash_cfg *cfg)
afu->interface_version =
readq_be(&afu->afu_map->global.regs.interface_version);
if ((afu->interface_version + 1) == 0) {
- pr_err("Back level AFU, please upgrade. AFU version %s "
- "interface version 0x%llx\n", afu->version,
+ dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
+ "interface version %016llx\n", afu->version,
afu->interface_version);
rc = -EINVAL;
- goto err2;
+ goto err1;
}
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
+ if (afu_is_sq_cmd_mode(afu)) {
+ afu->send_cmd = send_cmd_sq;
+ afu->context_reset = context_reset_sq;
+ } else {
+ afu->send_cmd = send_cmd_ioarrin;
+ afu->context_reset = context_reset_ioarrin;
+ }
- pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
- afu->version, afu->interface_version);
+ dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
+ afu->version, afu->interface_version);
rc = start_afu(cfg);
if (rc) {
- dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
- __func__, rc);
- goto err2;
+ dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
+ goto err1;
}
afu_err_intr_init(cfg->afu);
@@ -1649,11 +1724,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
-err2:
- kref_put(&afu->mapcount, afu_unmap);
err1:
term_intr(cfg, UNMAP_THREE);
term_mc(cfg);
@@ -1693,7 +1766,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
- pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
+ dev_dbg(dev, "%s: Sync not required state=%u\n",
+ __func__, cfg->state);
return 0;
}
@@ -1710,7 +1784,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
init_completion(&cmd->cevent);
cmd->parent = afu;
- pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+ dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
cmd->rcb.ctx_id = afu->ctx_hndl;
@@ -1735,7 +1809,7 @@ out:
atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
kfree(buf);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1747,16 +1821,17 @@ out:
*/
static int afu_reset(struct cxlflash_cfg *cfg)
{
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
+
/* Stop the context before the reset. Since the context is
* no longer available, restart it after the reset is complete
*/
-
term_afu(cfg);
rc = init_afu(cfg);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1785,18 +1860,18 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
int rc = SUCCESS;
struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(host);
+ struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
int rcr = 0;
- pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
- host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+ dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+ scp->device->channel, scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
retry:
switch (cfg->state) {
@@ -1813,7 +1888,7 @@ retry:
break;
}
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1835,16 +1910,16 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
int rc = SUCCESS;
int rcr = 0;
struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(host);
+ struct device *dev = &cfg->dev->dev;
- pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
- host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+ dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+ scp->device->channel, scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
switch (cfg->state) {
case STATE_NORMAL:
@@ -1870,7 +1945,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
break;
}
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -1936,8 +2011,7 @@ static ssize_t port0_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
return cxlflash_show_port_status(0, afu, buf);
@@ -1955,8 +2029,7 @@ static ssize_t port1_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
return cxlflash_show_port_status(1, afu, buf);
@@ -1973,8 +2046,7 @@ static ssize_t port1_show(struct device *dev,
static ssize_t lun_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
@@ -2007,7 +2079,7 @@ static ssize_t lun_mode_store(struct device *dev,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(shost);
struct afu *afu = cfg->afu;
int rc;
u32 lun_mode;
@@ -2069,7 +2141,7 @@ static ssize_t cxlflash_show_port_lun_table(u32 port,
for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llX\n", i, readq_be(&fc_port[i]));
+ "%03d: %016llx\n", i, readq_be(&fc_port[i]));
return bytes;
}
@@ -2085,8 +2157,7 @@ static ssize_t port0_lun_table_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
return cxlflash_show_port_lun_table(0, afu, buf);
@@ -2104,8 +2175,7 @@ static ssize_t port1_lun_table_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
return cxlflash_show_port_lun_table(1, afu, buf);
@@ -2250,7 +2320,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
scsi_scan_host(cfg->host);
- kref_put(&afu->mapcount, afu_unmap);
}
/**
@@ -2265,6 +2334,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
{
struct Scsi_Host *host;
struct cxlflash_cfg *cfg = NULL;
+ struct device *dev = &pdev->dev;
struct dev_dependent_vals *ddv;
int rc = 0;
@@ -2276,8 +2346,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
if (!host) {
- dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
- __func__);
+ dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
rc = -ENOMEM;
goto out;
}
@@ -2288,12 +2357,11 @@ static int cxlflash_probe(struct pci_dev *pdev,
host->unique_id = host->host_no;
host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
- cfg = (struct cxlflash_cfg *)host->hostdata;
+ cfg = shost_priv(host);
cfg->host = host;
rc = alloc_mem(cfg);
if (rc) {
- dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
- __func__);
+ dev_err(dev, "%s: alloc_mem failed\n", __func__);
rc = -ENOMEM;
scsi_host_put(cfg->host);
goto out;
@@ -2334,30 +2402,27 @@ static int cxlflash_probe(struct pci_dev *pdev,
rc = init_pci(cfg);
if (rc) {
- dev_err(&pdev->dev, "%s: call to init_pci "
- "failed rc=%d!\n", __func__, rc);
+ dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
goto out_remove;
}
cfg->init_state = INIT_STATE_PCI;
rc = init_afu(cfg);
if (rc) {
- dev_err(&pdev->dev, "%s: call to init_afu "
- "failed rc=%d!\n", __func__, rc);
+ dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
goto out_remove;
}
cfg->init_state = INIT_STATE_AFU;
rc = init_scsi(cfg);
if (rc) {
- dev_err(&pdev->dev, "%s: call to init_scsi "
- "failed rc=%d!\n", __func__, rc);
+ dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
goto out_remove;
}
cfg->init_state = INIT_STATE_SCSI;
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
out_remove:
@@ -2395,7 +2460,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
drain_ioctls(cfg);
rc = cxlflash_mark_contexts_error(cfg);
if (unlikely(rc))
- dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
+ dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
__func__, rc);
term_afu(cfg);
return PCI_ERS_RESULT_NEED_RESET;
@@ -2429,7 +2494,7 @@ static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
rc = init_afu(cfg);
if (unlikely(rc)) {
- dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
+ dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2477,8 +2542,6 @@ static struct pci_driver cxlflash_driver = {
*/
static int __init init_cxlflash(void)
{
- pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
-
cxlflash_list_init();
return pci_register_driver(&cxlflash_driver);
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 1a2d09c148b3..a6e48a893fef 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,10 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
- u64 reserved; /* Reserved area */
+ union {
+ u64 reserved; /* Reserved for IOARRIN mode */
+ struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
+ };
} __packed;
struct sisl_rc {
@@ -260,6 +263,11 @@ struct sisl_host_map {
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
__be64 mbox_w; /* restricted use */
+ __be64 sq_start; /* Submission Queue (R/W): write sequence and */
+ __be64 sq_end; /* inclusion semantics are the same as RRQ */
+ __be64 sq_head; /* Submission Queue Head (R): for debugging */
+ __be64 sq_tail; /* Submission Queue Tail (R/W): next IOARCB */
+ __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
};
/* per context provisioning & control MMIO */
@@ -348,6 +356,15 @@ struct sisl_global_regs {
__be64 rsvd[0xf8];
__le64 afu_version;
__be64 interface_version;
+#define SISL_INTVER_CAP_SHIFT 16
+#define SISL_INTVER_MAJ_SHIFT 8
+#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
+#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
+#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
+#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
+#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
+#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
+#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
};
#define CXLFLASH_NUM_FC_PORTS 2
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 9636970d9611..90869cee2b20 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -212,7 +212,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
}
out:
- dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
+ dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
ctx_ctrl);
@@ -260,7 +260,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
writeq_be(val, &ctrl_map->ctx_cap);
val = readq_be(&ctrl_map->ctx_cap);
if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
- dev_err(dev, "%s: ctx may be closed val=%016llX\n",
+ dev_err(dev, "%s: ctx may be closed val=%016llx\n",
__func__, val);
rc = -EAGAIN;
goto out;
@@ -302,7 +302,7 @@ out:
*/
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct glun_info *gli = lli->parent;
u8 *cmd_buf = NULL;
@@ -326,7 +326,7 @@ retry:
scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
- dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
+ dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
retry_cnt ? "re" : "", scsi_cmd[0]);
/* Drop the ioctl read semaphore across lengthy call */
@@ -336,7 +336,7 @@ retry:
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
- dev_err(dev, "%s: Failed state! result=0x08%X\n",
+ dev_err(dev, "%s: Failed state result=%08x\n",
__func__, result);
rc = -ENODEV;
goto out;
@@ -378,7 +378,7 @@ retry:
}
if (result) {
- dev_err(dev, "%s: command failed, result=0x%x\n",
+ dev_err(dev, "%s: command failed, result=%08x\n",
__func__, result);
rc = -EIO;
goto out;
@@ -415,29 +415,32 @@ out:
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
struct llun_info *lli)
{
+ struct cxlflash_cfg *cfg = ctxi->cfg;
+ struct device *dev = &cfg->dev->dev;
struct sisl_rht_entry *rhte = NULL;
if (unlikely(!ctxi->rht_start)) {
- pr_debug("%s: Context does not have allocated RHT!\n",
+ dev_dbg(dev, "%s: Context does not have allocated RHT\n",
__func__);
goto out;
}
if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
- pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
+ dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
+ __func__, rhndl);
goto out;
}
if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
- pr_debug("%s: Bad resource handle LUN! (%d)\n",
- __func__, rhndl);
+ dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
+ __func__, rhndl);
goto out;
}
rhte = &ctxi->rht_start[rhndl];
if (unlikely(rhte->nmask == 0)) {
- pr_debug("%s: Unopened resource handle! (%d)\n",
- __func__, rhndl);
+ dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
+ __func__, rhndl);
rhte = NULL;
goto out;
}
@@ -456,6 +459,8 @@ out:
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
struct llun_info *lli)
{
+ struct cxlflash_cfg *cfg = ctxi->cfg;
+ struct device *dev = &cfg->dev->dev;
struct sisl_rht_entry *rhte = NULL;
int i;
@@ -470,7 +475,7 @@ struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
if (likely(rhte))
ctxi->rht_lun[i] = lli;
- pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
+ dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
return rhte;
}
@@ -547,7 +552,7 @@ int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
if (gli->mode == MODE_NONE)
gli->mode = mode;
else if (gli->mode != mode) {
- pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
+ pr_debug("%s: gli_mode=%d requested_mode=%d\n",
__func__, gli->mode, mode);
rc = -EINVAL;
goto out;
@@ -605,7 +610,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
struct ctx_info *ctxi,
struct dk_cxlflash_release *release)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -622,13 +627,13 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
struct sisl_rht_entry *rhte;
struct sisl_rht_entry_f1 *rhte_f1;
- dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
+ dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
if (!ctxi) {
ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%llu)\n",
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
__func__, ctxid);
rc = -EINVAL;
goto out;
@@ -639,7 +644,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
rhte = get_rhte(ctxi, rhndl, lli);
if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+ dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
__func__, rhndl);
rc = -EINVAL;
goto out;
@@ -758,13 +763,13 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
if (unlikely(!ctxi || !lli || !ws)) {
- dev_err(dev, "%s: Unable to allocate context!\n", __func__);
+ dev_err(dev, "%s: Unable to allocate context\n", __func__);
goto err;
}
rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
if (unlikely(!rhte)) {
- dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
+ dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
goto err;
}
@@ -858,7 +863,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
struct ctx_info *ctxi,
struct dk_cxlflash_detach *detach)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct lun_access *lun_access, *t;
@@ -875,7 +880,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
if (!ctxi) {
ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%llu)\n",
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
__func__, ctxid);
rc = -EINVAL;
goto out;
@@ -964,7 +969,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
ctxid = cxl_process_element(ctx);
if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
goto out;
}
@@ -973,18 +978,18 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
if (unlikely(!ctxi)) {
ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
if (!ctxi) {
- dev_dbg(dev, "%s: Context %d already free!\n",
+ dev_dbg(dev, "%s: ctxid=%d already free\n",
__func__, ctxid);
goto out_release;
}
- dev_dbg(dev, "%s: Another process owns context %d!\n",
+ dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
__func__, ctxid);
put_context(ctxi);
goto out;
}
- dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
+ dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
detach.context_id = ctxi->ctxid;
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
@@ -1011,17 +1016,20 @@ static void unmap_context(struct ctx_info *ctxi)
/**
* get_err_page() - obtains and allocates the error notification page
+ * @cfg: Internal structure associated with the host.
*
* Return: error notification page on success, NULL on failure
*/
-static struct page *get_err_page(void)
+static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
struct page *err_page = global.err_page;
+ struct device *dev = &cfg->dev->dev;
if (unlikely(!err_page)) {
err_page = alloc_page(GFP_KERNEL);
if (unlikely(!err_page)) {
- pr_err("%s: Unable to allocate err_page!\n", __func__);
+ dev_err(dev, "%s: Unable to allocate err_page\n",
+ __func__);
goto out;
}
@@ -1039,7 +1047,7 @@ static struct page *get_err_page(void)
}
out:
- pr_debug("%s: returning err_page=%p\n", __func__, err_page);
+ dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
return err_page;
}
@@ -1074,14 +1082,14 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ctxid = cxl_process_element(ctx);
if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
goto err;
}
ctxi = get_context(cfg, ctxid, file, ctrl);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
goto err;
}
@@ -1091,13 +1099,12 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
} else {
- dev_dbg(dev, "%s: err recovery active, use err_page!\n",
+ dev_dbg(dev, "%s: err recovery active, use err_page\n",
__func__);
- err_page = get_err_page();
+ err_page = get_err_page(cfg);
if (unlikely(!err_page)) {
- dev_err(dev, "%s: Could not obtain error page!\n",
- __func__);
+ dev_err(dev, "%s: Could not get err_page\n", __func__);
rc = VM_FAULT_RETRY;
goto out;
}
@@ -1147,7 +1154,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
ctxid = cxl_process_element(ctx);
if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
rc = -EIO;
goto out;
@@ -1155,7 +1162,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
ctxi = get_context(cfg, ctxid, file, ctrl);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
rc = -EIO;
goto out;
}
@@ -1251,7 +1258,7 @@ retry:
break;
goto retry;
case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+ dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
rc = -ENODEV;
break;
default:
@@ -1276,7 +1283,7 @@ retry:
static int cxlflash_disk_attach(struct scsi_device *sdev,
struct dk_cxlflash_attach *attach)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
@@ -1287,6 +1294,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
@@ -1302,24 +1310,24 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
}
if (gli->max_lba == 0) {
- dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
+ dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
__func__, lli->lun_id[sdev->channel]);
rc = read_cap16(sdev, lli);
if (rc) {
- dev_err(dev, "%s: Invalid device! (%d)\n",
+ dev_err(dev, "%s: Invalid device rc=%d\n",
__func__, rc);
rc = -ENODEV;
goto out;
}
- dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
- dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
+ dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
+ dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
}
if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
rctxid = attach->context_id;
ctxi = get_context(cfg, rctxid, NULL, 0);
if (!ctxi) {
- dev_dbg(dev, "%s: Bad context! (%016llX)\n",
+ dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
__func__, rctxid);
rc = -EINVAL;
goto out;
@@ -1327,7 +1335,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
list_for_each_entry(lun_access, &ctxi->luns, list)
if (lun_access->lli == lli) {
- dev_dbg(dev, "%s: Already attached!\n",
+ dev_dbg(dev, "%s: Already attached\n",
__func__);
rc = -EINVAL;
goto out;
@@ -1336,13 +1344,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
rc = scsi_device_get(sdev);
if (unlikely(rc)) {
- dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
+ dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
goto out;
}
lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
if (unlikely(!lun_access)) {
- dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
+ dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
rc = -ENOMEM;
goto err;
}
@@ -1352,7 +1360,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
/* Non-NULL context indicates reuse (another context reference) */
if (ctxi) {
- dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
+ dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
__func__, rctxid);
kref_get(&ctxi->kref);
list_add(&lun_access->list, &ctxi->luns);
@@ -1361,7 +1369,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
ctxi = create_context(cfg);
if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context! (%d)\n",
+ dev_err(dev, "%s: Failed to create context ctxid=%d\n",
__func__, ctxid);
goto err;
}
@@ -1387,7 +1395,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
@@ -1426,10 +1434,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
out_attach:
if (fd != -1)
- attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
- else
- attach->hdr.return_flags = 0;
+ flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
+ if (afu_is_sq_cmd_mode(afu))
+ flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
+ attach->hdr.return_flags = flags;
attach->context_id = ctxi->ctxid;
attach->block_size = gli->blk_len;
attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
@@ -1520,7 +1529,7 @@ static int recover_context(struct cxlflash_cfg *cfg,
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
@@ -1611,12 +1620,13 @@ err1:
static int cxlflash_afu_recover(struct scsi_device *sdev,
struct dk_cxlflash_recover_afu *recover)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct afu *afu = cfg->afu;
struct ctx_info *ctxi = NULL;
struct mutex *mutex = &cfg->ctx_recovery_mutex;
+ u64 flags;
u64 ctxid = DECODE_CTXID(recover->context_id),
rctxid = recover->context_id;
long reg;
@@ -1632,19 +1642,19 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
goto out;
rc = check_state(cfg);
if (rc) {
- dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+ dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
rc = -ENODEV;
goto out;
}
- dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
+ dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
__func__, recover->reason, rctxid);
retry:
/* Ensure that this process is attached to the context */
ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
rc = -EINVAL;
goto out;
}
@@ -1653,12 +1663,12 @@ retry:
retry_recover:
rc = recover_context(cfg, ctxi, &new_adap_fd);
if (unlikely(rc)) {
- dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
+ dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
__func__, ctxid, rc);
if ((rc == -ENODEV) &&
((atomic_read(&cfg->recovery_threads) > 1) ||
(lretry--))) {
- dev_dbg(dev, "%s: Going to try again!\n",
+ dev_dbg(dev, "%s: Going to try again\n",
__func__);
mutex_unlock(mutex);
msleep(100);
@@ -1672,11 +1682,16 @@ retry_recover:
}
ctxi->err_recovery_active = false;
+
+ flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
+ DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
+ if (afu_is_sq_cmd_mode(afu))
+ flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
+
+ recover->hdr.return_flags = flags;
recover->context_id = ctxi->ctxid;
recover->adap_fd = new_adap_fd;
recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
- DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
goto out;
}
@@ -1699,7 +1714,7 @@ retry_recover:
goto retry;
}
- dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
+ dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
if (likely(ctxi))
put_context(ctxi);
@@ -1718,7 +1733,7 @@ out:
static int process_sense(struct scsi_device *sdev,
struct dk_cxlflash_verify *verify)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -1729,7 +1744,7 @@ static int process_sense(struct scsi_device *sdev,
rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
if (!rc) {
- dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
+ dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
rc = -EINVAL;
goto out;
}
@@ -1785,7 +1800,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
{
int rc = 0;
struct ctx_info *ctxi = NULL;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -1795,20 +1810,20 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
rctxid = verify->context_id;
u64 last_lba = 0;
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
- "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
+ dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
+ "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
verify->hint, verify->hdr.flags);
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
rc = -EINVAL;
goto out;
}
rhte = get_rhte(ctxi, rhndl, lli);
if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+ dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
__func__, rhndl);
rc = -EINVAL;
goto out;
@@ -1855,7 +1870,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
out:
if (likely(ctxi))
put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
+ dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
__func__, rc, verify->last_lba);
return rc;
}
@@ -1907,7 +1922,7 @@ static char *decode_ioctl(int cmd)
*/
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
@@ -1927,25 +1942,25 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
struct ctx_info *ctxi = NULL;
struct sisl_rht_entry *rhte = NULL;
- pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+ dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
if (unlikely(rc)) {
- dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
- __func__);
+ dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
goto out;
}
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
rc = -EINVAL;
goto err1;
}
rhte = rhte_checkout(ctxi, lli);
if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: too many opens for this context\n", __func__);
+ dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
+ __func__, ctxid);
rc = -EMFILE; /* too many opens */
goto err1;
}
@@ -1963,7 +1978,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
out:
if (likely(ctxi))
put_context(ctxi);
- dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
+ dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
__func__, rsrc_handle, rc, last_lba);
return rc;
@@ -1985,7 +2000,7 @@ err1:
*/
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
int rc = 0;
@@ -2002,7 +2017,7 @@ static int ioctl_common(struct scsi_device *sdev, int cmd)
case DK_CXLFLASH_VLUN_RESIZE:
case DK_CXLFLASH_RELEASE:
case DK_CXLFLASH_DETACH:
- dev_dbg(dev, "%s: Command override! (%d)\n",
+ dev_dbg(dev, "%s: Command override rc=%d\n",
__func__, rc);
rc = 0;
break;
@@ -2032,7 +2047,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
typedef int (*sioctl) (struct scsi_device *, void *);
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
struct dk_cxlflash_hdr *hdr;
@@ -2111,7 +2126,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
}
if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail! "
+ dev_err(dev, "%s: copy_from_user() fail "
"size=%lu cmd=%d (%s) arg=%p\n",
__func__, size, cmd, decode_ioctl(cmd), arg);
rc = -EFAULT;
@@ -2127,7 +2142,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
}
if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
+ dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
rc = -EINVAL;
goto cxlflash_ioctl_exit;
}
@@ -2135,7 +2150,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
rc = do_ioctl(sdev, (void *)&buf);
if (likely(!rc))
if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail! "
+ dev_err(dev, "%s: copy_to_user() fail "
"size=%lu cmd=%d (%s) arg=%p\n",
__func__, size, cmd, decode_ioctl(cmd), arg);
rc = -EFAULT;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 90c5d7f5278e..8fcc804dbef9 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -66,8 +66,8 @@ static int ba_init(struct ba_lun *ba_lun)
int last_word_underflow = 0;
u64 *lam;
- pr_debug("%s: Initializing LUN: lun_id = %llX, "
- "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n",
+ pr_debug("%s: Initializing LUN: lun_id=%016llx "
+ "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
__func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
/* Calculate bit map size */
@@ -80,7 +80,7 @@ static int ba_init(struct ba_lun *ba_lun)
/* Allocate lun information container */
bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
if (unlikely(!bali)) {
- pr_err("%s: Failed to allocate lun_info for lun_id %llX\n",
+ pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
__func__, ba_lun->lun_id);
return -ENOMEM;
}
@@ -96,7 +96,7 @@ static int ba_init(struct ba_lun *ba_lun)
GFP_KERNEL);
if (unlikely(!bali->lun_alloc_map)) {
pr_err("%s: Failed to allocate lun allocation map: "
- "lun_id = %llX\n", __func__, ba_lun->lun_id);
+ "lun_id=%016llx\n", __func__, ba_lun->lun_id);
kfree(bali);
return -ENOMEM;
}
@@ -125,7 +125,7 @@ static int ba_init(struct ba_lun *ba_lun)
bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
GFP_KERNEL);
if (unlikely(!bali->aun_clone_map)) {
- pr_err("%s: Failed to allocate clone map: lun_id = %llX\n",
+ pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
__func__, ba_lun->lun_id);
kfree(bali->lun_alloc_map);
kfree(bali);
@@ -136,7 +136,7 @@ static int ba_init(struct ba_lun *ba_lun)
ba_lun->ba_lun_handle = bali;
pr_debug("%s: Successfully initialized the LUN: "
- "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n",
+ "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
__func__, ba_lun->lun_id, bali->lun_bmap_size,
bali->free_aun_cnt);
return 0;
@@ -165,10 +165,9 @@ static int find_free_range(u32 low,
num_bits = (sizeof(*lam) * BITS_PER_BYTE);
bit_pos = find_first_bit(lam, num_bits);
- pr_devel("%s: Found free bit %llX in LUN "
- "map entry %llX at bitmap index = %X\n",
- __func__, bit_pos, bali->lun_alloc_map[i],
- i);
+ pr_devel("%s: Found free bit %llu in LUN "
+ "map entry %016llx at bitmap index = %d\n",
+ __func__, bit_pos, bali->lun_alloc_map[i], i);
*bit_word = i;
bali->free_aun_cnt--;
@@ -194,11 +193,11 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
bali = ba_lun->ba_lun_handle;
pr_debug("%s: Received block allocation request: "
- "lun_id = %llX, free_aun_cnt = %llX\n",
+ "lun_id=%016llx free_aun_cnt=%llx\n",
__func__, ba_lun->lun_id, bali->free_aun_cnt);
if (bali->free_aun_cnt == 0) {
- pr_debug("%s: No space left on LUN: lun_id = %llX\n",
+ pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
__func__, ba_lun->lun_id);
return -1ULL;
}
@@ -212,7 +211,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
bali, &bit_word);
if (bit_pos == -1) {
pr_debug("%s: Could not find an allocation unit on LUN:"
- " lun_id = %llX\n", __func__, ba_lun->lun_id);
+ " lun_id=%016llx\n", __func__, ba_lun->lun_id);
return -1ULL;
}
}
@@ -223,8 +222,8 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
else
bali->free_curr_idx = bit_word;
- pr_debug("%s: Allocating AU number %llX, on lun_id %llX, "
- "free_aun_cnt = %llX\n", __func__,
+ pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
+ "free_aun_cnt=%llx\n", __func__,
((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
bali->free_aun_cnt);
@@ -266,18 +265,18 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
bali = ba_lun->ba_lun_handle;
if (validate_alloc(bali, to_free)) {
- pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n",
+ pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
__func__, to_free, ba_lun->lun_id);
return -1;
}
- pr_debug("%s: Received a request to free AU %llX on lun_id %llX, "
- "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id,
+ pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
+ "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
bali->free_aun_cnt);
if (bali->aun_clone_map[to_free] > 0) {
- pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone "
- "count = %X\n", __func__, to_free, ba_lun->lun_id,
+ pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
+ __func__, to_free, ba_lun->lun_id,
bali->aun_clone_map[to_free]);
bali->aun_clone_map[to_free]--;
return 0;
@@ -294,8 +293,8 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
else if (idx > bali->free_high_idx)
bali->free_high_idx = idx;
- pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on "
- "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx,
+ pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
+ "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
ba_lun->lun_id, bali->free_aun_cnt);
return 0;
@@ -313,16 +312,16 @@ static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
struct ba_lun_info *bali = ba_lun->ba_lun_handle;
if (validate_alloc(bali, to_clone)) {
- pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n",
+ pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
__func__, to_clone, ba_lun->lun_id);
return -1;
}
- pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n",
+ pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
__func__, to_clone, ba_lun->lun_id);
if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
- pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n",
+ pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
__func__, to_clone, ba_lun->lun_id);
return -1;
}
@@ -433,7 +432,7 @@ static int write_same16(struct scsi_device *sdev,
u64 offset = lba;
int left = nblks;
u32 to = sdev->request_queue->rq_timeout;
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
@@ -459,7 +458,7 @@ static int write_same16(struct scsi_device *sdev,
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
- dev_err(dev, "%s: Failed state! result=0x08%X\n",
+ dev_err(dev, "%s: Failed state result=%08x\n",
__func__, result);
rc = -ENODEV;
goto out;
@@ -467,7 +466,7 @@ static int write_same16(struct scsi_device *sdev,
if (result) {
dev_err_ratelimited(dev, "%s: command failed for "
- "offset %lld result=0x%x\n",
+ "offset=%lld result=%08x\n",
__func__, offset, result);
rc = -EIO;
goto out;
@@ -480,7 +479,7 @@ out:
kfree(cmd_buf);
kfree(scsi_cmd);
kfree(sense_buf);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -508,6 +507,8 @@ static int grow_lxt(struct afu *afu,
struct sisl_rht_entry *rhte,
u64 *new_size)
{
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -527,7 +528,8 @@ static int grow_lxt(struct afu *afu,
mutex_lock(&blka->mutex);
av_size = ba_space(&blka->ba_lun);
if (unlikely(av_size <= 0)) {
- pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size);
+ dev_dbg(dev, "%s: ba_space error av_size=%d\n",
+ __func__, av_size);
mutex_unlock(&blka->mutex);
rc = -ENOSPC;
goto out;
@@ -568,8 +570,8 @@ static int grow_lxt(struct afu *afu,
*/
aun = ba_alloc(&blka->ba_lun);
if ((aun == -1ULL) || (aun >= blka->nchunk))
- pr_debug("%s: ba_alloc error: allocated chunk# %llX, "
- "max %llX\n", __func__, aun, blka->nchunk - 1);
+ dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
+ "max=%llu\n", __func__, aun, blka->nchunk - 1);
/* select both ports, use r/w perms from RHT */
lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
@@ -599,7 +601,7 @@ static int grow_lxt(struct afu *afu,
kfree(lxt_old);
*new_size = my_new_size;
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -621,6 +623,8 @@ static int shrink_lxt(struct afu *afu,
struct ctx_info *ctxi,
u64 *new_size)
{
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct sisl_lxt_entry *lxt, *lxt_old;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -706,7 +710,7 @@ static int shrink_lxt(struct afu *afu,
kfree(lxt_old);
*new_size = my_new_size;
out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -728,7 +732,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
struct ctx_info *ctxi,
struct dk_cxlflash_resize *resize)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
struct afu *afu = cfg->afu;
@@ -751,13 +756,13 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
- pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx,"
- "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle,
- resize->req_size, new_size);
+ dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
+ __func__, ctxid, resize->rsrc_handle, resize->req_size,
+ new_size);
if (unlikely(gli->mode != MODE_VIRTUAL)) {
- pr_debug("%s: LUN mode does not support resize! (%d)\n",
- __func__, gli->mode);
+ dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
+ __func__, gli->mode);
rc = -EINVAL;
goto out;
@@ -766,7 +771,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
if (!ctxi) {
ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
if (unlikely(!ctxi)) {
- pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid);
+ dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
+ __func__, ctxid);
rc = -EINVAL;
goto out;
}
@@ -776,7 +782,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
rhte = get_rhte(ctxi, rhndl, lli);
if (unlikely(!rhte)) {
- pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl);
+ dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
+ __func__, rhndl);
rc = -EINVAL;
goto out;
}
@@ -794,8 +801,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
out:
if (put_ctx)
put_context(ctxi);
- pr_debug("%s: resized to %lld returning rc=%d\n",
- __func__, resize->last_lba, rc);
+ dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
+ __func__, resize->last_lba, rc);
return rc;
}
@@ -815,6 +822,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
u32 chan;
u32 lind;
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
struct sisl_global_map __iomem *agm = &afu->afu_map->global;
mutex_lock(&global.mutex);
@@ -828,15 +836,15 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
if (lli->port_sel == BOTH_PORTS) {
writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
- pr_debug("%s: Virtual LUN on slot %d id0=%llx, "
- "id1=%llx\n", __func__, lind,
- lli->lun_id[0], lli->lun_id[1]);
+ dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx "
+ "id1=%llx\n", __func__, lind,
+ lli->lun_id[0], lli->lun_id[1]);
} else {
chan = PORT2CHAN(lli->port_sel);
writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
- pr_debug("%s: Virtual LUN on slot %d chan=%d, "
- "id=%llx\n", __func__, lind, chan,
- lli->lun_id[chan]);
+ dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d "
+ "id=%llx\n", __func__, lind, chan,
+ lli->lun_id[chan]);
}
}
@@ -860,6 +868,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
u32 lind;
int rc = 0;
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
struct sisl_global_map __iomem *agm = &afu->afu_map->global;
mutex_lock(&global.mutex);
@@ -882,8 +891,8 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
cfg->promote_lun_index++;
- pr_debug("%s: Virtual LUN on slot %d id0=%llx, id1=%llx\n",
- __func__, lind, lli->lun_id[0], lli->lun_id[1]);
+ dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx id1=%llx\n",
+ __func__, lind, lli->lun_id[0], lli->lun_id[1]);
} else {
/*
* If this LUN is visible only from one port, we will put
@@ -898,14 +907,14 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
lind = lli->lun_index = cfg->last_lun_index[chan];
writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
cfg->last_lun_index[chan]--;
- pr_debug("%s: Virtual LUN on slot %d chan=%d, id=%llx\n",
- __func__, lind, chan, lli->lun_id[chan]);
+ dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d id=%llx\n",
+ __func__, lind, chan, lli->lun_id[chan]);
}
lli->in_table = true;
out:
mutex_unlock(&global.mutex);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
@@ -923,7 +932,7 @@ out:
*/
int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
@@ -942,14 +951,14 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
struct ctx_info *ctxi = NULL;
struct sisl_rht_entry *rhte = NULL;
- pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+ dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
/* Setup the LUNs block allocator on first call */
mutex_lock(&gli->mutex);
if (gli->mode == MODE_NONE) {
rc = init_vlun(lli);
if (rc) {
- dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
+ dev_err(dev, "%s: init_vlun failed rc=%d\n",
__func__, rc);
rc = -ENOMEM;
goto err0;
@@ -958,29 +967,28 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
if (unlikely(rc)) {
- dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n",
- __func__);
+ dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
goto err0;
}
mutex_unlock(&gli->mutex);
rc = init_luntable(cfg, lli);
if (rc) {
- dev_err(dev, "%s: call to init_luntable failed rc=%d!\n",
- __func__, rc);
+ dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
goto err1;
}
ctxi = get_context(cfg, rctxid, lli, 0);
if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
rc = -EINVAL;
goto err1;
}
rhte = rhte_checkout(ctxi, lli);
if (unlikely(!rhte)) {
- dev_err(dev, "%s: too many opens for this context\n", __func__);
+ dev_err(dev, "%s: too many opens ctxid=%llu\n",
+ __func__, ctxid);
rc = -EMFILE; /* too many opens */
goto err1;
}
@@ -996,7 +1004,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
resize.rsrc_handle = rsrc_handle;
rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
if (rc) {
- dev_err(dev, "%s: resize failed rc %d\n", __func__, rc);
+ dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
goto err2;
}
last_lba = resize.last_lba;
@@ -1013,8 +1021,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
out:
if (likely(ctxi))
put_context(ctxi);
- pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n",
- __func__, rsrc_handle, rc, last_lba);
+ dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
+ __func__, rsrc_handle, rc, last_lba);
return rc;
err2:
@@ -1047,6 +1055,8 @@ static int clone_lxt(struct afu *afu,
struct sisl_rht_entry *rhte,
struct sisl_rht_entry *rhte_src)
{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct device *dev = &cfg->dev->dev;
struct sisl_lxt_entry *lxt;
u32 ngrps;
u64 aun; /* chunk# allocated by block allocator */
@@ -1101,7 +1111,7 @@ static int clone_lxt(struct afu *afu,
cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- pr_debug("%s: returning\n", __func__);
+ dev_dbg(dev, "%s: returning\n", __func__);
return 0;
}
@@ -1120,7 +1130,8 @@ static int clone_lxt(struct afu *afu,
int cxlflash_disk_clone(struct scsi_device *sdev,
struct dk_cxlflash_clone *clone)
{
- struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+ struct device *dev = &cfg->dev->dev;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
struct blka *blka = &gli->blka;
@@ -1140,8 +1151,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
bool found;
LIST_HEAD(sidecar);
- pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
+ dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
+ __func__, ctxid_src, ctxid_dst);
/* Do not clone yourself */
if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1151,16 +1162,16 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
if (unlikely(gli->mode != MODE_VIRTUAL)) {
rc = -EINVAL;
- pr_debug("%s: Clone not supported on physical LUNs! (%d)\n",
- __func__, gli->mode);
+ dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
+ __func__, gli->mode);
goto out;
}
ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
if (unlikely(!ctxi_src || !ctxi_dst)) {
- pr_debug("%s: Bad context! (%llu,%llu)\n", __func__,
- ctxid_src, ctxid_dst);
+ dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
+ __func__, ctxid_src, ctxid_dst);
rc = -EINVAL;
goto out;
}
@@ -1185,8 +1196,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
lun_access_dst = kzalloc(sizeof(*lun_access_dst),
GFP_KERNEL);
if (unlikely(!lun_access_dst)) {
- pr_err("%s: Unable to allocate lun_access!\n",
- __func__);
+ dev_err(dev, "%s: lun_access allocation fail\n",
+ __func__);
rc = -ENOMEM;
goto out;
}
@@ -1197,7 +1208,7 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
}
if (unlikely(!ctxi_src->rht_out)) {
- pr_debug("%s: Nothing to clone!\n", __func__);
+ dev_dbg(dev, "%s: Nothing to clone\n", __func__);
goto out_success;
}
@@ -1256,7 +1267,7 @@ out:
put_context(ctxi_src);
if (ctxi_dst)
put_context(ctxi_dst);
- pr_debug("%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
err:
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 5b80746980b8..4a7679f6c73d 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -88,12 +88,6 @@ struct clariion_dh_data {
*/
unsigned char buffer[CLARIION_BUFFER_SIZE];
/*
- * SCSI sense buffer for commands -- assumes serial issuance
- * and completion sequence of all commands for same multipath.
- */
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
- unsigned int senselen;
- /*
* LUN state
*/
int lun_state;
@@ -116,44 +110,38 @@ struct clariion_dh_data {
/*
* Parse MODE_SELECT cmd reply.
*/
-static int trespass_endio(struct scsi_device *sdev, char *sense)
+static int trespass_endio(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
{
int err = SCSI_DH_IO;
- struct scsi_sense_hdr sshdr;
-
- if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
- "0x%2x, 0x%2x while sending CLARiiON trespass "
- "command.\n", CLARIION_NAME, sshdr.sense_key,
- sshdr.asc, sshdr.ascq);
- if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
- (sshdr.ascq == 0x00)) {
- /*
- * Array based copy in progress -- do not send
- * mode_select or copy will be aborted mid-stream.
- */
- sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
- "progress while sending CLARiiON trespass "
- "command.\n", CLARIION_NAME);
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
- (sshdr.ascq == 0x03)) {
- /*
- * LUN Not Ready - Manual Intervention Required
- * indicates in-progress ucode upgrade (NDU).
- */
- sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
- "ucode upgrade NDU operation while sending "
- "CLARiiON trespass command.\n", CLARIION_NAME);
- err = SCSI_DH_DEV_TEMP_BUSY;
- } else
- err = SCSI_DH_DEV_FAILED;
- } else {
- sdev_printk(KERN_INFO, sdev,
- "%s: failed to send MODE SELECT, no sense available\n",
- CLARIION_NAME);
- }
+ sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
+ "0x%2x, 0x%2x while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME, sshdr->sense_key,
+ sshdr->asc, sshdr->ascq);
+
+ if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x00) {
+ /*
+ * Array based copy in progress -- do not send
+ * mode_select or copy will be aborted mid-stream.
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
+ "progress while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x03) {
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates in-progress ucode upgrade (NDU).
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while sending "
+ "CLARiiON trespass command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else
+ err = SCSI_DH_DEV_FAILED;
return err;
}
@@ -257,103 +245,15 @@ out:
return sp_model;
}
-/*
- * Get block request for REQ_BLOCK_PC command issued to path. Currently
- * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
- *
- * Uses data and sense buffers in hardware handler context structure and
- * assumes serial servicing of commands, both issuance and completion.
- */
-static struct request *get_req(struct scsi_device *sdev, int cmd,
- unsigned char *buffer)
-{
- struct request *rq;
- int len = 0;
-
- rq = blk_get_request(sdev->request_queue,
- (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
- return NULL;
- }
-
- blk_rq_set_block_pc(rq);
- rq->cmd_len = COMMAND_SIZE(cmd);
- rq->cmd[0] = cmd;
-
- switch (cmd) {
- case MODE_SELECT:
- len = sizeof(short_trespass);
- rq->cmd[1] = 0x10;
- rq->cmd[4] = len;
- break;
- case MODE_SELECT_10:
- len = sizeof(long_trespass);
- rq->cmd[1] = 0x10;
- rq->cmd[8] = len;
- break;
- case INQUIRY:
- len = CLARIION_BUFFER_SIZE;
- rq->cmd[4] = len;
- memset(buffer, 0, len);
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->timeout = CLARIION_TIMEOUT;
- rq->retries = CLARIION_RETRIES;
-
- if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
- blk_put_request(rq);
- return NULL;
- }
-
- return rq;
-}
-
-static int send_inquiry_cmd(struct scsi_device *sdev, int page,
- struct clariion_dh_data *csdev)
-{
- struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
- int err;
-
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = csdev->senselen = 0;
-
- rq->cmd[0] = INQUIRY;
- if (page != 0) {
- rq->cmd[1] = 1;
- rq->cmd[2] = page;
- }
- err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
- if (err == -EIO) {
- sdev_printk(KERN_INFO, sdev,
- "%s: failed to send %s INQUIRY: %x\n",
- CLARIION_NAME, page?"EVPD":"standard",
- rq->errors);
- csdev->senselen = rq->sense_len;
- err = SCSI_DH_IO;
- }
-
- blk_put_request(rq);
-
- return err;
-}
-
static int send_trespass_cmd(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- struct request *rq;
unsigned char *page22;
- int err, len, cmd;
+ unsigned char cdb[COMMAND_SIZE(MODE_SELECT)];
+ int err, res = SCSI_DH_OK, len;
+ struct scsi_sense_hdr sshdr;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
page22 = short_trespass;
@@ -361,40 +261,37 @@ static int send_trespass_cmd(struct scsi_device *sdev,
/* Set Honor Reservations bit */
page22[6] |= 0x80;
len = sizeof(short_trespass);
- cmd = MODE_SELECT;
+ cdb[0] = MODE_SELECT;
+ cdb[1] = 0x10;
+ cdb[4] = len;
} else {
page22 = long_trespass;
if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
/* Set Honor Reservations bit */
page22[10] |= 0x80;
len = sizeof(long_trespass);
- cmd = MODE_SELECT_10;
+ cdb[0] = MODE_SELECT_10;
+ cdb[8] = len;
}
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
- rq = get_req(sdev, cmd, csdev->buffer);
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq->sense = csdev->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = csdev->senselen = 0;
-
- err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
- if (err == -EIO) {
- if (rq->sense_len) {
- err = trespass_endio(sdev, csdev->sense);
- } else {
+ err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ csdev->buffer, len, &sshdr,
+ CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
+ NULL, req_flags, 0);
+ if (err) {
+ if (scsi_sense_valid(&sshdr))
+ res = trespass_endio(sdev, &sshdr);
+ else {
sdev_printk(KERN_INFO, sdev,
"%s: failed to send MODE SELECT: %x\n",
- CLARIION_NAME, rq->errors);
+ CLARIION_NAME, err);
+ res = SCSI_DH_IO;
}
}
- blk_put_request(rq);
-
- return err;
+ return res;
}
static int clariion_check_sense(struct scsi_device *sdev,
@@ -464,21 +361,7 @@ static int clariion_std_inquiry(struct scsi_device *sdev,
int err;
char *sp_model;
- err = send_inquiry_cmd(sdev, 0, csdev);
- if (err != SCSI_DH_OK && csdev->senselen) {
- struct scsi_sense_hdr sshdr;
-
- if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr)) {
- sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
- "%02x/%02x/%02x\n", CLARIION_NAME,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- }
- err = SCSI_DH_IO;
- goto out;
- }
-
- sp_model = parse_sp_model(sdev, csdev->buffer);
+ sp_model = parse_sp_model(sdev, sdev->inquiry);
if (!sp_model) {
err = SCSI_DH_DEV_UNSUPP;
goto out;
@@ -500,30 +383,12 @@ out:
static int clariion_send_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err, retry = CLARIION_RETRIES;
-
-retry:
- err = send_inquiry_cmd(sdev, 0xC0, csdev);
- if (err != SCSI_DH_OK && csdev->senselen) {
- struct scsi_sense_hdr sshdr;
-
- err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
- &sshdr);
- if (!err)
- return SCSI_DH_IO;
-
- err = clariion_check_sense(sdev, &sshdr);
- if (retry > 0 && err == ADD_TO_MLQUEUE) {
- retry--;
- goto retry;
- }
- sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
- "%02x/%02x/%02x\n", CLARIION_NAME,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- err = SCSI_DH_IO;
- } else {
+ int err = SCSI_DH_IO;
+
+ if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer,
+ CLARIION_BUFFER_SIZE))
err = parse_sp_info_reply(sdev, csdev);
- }
+
return err;
}
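
For reference, the converted EMC handler above now issues its trespass MODE SELECT through the SCSI midlayer instead of hand-building a block-layer request. A minimal sketch of that calling pattern follows; it is not part of the patch, the example_ function name and the fixed timeout/retry constants are illustrative only, and only the midlayer helpers themselves (scsi_execute_req_flags, scsi_sense_valid, sdev_printk) are real API, used here exactly as in the hunks above.

#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int example_mode_select(struct scsi_device *sdev, void *buf, int len)
{
	unsigned char cdb[6] = { MODE_SELECT, 0x10 };
	struct scsi_sense_hdr sshdr;
	u64 flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		    REQ_FAILFAST_DRIVER;
	int res;

	cdb[4] = len;	/* parameter list length, must fit a 6-byte CDB */

	/* The midlayer builds, executes and frees the request, and fills
	 * in the decoded sense header on failure.
	 */
	res = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, buf, len,
				     &sshdr, 30 * HZ, 5, NULL, flags, 0);
	if (res && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev, "sense %x/%x/%x\n",
			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
	return res;
}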
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 308e87195dc1..be43c940636d 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,13 +38,10 @@
#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int path_state;
int retries;
int retry_cnt;
struct scsi_device *sdev;
- activate_complete callback_fn;
- void *callback_data;
};
static int hp_sw_start_stop(struct hp_sw_dh_data *);
@@ -56,43 +53,34 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *);
*
* Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
*/
-static int tur_done(struct scsi_device *sdev, unsigned char *sense)
+static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
+ struct scsi_sense_hdr *sshdr)
{
- struct scsi_sense_hdr sshdr;
- int ret;
+ int ret = SCSI_DH_IO;
- ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!ret) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending tur failed, no sense available\n",
- HP_SW_NAME);
- ret = SCSI_DH_IO;
- goto done;
- }
- switch (sshdr.sense_key) {
+ switch (sshdr->sense_key) {
case UNIT_ATTENTION:
ret = SCSI_DH_IMM_RETRY;
break;
case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
+ if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
/*
* LUN not ready - Initialization command required
*
* This is the passive path
*/
- ret = SCSI_DH_DEV_OFFLINED;
+ h->path_state = HP_SW_PATH_PASSIVE;
+ ret = SCSI_DH_OK;
break;
}
/* Fallthrough */
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
- HP_SW_NAME, sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
+ HP_SW_NAME, sshdr->sense_key, sshdr->asc,
+ sshdr->ascq);
break;
}
-
-done:
return ret;
}
@@ -105,131 +93,36 @@ done:
*/
static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
- struct request *req;
- int ret;
+ unsigned char cmd[6] = { TEST_UNIT_READY };
+ struct scsi_sense_hdr sshdr;
+ int ret = SCSI_DH_OK, res;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
retry:
- req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
- if (IS_ERR(req))
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
- req->cmd[0] = TEST_UNIT_READY;
- req->timeout = HP_SW_TIMEOUT;
- req->sense = h->sense;
- memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
- req->sense_len = 0;
-
- ret = blk_execute_rq(req->q, NULL, req, 1);
- if (ret == -EIO) {
- if (req->sense_len > 0) {
- ret = tur_done(sdev, h->sense);
- } else {
+ res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES,
+ NULL, req_flags, 0);
+ if (res) {
+ if (scsi_sense_valid(&sshdr))
+ ret = tur_done(sdev, h, &sshdr);
+ else {
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed with %x\n",
- HP_SW_NAME, req->errors);
+ HP_SW_NAME, res);
ret = SCSI_DH_IO;
}
} else {
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
- if (ret == SCSI_DH_IMM_RETRY) {
- blk_put_request(req);
+ if (ret == SCSI_DH_IMM_RETRY)
goto retry;
- }
- if (ret == SCSI_DH_DEV_OFFLINED) {
- h->path_state = HP_SW_PATH_PASSIVE;
- ret = SCSI_DH_OK;
- }
-
- blk_put_request(req);
return ret;
}
/*
- * start_done - Handle START STOP UNIT return status
- * @sdev: sdev the command has been sent to
- * @errors: blk error code
- */
-static int start_done(struct scsi_device *sdev, unsigned char *sense)
-{
- struct scsi_sense_hdr sshdr;
- int rc;
-
- rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
- if (!rc) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, "
- "no sense available\n",
- HP_SW_NAME);
- return SCSI_DH_IO;
- }
- switch (sshdr.sense_key) {
- case NOT_READY:
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
- /*
- * LUN not ready - manual intervention required
- *
- * Switch-over in progress, retry.
- */
- rc = SCSI_DH_RETRY;
- break;
- }
- /* fall through */
- default:
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
- HP_SW_NAME, sshdr.sense_key, sshdr.asc,
- sshdr.ascq);
- rc = SCSI_DH_IO;
- }
-
- return rc;
-}
-
-static void start_stop_endio(struct request *req, int error)
-{
- struct hp_sw_dh_data *h = req->end_io_data;
- unsigned err = SCSI_DH_OK;
-
- if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE) {
- sdev_printk(KERN_WARNING, h->sdev,
- "%s: sending start_stop_unit failed with %x\n",
- HP_SW_NAME, req->errors);
- err = SCSI_DH_IO;
- goto done;
- }
-
- if (req->sense_len > 0) {
- err = start_done(h->sdev, h->sense);
- if (err == SCSI_DH_RETRY) {
- err = SCSI_DH_IO;
- if (--h->retry_cnt) {
- blk_put_request(req);
- err = hp_sw_start_stop(h);
- if (err == SCSI_DH_OK)
- return;
- }
- }
- }
-done:
- req->end_io_data = NULL;
- __blk_put_request(req->q, req);
- if (h->callback_fn) {
- h->callback_fn(h->callback_data, err);
- h->callback_fn = h->callback_data = NULL;
- }
- return;
-
-}
-
-/*
* hp_sw_start_stop - Send START STOP UNIT command
* @sdev: sdev command should be sent to
*
@@ -237,26 +130,48 @@ done:
*/
static int hp_sw_start_stop(struct hp_sw_dh_data *h)
{
- struct request *req;
-
- req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
- if (IS_ERR(req))
- return SCSI_DH_RES_TEMP_UNAVAIL;
-
- blk_rq_set_block_pc(req);
- req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- req->cmd_len = COMMAND_SIZE(START_STOP);
- req->cmd[0] = START_STOP;
- req->cmd[4] = 1; /* Start spin cycle */
- req->timeout = HP_SW_TIMEOUT;
- req->sense = h->sense;
- memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
- req->sense_len = 0;
- req->end_io_data = h;
+ unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdev = h->sdev;
+ int res, rc = SCSI_DH_OK;
+ int retry_cnt = HP_SW_RETRIES;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
- blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
- return SCSI_DH_OK;
+retry:
+ res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES,
+ NULL, req_flags, 0);
+ if (res) {
+ if (!scsi_sense_valid(&sshdr)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n", HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ if (--retry_cnt)
+ goto retry;
+ rc = SCSI_DH_RETRY;
+ break;
+ }
+ /* fall through */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "sense %x/%x/%x\n", HP_SW_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ rc = SCSI_DH_IO;
+ }
+ }
+ return rc;
}
static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
@@ -290,15 +205,8 @@ static int hp_sw_activate(struct scsi_device *sdev,
ret = hp_sw_tur(sdev, h);
- if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
- h->retry_cnt = h->retries;
- h->callback_fn = fn;
- h->callback_data = data;
+ if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE)
ret = hp_sw_start_stop(h);
- if (ret == SCSI_DH_OK)
- return 0;
- h->callback_fn = h->callback_data = NULL;
- }
if (fn)
fn(data, ret);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 00d9c326158e..b64eaae8533d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -205,7 +205,6 @@ struct rdac_dh_data {
#define RDAC_NON_PREFERRED 1
char preferred;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
struct c4_inquiry c4;
@@ -262,40 +261,12 @@ do { \
sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0);
-static struct request *get_rdac_req(struct scsi_device *sdev,
- void *buffer, unsigned buflen, int rw)
+static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
+ struct list_head *list,
+ unsigned char *cdb)
{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
-
- rq = blk_get_request(q, rw, GFP_NOIO);
-
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev,
- "get_rdac_req: blk_get_request failed.\n");
- return NULL;
- }
- blk_rq_set_block_pc(rq);
-
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
- blk_put_request(rq);
- sdev_printk(KERN_INFO, sdev,
- "get_rdac_req: blk_rq_map_kern failed.\n");
- return NULL;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->retries = RDAC_RETRIES;
- rq->timeout = RDAC_TIMEOUT;
-
- return rq;
-}
-
-static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h, struct list_head *list)
-{
- struct request *rq;
+ struct scsi_device *sdev = ctlr->ms_sdev;
+ struct rdac_dh_data *h = sdev->handler_data;
struct rdac_mode_common *common;
unsigned data_size;
struct rdac_queue_data *qdata;
@@ -332,27 +303,17 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
lun_table[qdata->h->lun] = 0x81;
}
- /* get request for block layer packet command */
- rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
- if (!rq)
- return NULL;
-
/* Prepare the command. */
if (h->ctlr->use_ms10) {
- rq->cmd[0] = MODE_SELECT_10;
- rq->cmd[7] = data_size >> 8;
- rq->cmd[8] = data_size & 0xff;
+ cdb[0] = MODE_SELECT_10;
+ cdb[7] = data_size >> 8;
+ cdb[8] = data_size & 0xff;
} else {
- rq->cmd[0] = MODE_SELECT;
- rq->cmd[4] = data_size;
+ cdb[0] = MODE_SELECT;
+ cdb[4] = data_size;
}
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
- return rq;
+ return data_size;
}
static void release_controller(struct kref *kref)
@@ -400,46 +361,14 @@ static struct rdac_controller *get_controller(int index, char *array_name,
return ctlr;
}
-static int submit_inquiry(struct scsi_device *sdev, int page_code,
- unsigned int len, struct rdac_dh_data *h)
-{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
-
- rq = get_rdac_req(sdev, &h->inq, len, READ);
- if (!rq)
- goto done;
-
- /* Prepare the command. */
- rq->cmd[0] = INQUIRY;
- rq->cmd[1] = 1;
- rq->cmd[2] = page_code;
- rq->cmd[4] = len;
- rq->cmd_len = COMMAND_SIZE(INQUIRY);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- err = blk_execute_rq(q, NULL, rq, 1);
- if (err == -EIO)
- err = SCSI_DH_IO;
-
- blk_put_request(rq);
-done:
- return err;
-}
-
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
char *array_name, u8 *array_id)
{
- int err, i;
- struct c8_inquiry *inqp;
+ int err = SCSI_DH_IO, i;
+ struct c8_inquiry *inqp = &h->inq.c8;
- err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c8;
+ if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
+ sizeof(struct c8_inquiry))) {
if (inqp->page_code != 0xc8)
return SCSI_DH_NOSYS;
if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
@@ -453,20 +382,20 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
memset(array_id, 0, UNIQUE_ID_LEN);
memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
+ err = SCSI_DH_OK;
}
return err;
}
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err, access_state;
+ int err = SCSI_DH_IO, access_state;
struct rdac_dh_data *tmp;
- struct c9_inquiry *inqp;
+ struct c9_inquiry *inqp = &h->inq.c9;
h->state = RDAC_STATE_ACTIVE;
- err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c9;
+ if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
+ sizeof(struct c9_inquiry))) {
/* detect the operating mode */
if ((inqp->avte_cvp >> 5) & 0x1)
h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
@@ -501,6 +430,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
tmp->sdev->access_state = access_state;
}
rcu_read_unlock();
+ err = SCSI_DH_OK;
}
return err;
@@ -509,12 +439,11 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
static int initialize_controller(struct scsi_device *sdev,
struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
- int err, index;
- struct c4_inquiry *inqp;
+ int err = SCSI_DH_IO, index;
+ struct c4_inquiry *inqp = &h->inq.c4;
- err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c4;
+ if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp,
+ sizeof(struct c4_inquiry))) {
/* get the controller index */
if (inqp->slot_id[1] == 0x31)
index = 0;
@@ -530,18 +459,18 @@ static int initialize_controller(struct scsi_device *sdev,
h->sdev = sdev;
}
spin_unlock(&list_lock);
+ err = SCSI_DH_OK;
}
return err;
}
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err;
- struct c2_inquiry *inqp;
+ int err = SCSI_DH_IO;
+ struct c2_inquiry *inqp = &h->inq.c2;
- err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
- if (err == SCSI_DH_OK) {
- inqp = &h->inq.c2;
+ if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp,
+ sizeof(struct c2_inquiry))) {
/*
* If more than MODE6_MAX_LUN luns are supported, use
* mode select 10
@@ -550,36 +479,35 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
h->ctlr->use_ms10 = 1;
else
h->ctlr->use_ms10 = 0;
+ err = SCSI_DH_OK;
}
return err;
}
static int mode_select_handle_sense(struct scsi_device *sdev,
- unsigned char *sensebuf)
+ struct scsi_sense_hdr *sense_hdr)
{
- struct scsi_sense_hdr sense_hdr;
- int err = SCSI_DH_IO, ret;
+ int err = SCSI_DH_IO;
struct rdac_dh_data *h = sdev->handler_data;
- ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
- if (!ret)
+ if (!scsi_sense_valid(sense_hdr))
goto done;
- switch (sense_hdr.sense_key) {
+ switch (sense_hdr->sense_key) {
case NO_SENSE:
case ABORTED_COMMAND:
case UNIT_ATTENTION:
err = SCSI_DH_RETRY;
break;
case NOT_READY:
- if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
/* LUN Not Ready and is in the Process of Becoming
* Ready
*/
err = SCSI_DH_RETRY;
break;
case ILLEGAL_REQUEST:
- if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
+ if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
/*
* Command Lock contention
*/
@@ -592,7 +520,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
- sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+ sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
done:
return err;
@@ -602,13 +530,16 @@ static void send_mode_select(struct work_struct *work)
{
struct rdac_controller *ctlr =
container_of(work, struct rdac_controller, ms_work);
- struct request *rq;
struct scsi_device *sdev = ctlr->ms_sdev;
struct rdac_dh_data *h = sdev->handler_data;
- struct request_queue *q = sdev->request_queue;
- int err, retry_cnt = RDAC_RETRY_COUNT;
+ int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
+ unsigned char cdb[COMMAND_SIZE(MODE_SELECT_10)];
+ struct scsi_sense_hdr sshdr;
+ unsigned int data_size;
+ u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -616,21 +547,19 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
-retry:
- err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h, &list);
- if (!rq)
- goto done;
+ retry:
+ data_size = rdac_failover_get(ctlr, &list, cdb);
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- err = blk_execute_rq(q, NULL, rq, 1);
- blk_put_request(rq);
- if (err != SCSI_DH_OK) {
- err = mode_select_handle_sense(sdev, h->sense);
+ if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ &h->ctlr->mode_select, data_size, &sshdr,
+ RDAC_TIMEOUT * HZ,
+ RDAC_RETRIES, NULL, req_flags, 0)) {
+ err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
if (err == SCSI_DH_IMM_RETRY)
@@ -643,7 +572,6 @@ retry:
(char *) h->ctlr->array_name, h->ctlr->index);
}
-done:
list_for_each_entry_safe(qdata, tmp, &list, entry) {
list_del(&qdata->entry);
if (err == SCSI_DH_OK)
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 5f75e638ec95..256dd6791fcc 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2768,16 +2768,12 @@ static int adpt_i2o_activate_hba(adpt_hba* pHba)
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
- if (adpt_i2o_systab_send(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
+ if (adpt_i2o_systab_send(pHba) < 0)
return -1;
- }
/* In READY state */
- if (adpt_i2o_enable_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
+ if (adpt_i2o_enable_hba(pHba) < 0)
return -1;
- }
/* In OPERATIONAL state */
return 0;
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index d6e53aee2295..6432a50b26d8 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
flags |= IRQF_SHARED;
esas2r_log(ESAS2R_LOG_INFO,
- "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+ "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
a->pcid->irq, a, a->name, flags);
if (request_irq(a->pcid->irq,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 3e8483410f61..b35ed3829421 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
if (ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
- "ioctl_handler kzalloc failed for %d bytes",
+ "ioctl_handler kzalloc failed for %zu bytes",
sizeof(struct atto_express_ioctl));
return -ENOMEM;
}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
index 7b6397bb5b94..75b9d23cd736 100644
--- a/drivers/scsi/esas2r/esas2r_log.h
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -61,8 +61,8 @@ enum {
#endif
};
-int esas2r_log(const long level, const char *format, ...);
-int esas2r_log_dev(const long level,
+__printf(2, 3) int esas2r_log(const long level, const char *format, ...);
+__printf(3, 4) int esas2r_log_dev(const long level,
const struct device *dev,
const char *format,
...);
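
The __printf() annotations added above let the compiler cross-check format strings against their arguments, which is what makes the %d to %zu and %lx fixes in the neighbouring esas2r hunks visible as warnings. A small sketch of the idiom, with hypothetical example_ names; only __printf() and vprintk() are real kernel API.

#include <linux/kernel.h>
#include <stdarg.h>

/* Format string is checked against the variadic arguments at compile time. */
__printf(2, 3)
static int example_log(const long level, const char *fmt, ...)
{
	va_list args;
	int res;

	if (level < 0)
		return 0;
	va_start(args, fmt);
	res = vprintk(fmt, args);
	va_end(args);
	return res;
}

static void example_use(size_t nbytes)
{
	/* %zu matches size_t; with __printf() a stray %d here would warn */
	example_log(1, "allocated %zu bytes\n", nbytes);
}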
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 5092c821d088..f2e9d8aa979c 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
GFP_KERNEL);
if (a->local_atto_ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
- "write_hw kzalloc failed for %d bytes",
+ "write_hw kzalloc failed for %zu bytes",
sizeof(struct atto_ioctl));
return -ENOMEM;
}
@@ -1186,7 +1186,7 @@ retry:
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"unable to allocate a request for a "
- "device reset (%d:%d)!",
+ "device reset (%d:%llu)!",
cmd->device->id,
cmd->device->lun);
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 59150cad0353..ab7bc1505e0b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
.name = "FCoE Driver",
.proc_name = FCOE_NAME,
.queuecommand = fc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fc_eh_abort,
.eh_device_reset_handler = fc_eh_device_reset,
.eh_host_reset_handler = fc_eh_host_reset,
@@ -326,8 +327,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
/* look for SAN MAC address, if multiple SAN MACs exist, only
* use the first one for SPMA */
- real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
- vlan_dev_real_dev(netdev) : netdev;
+ real_dev = is_vlan_dev(netdev) ? vlan_dev_real_dev(netdev) : netdev;
fcoe->realdev = real_dev;
rcu_read_lock();
for_each_dev_addr(real_dev, ha) {
@@ -730,7 +730,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
ctlr = fcoe_to_ctlr(fcoe);
/* Figure out the VLAN ID, if any */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(netdev))
lport->vlan = vlan_dev_vlan_id(netdev);
else
lport->vlan = 0;
@@ -959,13 +959,13 @@ static inline int fcoe_em_config(struct fc_lport *lport)
* Reuse existing offload em instance in case
* it is already allocated on real eth device
*/
- if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(fcoe->netdev))
cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
else
cur_real_dev = fcoe->netdev;
list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
- if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(oldfcoe->netdev))
old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
else
old_real_dev = oldfcoe->netdev;
@@ -1563,7 +1563,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb->protocol = htons(ETH_P_FCOE);
skb->priority = fcoe->priority;
- if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+ if (is_vlan_dev(fcoe->netdev) &&
fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
/* must set skb->dev before calling vlan_put_tag */
skb->dev = fcoe->realdev;
@@ -1794,7 +1794,7 @@ fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
struct net_device *real_dev;
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
- if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(fcoe->netdev))
real_dev = vlan_dev_real_dev(fcoe->netdev);
else
real_dev = fcoe->netdev;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 58ce9020d69c..ba58b7953263 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -106,6 +106,7 @@ static struct scsi_host_template fnic_host_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = fnic_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
.eh_host_reset_handler = fnic_host_reset,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 6f9665d50d84..67c8dac321ad 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -26,14 +26,55 @@
#include <linux/blkdev.h>
#include <linux/module.h>
#include <scsi/scsi_host.h>
-#include "g_NCR5380.h"
-#include "NCR5380.h"
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/interrupt.h>
+/* Definitions for the core NCR5380 driver. */
+
+#define NCR5380_read(reg) \
+ ioread8(hostdata->io + hostdata->offset + (reg))
+#define NCR5380_write(reg, value) \
+ iowrite8(value, hostdata->io + hostdata->offset + (reg))
+
+#define NCR5380_implementation_fields \
+ int offset; \
+ int c400_ctl_status; \
+ int c400_blk_cnt; \
+ int c400_host_buf; \
+ int io_width
+
+#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
+#define NCR5380_dma_recv_setup generic_NCR5380_pread
+#define NCR5380_dma_send_setup generic_NCR5380_pwrite
+#define NCR5380_dma_residual NCR5380_dma_residual_none
+
+#define NCR5380_intr generic_NCR5380_intr
+#define NCR5380_queue_command generic_NCR5380_queue_command
+#define NCR5380_abort generic_NCR5380_abort
+#define NCR5380_bus_reset generic_NCR5380_bus_reset
+#define NCR5380_info generic_NCR5380_info
+
+#define NCR5380_io_delay(x) udelay(x)
+
+#include "NCR5380.h"
+
+#define DRV_MODULE_NAME "g_NCR5380"
+
+#define NCR53C400_mem_base 0x3880
+#define NCR53C400_host_buffer 0x3900
+#define NCR53C400_region_size 0x3a00
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+#define BOARD_NCR53C400A 2
+#define BOARD_DTC3181E 3
+#define BOARD_HP_C2502 4
+
+#define IRQ_AUTO 254
+
#define MAX_CARDS 8
/* old-style parameters for compatibility */
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
deleted file mode 100644
index 81b22d989648..000000000000
--- a/drivers/scsi/g_NCR5380.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Generic Generic NCR5380 driver defines
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- *
- * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
- * K.Lentin@cs.monash.edu.au
- */
-
-#ifndef GENERIC_NCR5380_H
-#define GENERIC_NCR5380_H
-
-#define DRV_MODULE_NAME "g_NCR5380"
-
-#define NCR5380_read(reg) \
- ioread8(hostdata->io + hostdata->offset + (reg))
-#define NCR5380_write(reg, value) \
- iowrite8(value, hostdata->io + hostdata->offset + (reg))
-
-#define NCR5380_implementation_fields \
- int offset; \
- int c400_ctl_status; \
- int c400_blk_cnt; \
- int c400_host_buf; \
- int io_width;
-
-#define NCR53C400_mem_base 0x3880
-#define NCR53C400_host_buffer 0x3900
-#define NCR53C400_region_size 0x3a00
-
-#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
-#define NCR5380_dma_recv_setup generic_NCR5380_pread
-#define NCR5380_dma_send_setup generic_NCR5380_pwrite
-#define NCR5380_dma_residual NCR5380_dma_residual_none
-
-#define NCR5380_intr generic_NCR5380_intr
-#define NCR5380_queue_command generic_NCR5380_queue_command
-#define NCR5380_abort generic_NCR5380_abort
-#define NCR5380_bus_reset generic_NCR5380_bus_reset
-#define NCR5380_info generic_NCR5380_info
-
-#define NCR5380_io_delay(x) udelay(x)
-
-#define BOARD_NCR5380 0
-#define BOARD_NCR53C400 1
-#define BOARD_NCR53C400A 2
-#define BOARD_DTC3181E 3
-#define BOARD_HP_C2502 4
-
-#define IRQ_AUTO 254
-
-#endif /* GENERIC_NCR5380_H */
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index c0cd505a9ef7..9216deaa3ff5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -95,6 +95,7 @@ struct hisi_sas_port {
struct hisi_sas_cq {
struct hisi_hba *hisi_hba;
+ struct tasklet_struct tasklet;
int rd_point;
int id;
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d50e9cfefd24..53637a941b94 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -71,6 +71,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
struct device *dev = &hisi_hba->pdev->dev;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
if (!slot->task)
return;
@@ -97,6 +99,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -141,11 +145,10 @@ static void hisi_sas_slot_abort(struct work_struct *work)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_tmf_task tmf_task;
- struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
struct scsi_lun lun;
struct device *dev = &hisi_hba->pdev->dev;
int tag = abort_slot->idx;
+ unsigned long flags;
if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
dev_err(dev, "cannot abort slot for non-ssp task\n");
@@ -159,11 +162,11 @@ static void hisi_sas_slot_abort(struct work_struct *work)
hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
/* Do cleanup for this task */
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
if (task->task_done)
task->task_done(task);
- if (sas_dev)
- atomic64_dec(&sas_dev->running_req);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -1118,7 +1121,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
}
exit:
- dev_info(dev, "internal task abort: task to dev %016llx task=%p "
+ dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
"resp: 0x%x sts 0x%x\n",
SAS_ADDR(device->sas_addr),
task,
@@ -1450,7 +1453,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
refclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(refclk))
- dev_info(dev, "no ref clk property\n");
+ dev_dbg(dev, "no ref clk property\n");
else
hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
@@ -1549,10 +1552,6 @@ int hisi_sas_probe(struct platform_device *pdev,
hisi_sas_init_add(hisi_hba);
- rc = hisi_hba->hw->hw_init(hisi_hba);
- if (rc)
- goto err_out_ha;
-
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
@@ -1561,6 +1560,10 @@ int hisi_sas_probe(struct platform_device *pdev,
if (rc)
goto err_out_register_ha;
+ rc = hisi_hba->hw->hw_init(hisi_hba);
+ if (rc)
+ goto err_out_register_ha;
+
scsi_scan_host(shost);
return 0;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8a1be0ba8a22..854fbeaade3e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1596,6 +1596,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
hisi_hba->complete_hdr[queue];
u32 irq_value, rd_point = cq->rd_point, wr_point;
+ spin_lock(&hisi_hba->lock);
irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
@@ -1628,6 +1629,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ spin_unlock(&hisi_hba->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b934aec1eebb..1b214450dcb5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -207,6 +207,8 @@
#define TXID_AUTO (PORT_BASE + 0xb8)
#define TXID_AUTO_CT3_OFF 1
#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
+#define TX_HARDRST_OFF 2
+#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
@@ -215,6 +217,7 @@
#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
+#define CON_CONTROL (PORT_BASE + 0x118)
#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
#define CHL_INT0 (PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF 0
@@ -333,6 +336,11 @@
#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF 9
#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_SMP_TIMEOUT_OFF 16
+#define ITCT_HDR_SMP_TIMEOUT_8US 1
+#define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \
+ 250) /* 2ms */
+#define ITCT_HDR_AWT_CONTINUE_OFF 25
#define ITCT_HDR_PORT_ID_OFF 28
#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
@@ -526,6 +534,8 @@ enum {
#define SATA_PROTOCOL_FPDMA 0x8
#define SATA_PROTOCOL_ATAPI 0x10
+static void hisi_sas_link_timeout_disable_link(unsigned long data);
+
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -693,6 +703,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
(device->linkrate << ITCT_HDR_MCR_OFF) |
(1 << ITCT_HDR_VLN_OFF) |
+ (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) |
+ (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
(port->id << ITCT_HDR_PORT_ID_OFF));
itct->qw0 = cpu_to_le64(qw0);
@@ -702,7 +714,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
/* qw2 */
if (!dev_is_sata(device))
- itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+ itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
(0x1ULL << ITCT_HDR_BITLT_OFF) |
(0x32ULL << ITCT_HDR_MCTLT_OFF) |
(0x1ULL << ITCT_HDR_RTOLT_OFF));
@@ -711,7 +723,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
static void free_device_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
- u64 qw0, dev_id = sas_dev->device_id;
+ u64 dev_id = sas_dev->device_id;
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
@@ -735,8 +747,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
dev_dbg(dev, "got clear ITCT done interrupt\n");
/* invalid the itct state*/
- qw0 = cpu_to_le64(itct->qw0);
- qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
@@ -978,6 +989,50 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
upper_32_bits(hisi_hba->initial_fis_dma));
}
+static void hisi_sas_link_timeout_enable_link(unsigned long data)
+{
+ struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ int i, reg_val;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
+ if (!(reg_val & BIT(0))) {
+ hisi_sas_phy_write32(hisi_hba, i,
+ CON_CONTROL, 0x7);
+ break;
+ }
+ }
+
+ hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+ mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
+}
+
+static void hisi_sas_link_timeout_disable_link(unsigned long data)
+{
+ struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ int i, reg_val;
+
+ reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
+ for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
+ if (reg_val & BIT(i)) {
+ hisi_sas_phy_write32(hisi_hba, i,
+ CON_CONTROL, 0x6);
+ break;
+ }
+ }
+
+ hisi_hba->timer.function = hisi_sas_link_timeout_enable_link;
+ mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
+{
+ hisi_hba->timer.data = (unsigned long)hisi_hba;
+ hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+ hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
+ add_timer(&hisi_hba->timer);
+}
+
static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = &hisi_hba->pdev->dev;
@@ -1025,14 +1080,21 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ u32 txid_auto;
+
stop_phy_v2_hw(hisi_hba, phy_no);
+ if (phy->identify.device_type == SAS_END_DEVICE) {
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | TX_HARDRST_MSK);
+ }
msleep(100);
start_phy_v2_hw(hisi_hba, phy_no);
}
-static void start_phys_v2_hw(unsigned long data)
+static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
int i;
for (i = 0; i < hisi_hba->n_phy; i++)
@@ -1041,10 +1103,7 @@ static void start_phys_v2_hw(unsigned long data)
static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
{
- struct timer_list *timer = &hisi_hba->timer;
-
- setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
- mod_timer(timer, jiffies + HZ);
+ start_phys_v2_hw(hisi_hba);
}
static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1771,8 +1830,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
- if (sas_dev)
- atomic64_dec(&sas_dev->running_req);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -2020,9 +2077,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (phy->identify.device_type == SAS_END_DEVICE)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SSP;
- else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ else if (phy->identify.device_type != SAS_PHY_UNUSED) {
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
+ if (!timer_pending(&hisi_hba->timer))
+ set_link_timer_quirk(hisi_hba);
+ }
queue_work(hisi_hba->wq, &phy->phyup_ws);
end:
@@ -2033,10 +2093,23 @@ end:
return res;
}
+static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
+{
+ u32 port_state;
+
+ port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+ if (port_state & 0x1ff)
+ return true;
+
+ return false;
+}
+
static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
int res = 0;
u32 phy_state, sl_ctrl, txid_auto;
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct hisi_sas_port *port = phy->port;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -2046,6 +2119,10 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
sl_ctrl & ~SL_CONTROL_CTA_MSK);
+ if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
+ if (!check_any_wideports_v2_hw(hisi_hba) &&
+ timer_pending(&hisi_hba->timer))
+ del_timer(&hisi_hba->timer);
txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -2481,21 +2558,19 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+static void cq_tasklet_v2_hw(unsigned long val)
{
- struct hisi_sas_cq *cq = p;
+ struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_itct *itct;
struct hisi_sas_complete_v2_hdr *complete_queue;
- u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
complete_queue = hisi_hba->complete_hdr[queue];
- irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
-
- hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+ spin_lock(&hisi_hba->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@@ -2545,6 +2620,19 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ spin_unlock(&hisi_hba->lock);
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ int queue = cq->id;
+
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+ tasklet_schedule(&cq->tasklet);
+
return IRQ_HANDLED;
}
@@ -2726,6 +2814,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->queue_count; i++) {
int idx = i + 96; /* First cq interrupt is irq96 */
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ struct tasklet_struct *t = &cq->tasklet;
irq = irq_map[idx];
if (!irq) {
@@ -2742,6 +2832,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
irq, rc);
return -ENOENT;
}
+ tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
}
return 0;
@@ -2807,6 +2898,12 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
static int hisi_sas_v2_remove(struct platform_device *pdev)
{
+ struct sas_ha_struct *sha = platform_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (timer_pending(&hisi_hba->timer))
+ del_timer(&hisi_hba->timer);
+
return hisi_sas_remove(pdev);
}
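
The v2 hw change above splits completion handling in two: the hard-irq handler acknowledges OQ_INT_SRC and defers the queue walk to a per-CQ tasklet, which then runs under the hba lock. A minimal sketch of that split, not part of the patch; the example_ names are hypothetical and only tasklet_init/tasklet_schedule and the irq return code are real kernel API.

#include <linux/interrupt.h>

struct example_cq {
	struct tasklet_struct tasklet;
	/* ... read/write pointers and completion ring state ... */
};

/* Bottom half: walk the completion queue outside hard-irq context. */
static void example_cq_tasklet(unsigned long data)
{
	struct example_cq *cq = (struct example_cq *)data;

	/* process completions for cq here */
}

/* Hard-irq handler: defer completion processing to the tasklet. */
static irqreturn_t example_cq_interrupt(int irq, void *p)
{
	struct example_cq *cq = p;

	tasklet_schedule(&cq->tasklet);
	return IRQ_HANDLED;
}

static void example_cq_setup(struct example_cq *cq)
{
	tasklet_init(&cq->tasklet, example_cq_tasklet, (unsigned long)cq);
}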
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 258a3f9a2519..831a1c8b9f89 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -213,6 +213,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ error = scsi_init_sense_cache(shost);
+ if (error)
+ goto fail;
+
if (shost_use_blk_mq(shost)) {
error = scsi_mq_setup_tags(shost);
if (error)
@@ -226,19 +230,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
}
}
- /*
- * Note that we allocate the freelist even for the MQ case for now,
- * as we need a command set aside for scsi_reset_provider. Having
- * the full host freelist and one command available for that is a
- * little heavy-handed, but avoids introducing a special allocator
- * just for this. Eventually the structure of scsi_reset_provider
- * will need a major overhaul.
- */
- error = scsi_setup_command_freelist(shost);
- if (error)
- goto out_destroy_tags;
-
-
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
if (!dma_dev)
@@ -258,7 +249,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
error = device_add(&shost->shost_gendev);
if (error)
- goto out_destroy_freelist;
+ goto out_disable_runtime_pm;
scsi_host_set_state(shost, SHOST_RUNNING);
get_device(shost->shost_gendev.parent);
@@ -308,13 +299,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_del(&shost->shost_dev);
out_del_gendev:
device_del(&shost->shost_gendev);
- out_destroy_freelist:
+ out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_destroy_command_freelist(shost);
- out_destroy_tags:
if (shost_use_blk_mq(shost))
scsi_mq_destroy_tags(shost);
fail:
@@ -355,7 +344,6 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- scsi_destroy_command_freelist(shost);
if (shost_use_blk_mq(shost)) {
if (shost->tag_set.tags)
scsi_mq_destroy_tags(shost);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cbc0c5fe5a60..524a0c755ed7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5539,8 +5539,8 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
* Retries always go down the normal I/O path.
*/
if (likely(cmd->retries == 0 &&
- cmd->request->cmd_type == REQ_TYPE_FS &&
- h->acciopath_status)) {
+ !blk_rq_is_passthrough(cmd->request) &&
+ h->acciopath_status)) {
rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
if (rc == 0)
return 0;
@@ -9263,13 +9263,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
access = SA5_ioaccel_mode1_access;
writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
writel(4, &h->cfgtable->HostWrite.CoalIntCount);
- } else {
- if (trans_support & CFGTBL_Trans_io_accel2) {
+ } else
+ if (trans_support & CFGTBL_Trans_io_accel2)
access = SA5_ioaccel_mode2_access;
- writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
- writel(4, &h->cfgtable->HostWrite.CoalIntCount);
- }
- }
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
if (hpsa_wait_for_mode_change_ack(h)) {
dev_err(&h->pdev->dev,
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 64e98295b707..bf6cdc106654 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -578,38 +578,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
}
static struct access_method SA5_access = {
- SA5_submit_command,
- SA5_intr_mask,
- SA5_intr_pending,
- SA5_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5_intr_mask,
+ .intr_pending = SA5_intr_pending,
+ .command_completed = SA5_completed,
};
static struct access_method SA5_ioaccel_mode1_access = {
- SA5_submit_command,
- SA5_performant_intr_mask,
- SA5_ioaccel_mode1_intr_pending,
- SA5_ioaccel_mode1_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5_performant_intr_mask,
+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
+ .command_completed = SA5_ioaccel_mode1_completed,
};
static struct access_method SA5_ioaccel_mode2_access = {
- SA5_submit_command_ioaccel2,
- SA5_performant_intr_mask,
- SA5_performant_intr_pending,
- SA5_performant_completed,
+ .submit_command = SA5_submit_command_ioaccel2,
+ .set_intr_mask = SA5_performant_intr_mask,
+ .intr_pending = SA5_performant_intr_pending,
+ .command_completed = SA5_performant_completed,
};
static struct access_method SA5_performant_access = {
- SA5_submit_command,
- SA5_performant_intr_mask,
- SA5_performant_intr_pending,
- SA5_performant_completed,
+ .submit_command = SA5_submit_command,
+ .set_intr_mask = SA5_performant_intr_mask,
+ .intr_pending = SA5_performant_intr_pending,
+ .command_completed = SA5_performant_completed,
};
static struct access_method SA5_performant_access_no_read = {
- SA5_submit_command_no_read,
- SA5_performant_intr_mask,
- SA5_performant_intr_pending,
- SA5_performant_completed,
+ .submit_command = SA5_submit_command_no_read,
+ .set_intr_mask = SA5_performant_intr_mask,
+ .intr_pending = SA5_performant_intr_pending,
+ .command_completed = SA5_performant_completed,
};
struct board_type {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 78b72c28a55d..2c92dabb55f6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3090,6 +3090,7 @@ static struct scsi_host_template driver_template = {
.name = "IBM POWER Virtual FC Adapter",
.proc_name = IBMVFC_NAME,
.queuecommand = ibmvfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = ibmvfc_eh_abort_handler,
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 50cd01165e35..1deb0a9f14a6 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2072,6 +2072,7 @@ static struct scsi_host_template driver_template = {
.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
.proc_name = "ibmvscsi",
.queuecommand = ibmvscsi_queuecommand,
+ .eh_timed_out = srp_timed_out,
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 8fb5c54c7dd3..0f807798c624 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
#define INITIAL_SRP_LIMIT 800
#define DEFAULT_MAX_SECTORS 256
+#define MAX_TXU 1024 * 1024
static uint max_vdma_size = MAX_H_COPY_RDMA;
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
}
info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!info) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
info->mad_version = cpu_to_be32(MAD_VERSION_1);
info->os_type = cpu_to_be32(LINUX);
memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
- info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+ info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
dma_wmb();
rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
}
cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!cap) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -3815,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
.name = "ibmvscsis",
+ .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
.get_fabric_name = ibmvscsis_get_fabric_name,
.tpg_get_wwn = ibmvscsis_get_fabric_wwn,
.tpg_get_tag = ibmvscsis_get_tag,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index ace4f1f41b8e..4228aba1f654 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -967,6 +967,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.sg_tablesize = 4096,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 919736a74ffa..aa76f36abe03 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2095,7 +2095,7 @@ int fc_lport_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
if (rsp)
- rsp->resid_len = job->reply_payload.payload_len;
+ scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba689ff..834d1212b6d5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1930,7 +1930,7 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
return 0;
}
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
struct iscsi_task *task = NULL, *running_task;
@@ -2063,6 +2063,7 @@ done:
"timer reset" : "nh");
return rc;
}
+EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
static void iscsi_check_transport_timeouts(unsigned long data)
{
@@ -2585,8 +2586,6 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
if (!shost->cmd_per_lun)
shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
- if (!shost->transportt->eh_timed_out)
- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
return scsi_add_host(shost, pdev);
}
EXPORT_SYMBOL_GPL(iscsi_host_add);
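
With iscsi_eh_cmd_timed_out exported, each LLD points its own scsi_host_template at the timeout handler directly, as the fcoe, fnic, ibmvfc and iscsi_tcp hunks above do, instead of patching the transport template at host-add time. A minimal sketch for the iSCSI case; the template is deliberately incomplete (no sizing fields) and the name is illustrative, while the handler symbols are the ones shown in this patch.

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

static struct scsi_host_template example_iscsi_sht = {
	.module			= THIS_MODULE,
	.name			= "example-iscsi",
	.queuecommand		= iscsi_queuecommand,
	/* per-command timeout handling now lives in the host template */
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler	= iscsi_eh_abort,
};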
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..570b2cb2da43 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2174,12 +2174,12 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
bio_data(rsp->bio), blk_rq_bytes(rsp));
if (ret > 0) {
/* positive number is the untransferred residual */
- rsp->resid_len = ret;
- req->resid_len = 0;
+ scsi_req(rsp)->resid_len = ret;
+ scsi_req(req)->resid_len = 0;
ret = 0;
} else if (ret == 0) {
- rsp->resid_len = 0;
- req->resid_len = 0;
+ scsi_req(rsp)->resid_len = 0;
+ scsi_req(req)->resid_len = 0;
}
return ret;
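
As the libsas hunks above show, SMP request and reply residuals now live in the attached struct scsi_request rather than directly on struct request, reached through the scsi_req() accessor. A tiny sketch of the accessor usage, with a hypothetical helper name; scsi_req() and the resid_len field are used exactly as in the diff.

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

/* Record the untransferred residual for an SMP request/reply pair. */
static void example_set_residuals(struct request *req, struct request *rsp,
				  unsigned int rsp_residual)
{
	scsi_req(req)->resid_len = 0;
	scsi_req(rsp)->resid_len = rsp_residual;
}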
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d24792575169..45cbbc44f4d7 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -274,15 +274,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
- req->resid_len -= 8;
- rsp->resid_len -= 32;
+ scsi_req(req)->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 32;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
break;
case SMP_REPORT_MANUF_INFO:
- req->resid_len -= 8;
- rsp->resid_len -= 64;
+ scsi_req(req)->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 64;
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
@@ -295,13 +295,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_DISCOVER:
- req->resid_len -= 16;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 16;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 56;
+ scsi_req(rsp)->resid_len -= 56;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
break;
@@ -311,13 +311,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_REPORT_PHY_SATA:
- req->resid_len -= 16;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 16;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 60;
+ scsi_req(rsp)->resid_len -= 60;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
break;
@@ -331,15 +331,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
int to_write = req_data[4];
if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
- req->resid_len < base_frame_size + to_write * 4) {
+ scsi_req(req)->resid_len < base_frame_size + to_write * 4) {
resp_data[2] = SMP_RESP_INV_FRM_LEN;
break;
}
to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
req_data[3], to_write, &req_data[8]);
- req->resid_len -= base_frame_size + to_write * 4;
- rsp->resid_len -= 8;
+ scsi_req(req)->resid_len -= base_frame_size + to_write * 4;
+ scsi_req(rsp)->resid_len -= 8;
break;
}
@@ -348,13 +348,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_PHY_CONTROL:
- req->resid_len -= 44;
- if ((int)req->resid_len < 0) {
- req->resid_len = 0;
+ scsi_req(req)->resid_len -= 44;
+ if ((int)scsi_req(req)->resid_len < 0) {
+ scsi_req(req)->resid_len = 0;
error = -EINVAL;
goto out;
}
- rsp->resid_len -= 8;
+ scsi_req(rsp)->resid_len -= 8;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 362da44f2948..15ef8e2e685c 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -560,7 +560,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
i = to_sas_internal(stt);
i->dft = dft;
stt->create_work_queue = 1;
- stt->eh_timed_out = sas_scsi_timed_out;
stt->eh_strategy_handler = sas_scsi_recover_host;
return stt;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 9cf0bc260b0e..b306b7843d99 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -64,8 +64,6 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
-
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4e341e..9bd55bce83af 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -803,13 +803,6 @@ out:
shost->host_failed, tries);
}
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
-{
- scmd_dbg(cmd, "command %p timed out\n", cmd);
-
- return BLK_EH_NOT_HANDLED;
-}
-
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8a20b4e86224..6593b073c524 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -727,7 +727,6 @@ struct lpfc_hba {
uint32_t cfg_fcp_io_channel;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
- uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c84775562c65..50cf402dea29 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2073,6 +2073,13 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->soft_wwn_enable = 1;
+
+ dev_printk(KERN_WARNING, &phba->pcidev->dev,
+ "lpfc%d: soft_wwpn assignment has been enabled.\n",
+ phba->brd_no);
+ dev_printk(KERN_WARNING, &phba->pcidev->dev,
+ " The soft_wwpn feature is not supported by Broadcom.");
+
return count;
}
static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
@@ -2143,7 +2150,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
phba->soft_wwn_enable = 0;
rc = lpfc_wwn_set(buf, cnt, wwpn);
- if (!rc) {
+ if (rc) {
/* not able to set wwpn, unlock it */
phba->soft_wwn_enable = 1;
return rc;
@@ -2224,7 +2231,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
rc = lpfc_wwn_set(buf, cnt, wwnn);
- if (!rc) {
+ if (rc) {
/* Allow wwnn to be set many times, as long as the enable
* is set. However, once the wwpn is set, everything locks.
*/
@@ -2435,7 +2442,8 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
else
phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
phba->cfg_oas_flags &= ~OAS_LUN_VALID;
- phba->cfg_oas_priority = phba->cfg_XLanePriority;
+ if (phba->cfg_oas_priority == 0)
+ phba->cfg_oas_priority = phba->cfg_XLanePriority;
phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
return count;
}
@@ -2561,7 +2569,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
rc = -ENOMEM;
} else {
lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
- (struct lpfc_name *)tgt_wwpn, lun);
+ (struct lpfc_name *)tgt_wwpn, lun, pri);
}
return rc;
@@ -2585,7 +2593,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
*/
static uint64_t
lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
- uint8_t tgt_wwpn[], uint32_t *lun_status)
+ uint8_t tgt_wwpn[], uint32_t *lun_status,
+ uint32_t *lun_pri)
{
uint64_t found_lun;
@@ -2598,7 +2607,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
&phba->sli4_hba.oas_next_lun,
(struct lpfc_name *)vpt_wwpn,
(struct lpfc_name *)tgt_wwpn,
- &found_lun, lun_status))
+ &found_lun, lun_status, lun_pri))
return found_lun;
else
return NOT_OAS_ENABLED_LUN;
@@ -2670,7 +2679,8 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
phba->cfg_oas_tgt_wwpn,
- &phba->cfg_oas_lun_status);
+ &phba->cfg_oas_lun_status,
+ &phba->cfg_oas_priority);
if (oas_lun != NOT_OAS_ENABLED_LUN)
phba->cfg_oas_flags |= OAS_LUN_VALID;
@@ -2701,6 +2711,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint64_t scsi_lun;
+ uint32_t pri;
ssize_t rc;
if (!phba->cfg_fof)
@@ -2718,17 +2729,20 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
return -EINVAL;
+ pri = phba->cfg_oas_priority;
+ if (pri == 0)
+ pri = phba->cfg_XLanePriority;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
"priority 0x%x with oas state %d\n",
wwn_to_u64(phba->cfg_oas_vpt_wwpn),
wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
- phba->cfg_oas_priority, phba->cfg_oas_lun_state);
+ pri, phba->cfg_oas_lun_state);
rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
phba->cfg_oas_tgt_wwpn, scsi_lun,
- phba->cfg_oas_lun_state,
- phba->cfg_oas_priority);
+ phba->cfg_oas_lun_state, pri);
if (rc)
return rc;
@@ -4670,14 +4684,6 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
- * This parameter will be depricated, the driver cannot limit the
- * protection data s/g list.
- */
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
- LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
- "Max Protection Scatter Gather Segment Count");
-
-/*
* lpfc_enable_mds_diags: Enable MDS Diagnostics
* 0 = MDS Diagnostics disabled (default)
* 1 = MDS Diagnostics enabled
@@ -4766,7 +4772,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
- &dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_sriov_nr_virtfn,
@@ -5061,6 +5066,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
*/
/**
+ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+
+ lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+ sizeof fc_host_symbolic_name(shost));
+}
+
+/**
* lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
* @shost: kernel scsi host pointer.
**/
@@ -5597,6 +5615,8 @@ struct fc_function_template lpfc_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+
+ .get_host_symbolic_name = lpfc_get_host_symbolic_name,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
@@ -5664,6 +5684,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+
+ .get_host_symbolic_name = lpfc_get_host_symbolic_name,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
@@ -5768,7 +5790,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
- lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 15d2bfdf582d..309643a2c55c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -480,7 +480,7 @@ void lpfc_sli4_offline_eratt(struct lpfc_hba *);
struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
struct lpfc_name *,
struct lpfc_name *,
- uint64_t, bool);
+ uint64_t, uint32_t, bool);
void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
struct list_head *list,
@@ -489,9 +489,10 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
struct lpfc_name *, uint64_t, uint8_t);
bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
- struct lpfc_name *, uint64_t);
+ struct lpfc_name *, uint64_t, uint8_t);
bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
struct lpfc_name *, uint64_t *, struct lpfc_name *,
- struct lpfc_name *, uint64_t *, uint32_t *);
+ struct lpfc_name *, uint64_t *,
+ uint32_t *, uint32_t *);
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a63542bac153..caa7a7b0ec53 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -607,7 +607,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e51d161..3a1f1a2a2b55 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1999,6 +1999,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ sp->cmn.valid_vendor_ver_level = 0;
+ memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
@@ -3590,12 +3593,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -3688,7 +3693,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
@@ -3988,6 +3993,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
} else {
memcpy(pcmd, &vport->fc_sparam,
sizeof(struct serv_parm));
+
+ sp->cmn.valid_vendor_ver_level = 0;
+ memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
@@ -8849,8 +8857,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct ls_rjt stat;
- if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
- BUG();
+ BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
switch (rspiocb->iocb.ulpStatus) {
case IOSTAT_NPORT_RJT:
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed223937798a..82047070cdc9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3440,7 +3440,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3861,7 +3861,7 @@ out:
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
@@ -4238,7 +4238,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0277 lpfc_enable_node: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
return NULL;
}
/* The ndlp should not already be in active mode */
@@ -4248,7 +4248,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0278 lpfc_enable_node: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
return NULL;
}
@@ -4272,7 +4272,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
}
@@ -4546,7 +4546,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2) &&
- (atomic_read(&ndlp->kref.refcount) > 0)) {
+ (kref_read(&ndlp->kref) > 0)) {
mbox->context1 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl =
lpfc_sli4_unreg_rpi_cmpl_clr;
@@ -4695,14 +4695,14 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"0280 lpfc_cleanup_node: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
lpfc_dequeue_node(vport, ndlp);
} else {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0281 lpfc_cleanup_node: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
lpfc_disable_node(vport, ndlp);
}
@@ -4791,7 +4791,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
!= NULL) {
@@ -5557,7 +5557,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
@@ -5728,7 +5728,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount),
+ kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
ndlp->active_rrqs_xri_bitmap =
@@ -5767,7 +5767,7 @@ lpfc_nlp_release(struct kref *kref)
"0279 lpfc_nlp_release: ndlp:x%p did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
+ kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);
@@ -5804,7 +5804,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
/* The check of ndlp usage to prevent incrementing the
* ndlp reference count that is in the process of being
* released.
@@ -5817,7 +5817,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
"0276 lpfc_nlp_get: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
return NULL;
} else
kref_get(&ndlp->kref);
@@ -5844,7 +5844,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node put: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
@@ -5857,7 +5857,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
"0274 lpfc_nlp_put: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
return 1;
}
/* Check the ndlp inactivate log flag to avoid the possible
@@ -5870,7 +5870,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
"0275 lpfc_nlp_put: ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
return 1;
}
/* For last put, mark the ndlp usage flags to make sure no
@@ -5878,7 +5878,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
* in between the process when the final kref_put has been
* invoked on this ndlp.
*/
- if (atomic_read(&ndlp->kref.refcount) == 1) {
+ if (kref_read(&ndlp->kref) == 1) {
/* Indicate ndlp is put to inactive state. */
NLP_SET_IACT_REQ(ndlp);
/* Acknowledge ndlp memory free has been seen. */
@@ -5906,8 +5906,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
- if (atomic_read(&ndlp->kref.refcount) == 1)
+ kref_read(&ndlp->kref));
+ if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
return 0;
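The atomic_read(&...->kref.refcount) to kref_read() conversion throughout lpfc follows the kref/refcount_t rework: the counter inside struct kref is no longer a plain atomic_t, so callers that only want to peek at the value use the accessor instead. A one-line sketch:

	#include <linux/kref.h>

	/* debug/trace output only; the value may change as soon as it is read */
	unsigned int refs = kref_read(&ndlp->kref);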
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 822654322e67..3b970d370600 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -360,6 +360,12 @@ struct csp {
* Word 1 Bit 30 in PLOGI request is random offset
*/
#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+/*
+ * Word 1 Bit 29 in common service parameter is overloaded.
+ * Word 1 Bit 29 in FLOGI response is multiple NPort assignment
+ * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level
+ */
+#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
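The new valid_vendor_ver_level define overloads an existing bitfield rather than adding one: FC Word 1 bit 29 means "multiple NPort assignment" in a FLOGI response but "valid vendor version level" in a FLOGI/PLOGI request, so the same storage bit is exposed under both names. A reduced sketch of the aliasing technique used here:

	/* the same storage bit carries two meanings, depending on direction */
	#define valid_vendor_ver_level response_multiple_NPort	/* Word 1, bit 29 */

	/* request path clears the bit under its request-side name */
	sp->cmn.valid_vendor_ver_level = 0;	/* expands to sp->cmn.response_multiple_NPort = 0 */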
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4776fd85514f..64717c171b15 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2660,8 +2660,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
- atomic_read(
- &ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
}
break;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ad350d969bdc..1180a22beb43 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5452,7 +5452,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
device_data = lpfc_create_device_data(phba,
&vport->fc_portname,
&target_wwpn,
- sdev->lun, true);
+ sdev->lun,
+ phba->cfg_XLanePriority,
+ true);
if (!device_data)
return -ENOMEM;
spin_lock_irqsave(&phba->devicelock, flags);
@@ -5587,7 +5589,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
struct lpfc_name *target_wwpn, uint64_t lun,
- bool atomic_create)
+ uint32_t pri, bool atomic_create)
{
struct lpfc_device_data *lun_info;
@@ -5614,7 +5616,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
sizeof(struct lpfc_name));
lun_info->device_id.lun = lun;
lun_info->oas_enabled = false;
- lun_info->priority = phba->cfg_XLanePriority;
+ lun_info->priority = pri;
lun_info->available = false;
return lun_info;
}
@@ -5716,7 +5718,8 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
struct lpfc_name *found_vport_wwpn,
struct lpfc_name *found_target_wwpn,
uint64_t *found_lun,
- uint32_t *found_lun_status)
+ uint32_t *found_lun_status,
+ uint32_t *found_lun_pri)
{
unsigned long flags;
@@ -5763,6 +5766,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
OAS_LUN_STATUS_EXISTS;
else
*found_lun_status = 0;
+ *found_lun_pri = lun_info->priority;
if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
memset(vport_wwpn, 0x0,
sizeof(struct lpfc_name));
@@ -5824,13 +5828,14 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
if (lun_info) {
if (!lun_info->oas_enabled)
lun_info->oas_enabled = true;
+ lun_info->priority = pri;
spin_unlock_irqrestore(&phba->devicelock, flags);
return true;
}
/* Create an lun info structure and add to list of luns */
lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
- false);
+ pri, false);
if (lun_info) {
lun_info->oas_enabled = true;
lun_info->priority = pri;
@@ -5864,7 +5869,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
**/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
- struct lpfc_name *target_wwpn, uint64_t lun)
+ struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{
struct lpfc_device_data *lun_info;
@@ -5882,6 +5887,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
target_wwpn, lun);
if (lun_info) {
lun_info->oas_enabled = false;
+ lun_info->priority = pri;
if (!lun_info->available)
lpfc_delete_device_data(phba, lun_info);
spin_unlock_irqrestore(&phba->devicelock, flags);
@@ -5923,6 +5929,7 @@ struct scsi_host_template lpfc_template = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -5949,6 +5956,7 @@ struct scsi_host_template lpfc_vport_template = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
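Adding .eh_timed_out = fc_eh_timed_out to the lpfc host templates is the flip side of dropping the default handler from the transport templates (see the libsas and libiscsi hunks above): each FC LLDD now states its timeout policy in its own scsi_host_template, and fc_eh_timed_out() is the stock handler provided by the FC transport class. A trimmed sketch of the pattern, with placeholder names:

	#include <scsi/scsi_transport_fc.h>

	static struct scsi_host_template example_fc_sht = {
		.name		= "example-fc",
		.queuecommand	= example_queuecommand,	/* driver-specific */
		.eh_timed_out	= fc_eh_timed_out,	/* defer to the FC remote-port state */
	};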
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa7672fc1d..d977a472f89f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -120,6 +120,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+ /* ensure WQE bcopy flushed before doorbell write */
+ wmb();
/* Update the host index before invoking device */
host_index = q->host_index;
@@ -5954,18 +5956,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
@@ -6306,7 +6315,8 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
LPFC_SLI4_MBX_EMBED);
mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
- mbox->u.mqe.un.set_host_data.param_len = 8;
+ mbox->u.mqe.un.set_host_data.param_len =
+ LPFC_HOST_OS_DRIVER_VERSION_SIZE;
snprintf(mbox->u.mqe.un.set_host_data.data,
LPFC_HOST_OS_DRIVER_VERSION_SIZE,
"Linux %s v"LPFC_DRIVER_VERSION,
@@ -10028,6 +10038,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ abtsiocbp->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
@@ -17219,7 +17230,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
- union lpfc_wqe wqe;
+ union lpfc_wqe128 wqe128;
+ union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
uint32_t txq_cnt = 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -17258,9 +17270,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
- else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+ else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
fail_msg = " - Wq is full";
else
lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
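The wmb() added in lpfc_sli4_wq_put() orders the WQE copy against the later doorbell write: without a write barrier, the MMIO doorbell could become visible to the adapter before the WQE contents have left the CPU's store buffers. The shape of the fix, with the doorbell write shown only illustratively:

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* ensure the WQE payload is globally visible before ringing the doorbell */
	wmb();

	writel(doorbell.word0, q->db_regaddr);	/* illustrative; the actual write happens later in the function */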
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 50bfc43ebcb0..0ee0623a354c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.2"
+#define LPFC_DRIVER_VERSION "11.2.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c27f4b724547..e18bbc66e83b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -537,6 +537,12 @@ enable_vport(struct fc_vport *fc_vport)
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_LOADING;
+ if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ spin_unlock_irq(shost->host_lock);
+ lpfc_issue_init_vpi(vport);
+ goto out;
+ }
+
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
@@ -557,6 +563,8 @@ enable_vport(struct fc_vport *fc_vport)
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
}
+
+out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1827 Vport Enabled.\n");
return VPORT_OK;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index ccb68d12692c..196acc79714b 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -154,7 +154,7 @@ __asm__ __volatile__ \
static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
- unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+ u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
int n = len;
int transferred;
@@ -257,7 +257,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
unsigned char *s = src;
- unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+ u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
int n = len;
int transferred;
@@ -381,10 +381,10 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
hostdata = shost_priv(instance);
hostdata->base = pio_mem->start;
- hostdata->io = (void *)pio_mem->start;
+ hostdata->io = (u8 __iomem *)pio_mem->start;
if (pdma_mem && setup_use_pdma)
- hostdata->pdma_io = (void *)pdma_mem->start;
+ hostdata->pdma_io = (u8 __iomem *)pdma_mem->start;
else
host_flags |= FLAG_NO_PSEUDO_DMA;
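The mac_scsi changes are annotation-only: pointers that refer to memory-mapped I/O gain the __iomem address-space qualifier so that sparse ("make C=1") can flag accidental mixing of MMIO pointers with ordinary kernel pointers. For example, following the hunk above:

	u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);

	/* sparse now warns if 'd' is dereferenced directly instead of going
	 * through MMIO accessors or an explicit, deliberate cast */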
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fdd519c1dd57..e7e5974e1a2c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.812.07.00-rc1"
-#define MEGASAS_RELDATE "August 22, 2016"
+#define MEGASAS_VERSION "07.701.16.00-rc1"
+#define MEGASAS_RELDATE "February 2, 2017"
/*
* Device IDs
@@ -56,6 +56,11 @@
#define PCI_DEVICE_ID_LSI_INTRUDER_24 0x00cf
#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052
#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053
+#define PCI_DEVICE_ID_LSI_VENTURA 0x0014
+#define PCI_DEVICE_ID_LSI_HARPOON 0x0016
+#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
+#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
+#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C
/*
* Intel HBA SSDIDs
@@ -100,7 +105,7 @@
*/
/*
- * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
* protocol between the software and firmware. Commands are issued using
* "message frames"
*/
@@ -690,6 +695,18 @@ struct MR_PD_INFO {
u8 reserved1[512-428];
} __packed;
+/*
+ * Definition of structure used to expose attributes of VD or JBOD
+ * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP
+ * is fired by driver)
+ */
+struct MR_TARGET_PROPERTIES {
+ u32 max_io_size_kb;
+ u32 device_qdepth;
+ u32 sector_size;
+ u8 reserved[500];
+} __packed;
+
/*
* defines the physical drive address structure
*/
@@ -728,7 +745,6 @@ struct megasas_pd_list {
u16 tid;
u8 driveType;
u8 driveState;
- u8 interface;
} __packed;
/*
@@ -1312,7 +1328,55 @@ struct megasas_ctrl_info {
#endif
} adapterOperations3;
- u8 pad[0x800-0x7EC];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:7;
+ /* Indicates whether the CPLD image is part of
+ * the package and stored in flash
+ */
+ u8 cpld_in_flash:1;
+#else
+ u8 cpld_in_flash:1;
+ u8 reserved:7;
+#endif
+ u8 reserved1[3];
+ /* Null terminated string. Has the version
+ * information if cpld_in_flash = FALSE
+ */
+ u8 userCodeDefinition[12];
+ } cpld; /* Valid only if upgradableCPLD is TRUE */
+
+ struct {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u16 reserved:8;
+ u16 fw_swaps_bbu_vpd_info:1;
+ u16 support_pd_map_target_id:1;
+ u16 support_ses_ctrl_in_multipathcfg:1;
+ u16 image_upload_supported:1;
+ u16 support_encrypted_mfc:1;
+ u16 supported_enc_algo:1;
+ u16 support_ibutton_less:1;
+ u16 ctrl_info_ext_supported:1;
+ #else
+
+ u16 ctrl_info_ext_supported:1;
+ u16 support_ibutton_less:1;
+ u16 supported_enc_algo:1;
+ u16 support_encrypted_mfc:1;
+ u16 image_upload_supported:1;
+ /* FW supports LUN based association and target port based */
+ u16 support_ses_ctrl_in_multipathcfg:1;
+ /* association for the SES device connected in multipath mode */
+ /* FW defines Jbod target Id within MR_PD_CFG_SEQ */
+ u16 support_pd_map_target_id:1;
+ /* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to
+ * provide the data in little endian order
+ */
+ u16 fw_swaps_bbu_vpd_info:1;
+ u16 reserved:8;
+ #endif
+ } adapter_operations4;
+ u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
} __packed;
/*
@@ -1339,12 +1403,15 @@ struct megasas_ctrl_info {
#define MEGASAS_FW_BUSY 1
-#define VD_EXT_DEBUG 0
+/* Driver's internal Logging levels*/
+#define OCR_LOGS (1 << 0)
#define SCAN_PD_CHANNEL 0x1
#define SCAN_VD_CHANNEL 0x2
#define MEGASAS_KDUMP_QUEUE_DEPTH 100
+#define MR_LARGE_IO_MIN_SIZE (32 * 1024)
+#define MR_R1_LDIO_PIGGYBACK_DEFAULT 4
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
@@ -1391,7 +1458,7 @@ enum FW_BOOT_CONTEXT {
*/
#define MEGASAS_INT_CMDS 32
#define MEGASAS_SKINNY_INT_CMDS 5
-#define MEGASAS_FUSION_INTERNAL_CMDS 5
+#define MEGASAS_FUSION_INTERNAL_CMDS 8
#define MEGASAS_FUSION_IOCTL_CMDS 3
#define MEGASAS_MFI_IOCTL_CMDS 27
@@ -1429,13 +1496,19 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
#define MR_RDPQ_MODE_OFFSET 0X00800000
+
+#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
+#define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF
+#define MR_MIN_MAP_SIZE 0x10000
+/* 64k */
+
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
*/
-
+
struct megasas_register_set {
u32 doorbell; /*0000h*/
u32 fusion_seq_offset; /*0004h*/
@@ -1471,14 +1544,14 @@ struct megasas_register_set {
u32 outbound_scratch_pad ; /*00B0h*/
u32 outbound_scratch_pad_2; /*00B4h*/
u32 outbound_scratch_pad_3; /*00B8h*/
+ u32 outbound_scratch_pad_4; /*00BCh*/
- u32 reserved_4; /*00BCh*/
u32 inbound_low_queue_port ; /*00C0h*/
u32 inbound_high_queue_port ; /*00C4h*/
- u32 reserved_5; /*00C8h*/
+ u32 inbound_single_queue_port; /*00C8h*/
u32 res_6[11]; /*CCh*/
u32 host_diag;
u32 seq_offset;
@@ -1544,33 +1617,35 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:20;
- u32 support_qd_throttling:1;
- u32 support_fp_rlbypass:1;
- u32 support_vfid_in_ioframe:1;
- u32 support_ext_io_size:1;
- u32 support_ext_queue_depth:1;
- u32 security_protocol_cmds_fw:1;
- u32 support_core_affinity:1;
- u32 support_ndrive_r1_lb:1;
- u32 support_max_255lds:1;
- u32 support_fastpath_wb:1;
- u32 support_additional_msix:1;
- u32 support_fp_remote_lun:1;
+ u32 reserved:19;
+ u32 support_pd_map_target_id:1;
+ u32 support_qd_throttling:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_vfid_in_ioframe:1;
+ u32 support_ext_io_size:1;
+ u32 support_ext_queue_depth:1;
+ u32 security_protocol_cmds_fw:1;
+ u32 support_core_affinity:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_max_255lds:1;
+ u32 support_fastpath_wb:1;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
#else
- u32 support_fp_remote_lun:1;
- u32 support_additional_msix:1;
- u32 support_fastpath_wb:1;
- u32 support_max_255lds:1;
- u32 support_ndrive_r1_lb:1;
- u32 support_core_affinity:1;
- u32 security_protocol_cmds_fw:1;
- u32 support_ext_queue_depth:1;
- u32 support_ext_io_size:1;
- u32 support_vfid_in_ioframe:1;
- u32 support_fp_rlbypass:1;
- u32 support_qd_throttling:1;
- u32 reserved:20;
+ u32 support_fp_remote_lun:1;
+ u32 support_additional_msix:1;
+ u32 support_fastpath_wb:1;
+ u32 support_max_255lds:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_core_affinity:1;
+ u32 security_protocol_cmds_fw:1;
+ u32 support_ext_queue_depth:1;
+ u32 support_ext_io_size:1;
+ u32 support_vfid_in_ioframe:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_qd_throttling:1;
+ u32 support_pd_map_target_id:1;
+ u32 reserved:19;
#endif
} mfi_capabilities;
__le32 reg;
@@ -1803,6 +1878,8 @@ union megasas_frame {
struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
+ atomic_t r1_ldio_hint;
+ u8 interface_type;
};
struct megasas_cmd;
@@ -1994,17 +2071,24 @@ struct MR_DRV_SYSTEM_INFO {
};
enum MR_PD_TYPE {
- UNKNOWN_DRIVE = 0,
- PARALLEL_SCSI = 1,
- SAS_PD = 2,
- SATA_PD = 3,
- FC_PD = 4,
+ UNKNOWN_DRIVE = 0,
+ PARALLEL_SCSI = 1,
+ SAS_PD = 2,
+ SATA_PD = 3,
+ FC_PD = 4,
+ NVME_PD = 5,
};
/* JBOD Queue depth definitions */
#define MEGASAS_SATA_QD 32
#define MEGASAS_SAS_QD 64
#define MEGASAS_DEFAULT_PD_QD 64
+#define MEGASAS_NVME_QD 32
+
+#define MR_DEFAULT_NVME_PAGE_SIZE 4096
+#define MR_DEFAULT_NVME_PAGE_SHIFT 12
+#define MR_DEFAULT_NVME_MDTS_KB 128
+#define MR_NVME_PAGE_SIZE_MASK 0x000000FF
struct megasas_instance {
@@ -2022,6 +2106,8 @@ struct megasas_instance {
dma_addr_t hb_host_mem_h;
struct MR_PD_INFO *pd_info;
dma_addr_t pd_info_h;
+ struct MR_TARGET_PROPERTIES *tgt_prop;
+ dma_addr_t tgt_prop_h;
__le32 *reply_queue;
dma_addr_t reply_queue_h;
@@ -2039,6 +2125,7 @@ struct megasas_instance {
u32 crash_dump_drv_support;
u32 crash_dump_app_support;
u32 secure_jbod_support;
+ u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
spinlock_t crashdump_lock;
@@ -2051,6 +2138,7 @@ struct megasas_instance {
u16 max_num_sge;
u16 max_fw_cmds;
+ u16 max_mpt_cmds;
u16 max_mfi_cmds;
u16 max_scsi_cmds;
u16 ldio_threshold;
@@ -2065,6 +2153,7 @@ struct megasas_instance {
/* used to sync fire the cmd to fw */
spinlock_t hba_lock;
/* used to synch producer, consumer ptrs in dpc */
+ spinlock_t stream_lock;
spinlock_t completion_lock;
struct dma_pool *frame_dma_pool;
struct dma_pool *sense_dma_pool;
@@ -2087,6 +2176,11 @@ struct megasas_instance {
atomic_t fw_outstanding;
atomic_t ldio_outstanding;
atomic_t fw_reset_no_pci_access;
+ atomic_t ieee_sgl;
+ atomic_t prp_sgl;
+ atomic_t sge_holes_type1;
+ atomic_t sge_holes_type2;
+ atomic_t sge_holes_type3;
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
@@ -2142,6 +2236,13 @@ struct megasas_instance {
u8 is_rdpq;
bool dev_handle;
bool fw_sync_cache_support;
+ u32 mfi_frame_size;
+ bool is_ventura;
+ bool msix_combined;
+ u16 max_raid_mapsize;
+ /* preferred count to send as LDIO irrespective of FP capable. */
+ u8 r1_ldio_hint_default;
+ u32 nvme_page_size;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2230,12 +2331,12 @@ struct megasas_instance_template {
u32 (*init_adapter)(struct megasas_instance *);
u32 (*build_and_issue_cmd) (struct megasas_instance *,
struct scsi_cmnd *);
- int (*issue_dcmd)(struct megasas_instance *instance,
+ void (*issue_dcmd)(struct megasas_instance *instance,
struct megasas_cmd *cmd);
};
-#define MEGASAS_IS_LOGICAL(scp) \
- ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LOGICAL(sdev) \
+ ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
@@ -2346,7 +2447,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
@@ -2354,13 +2455,16 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
__le16 get_updated_dev_handle(struct megasas_instance *instance,
- struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+ struct LD_LOAD_BALANCE_INFO *lbInfo,
+ struct IO_REQUEST_INFO *in_info,
+ struct MR_DRV_RAID_MAP_ALL *drv_map);
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
int megasas_get_ctrl_info(struct megasas_instance *instance);
/* PD sequence */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
@@ -2382,4 +2486,7 @@ void megasas_update_sdev_properties(struct scsi_device *sdev);
int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
+u32 mega_mod64(u64 dividend, u32 divisor);
+int megasas_alloc_fusion_context(struct megasas_instance *instance);
+void megasas_free_fusion_context(struct megasas_instance *instance);
#endif /*LSI_MEGARAID_SAS_H */
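Among the header changes, MEGASAS_IS_LOGICAL() now takes a scsi_device rather than a scsi_cmnd, which lets the same test be used from slave_alloc()/slave_configure(), where no command exists yet, as well as from the I/O path. Roughly:

	/* old: MEGASAS_IS_LOGICAL(scmd) reached through scmd->device */
	/* new: callers pass the device (or scmd->device) directly */
	if (MEGASAS_IS_LOGICAL(sdev))
		/* target is a RAID volume (logical drive) */;
	else
		/* target is a system PD / JBOD behind one of the PD channels */;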
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d5cf15eb8c5e..7ac9a9ee9bd4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -43,6 +43,7 @@
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
@@ -116,8 +117,10 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
-static int
-megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
+static void megasas_get_pd_info(struct megasas_instance *instance,
+ struct scsi_device *sdev);
+static int megasas_get_target_prop(struct megasas_instance *instance,
+ struct scsi_device *sdev);
/*
* PCI ID table for all supported controllers
*/
@@ -155,6 +158,12 @@ static struct pci_device_id megasas_pci_table[] = {
/* Intruder 24 port*/
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
+ /* VENTURA */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
{}
};
@@ -196,12 +205,12 @@ void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
-int
+void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr, 0, instance->reg_set);
- return 0;
+ return;
}
/**
@@ -259,6 +268,8 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->scmd = NULL;
cmd->frame_count = 0;
cmd->flags = 0;
+ memset(cmd->frame, 0, instance->mfi_frame_size);
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
if (!fusion && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
list_add(&cmd->list, (&instance->cmd_pool)->next);
@@ -989,13 +1000,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
- if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
- (instance->instancet->issue_dcmd(instance, cmd))) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
return DCMD_NOT_FIRED;
}
+ instance->instancet->issue_dcmd(instance, cmd);
+
return wait_and_poll(instance, cmd, instance->requestorId ?
MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}
@@ -1017,13 +1029,14 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
int ret = 0;
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
- (instance->instancet->issue_dcmd(instance, cmd))) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
return DCMD_NOT_FIRED;
}
+ instance->instancet->issue_dcmd(instance, cmd);
+
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1081,13 +1094,14 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cmd->sync_cmd = 1;
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
- (instance->instancet->issue_dcmd(instance, cmd))) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
return DCMD_NOT_FIRED;
}
+ instance->instancet->issue_dcmd(instance, cmd);
+
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1273,7 +1287,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
u16 flags = 0;
struct megasas_pthru_frame *pthru;
- is_logical = MEGASAS_IS_LOGICAL(scp);
+ is_logical = MEGASAS_IS_LOGICAL(scp->device);
device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
@@ -1513,11 +1527,11 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd)
case WRITE_6:
case READ_16:
case WRITE_16:
- ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
break;
default:
- ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
}
return ret;
@@ -1537,7 +1551,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
struct megasas_io_frame *ldio;
struct megasas_pthru_frame *pthru;
u32 sgcount;
- u32 max_cmd = instance->max_fw_cmds;
+ u16 max_cmd = instance->max_fw_cmds;
dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
@@ -1662,7 +1676,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* Check for an mpio path and adjust behavior */
if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
if (megasas_check_mpio_paths(instance, scmd) ==
- (DID_RESET << 16)) {
+ (DID_REQUEUE << 16)) {
return SCSI_MLQUEUE_HOST_BUSY;
} else {
scmd->result = DID_NO_CONNECT << 16;
@@ -1693,15 +1707,16 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
scmd->result = 0;
- if (MEGASAS_IS_LOGICAL(scmd) &&
+ if (MEGASAS_IS_LOGICAL(scmd->device) &&
(scmd->device->id >= instance->fw_supported_vd_count ||
scmd->device->lun)) {
scmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
- if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
- (!instance->fw_sync_cache_support)) {
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
+ MEGASAS_IS_LOGICAL(scmd->device) &&
+ (!instance->fw_sync_cache_support)) {
scmd->result = DID_OK << 16;
goto out_done;
}
@@ -1728,16 +1743,21 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
}
/*
-* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
+* megasas_set_dynamic_target_properties -
+* Device property set by driver may not be static and it is required to be
+* updated after OCR
+*
+* set tm_capable.
+* set dma alignment (only for eedp protection enable vd).
*
* @sdev: OS provided scsi device
*
* Returns void
*/
-void megasas_update_sdev_properties(struct scsi_device *sdev)
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
{
- u16 pd_index = 0;
- u32 device_id, ld;
+ u16 pd_index = 0, ld;
+ u32 device_id;
struct megasas_instance *instance;
struct fusion_context *fusion;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1749,67 +1769,129 @@ void megasas_update_sdev_properties(struct scsi_device *sdev)
fusion = instance->ctrl_context;
mr_device_priv_data = sdev->hostdata;
- if (!fusion)
+ if (!fusion || !mr_device_priv_data)
return;
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
- instance->use_seqnum_jbod_fp) {
- pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
- sdev->id;
- pd_sync = (void *)fusion->pd_seq_sync
- [(instance->pd_seq_map_id - 1) & 1];
- mr_device_priv_data->is_tm_capable =
- pd_sync->seq[pd_index].capability.tmCapable;
- } else {
+ if (MEGASAS_IS_LOGICAL(sdev)) {
device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ sdev->id;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+ if (ld >= instance->fw_supported_vd_count)
+ return;
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
+ } else if (instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ mr_device_priv_data->is_tm_capable =
+ pd_sync->seq[pd_index].capability.tmCapable;
}
}
-static void megasas_set_device_queue_depth(struct scsi_device *sdev)
+/*
+ * megasas_set_nvme_device_properties -
+ * set nomerges=2
+ * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
+ * set maximum io transfer = MDTS of NVME device provided by MR firmware.
+ *
+ * MR firmware provides value in KB. Caller of this function converts
+ * kb into bytes.
+ *
+ * e.g. MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
+ * MR firmware provides value 128 as (32 * 4K) = 128K.
+ *
+ * @sdev: scsi device
+ * @max_io_size: maximum io transfer size
+ *
+ */
+static inline void
+megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
{
- u16 pd_index = 0;
- int ret = DCMD_FAILED;
struct megasas_instance *instance;
+ u32 mr_nvme_pg_size;
- instance = megasas_lookup_instance(sdev->host->host_no);
+ instance = (struct megasas_instance *)sdev->host->hostdata;
+ mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+ MR_DEFAULT_NVME_PAGE_SIZE);
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
- pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+ blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
- if (instance->pd_info) {
- mutex_lock(&instance->hba_mutex);
- ret = megasas_get_pd_info(instance, pd_index);
- mutex_unlock(&instance->hba_mutex);
- }
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+ blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
+}
- if (ret != DCMD_SUCCESS)
- return;
- if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+/*
+ * megasas_set_static_target_properties -
+ * Device property set by driver are static and it is not required to be
+ * updated after OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev: scsi device
+ * @is_target_prop true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
+{
+ u16 target_index = 0;
+ u8 interface_type;
+ u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
+ u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+ u32 tgt_device_qd;
+ struct megasas_instance *instance;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
- switch (instance->pd_list[pd_index].interface) {
- case SAS_PD:
- scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
- break;
+ instance = megasas_lookup_instance(sdev->host->host_no);
+ mr_device_priv_data = sdev->hostdata;
+ interface_type = mr_device_priv_data->interface_type;
- case SATA_PD:
- scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
- break;
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
- default:
- scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
- }
- }
+ target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+ switch (interface_type) {
+ case SAS_PD:
+ device_qd = MEGASAS_SAS_QD;
+ break;
+ case SATA_PD:
+ device_qd = MEGASAS_SATA_QD;
+ break;
+ case NVME_PD:
+ device_qd = MEGASAS_NVME_QD;
+ break;
+ }
+
+ if (is_target_prop) {
+ tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
+ if (tgt_device_qd &&
+ (tgt_device_qd <= instance->host->can_queue))
+ device_qd = tgt_device_qd;
+
+ /* max_io_size_kb will be set to non zero for
+ * nvme based vd and syspd.
+ */
+ max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
}
+
+ if (instance->nvme_page_size && max_io_size_kb)
+ megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
+
+ scsi_change_queue_depth(sdev, device_qd);
+
}
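megasas_set_nvme_device_properties() above teaches the block layer about NVMe-backed devices behind the RAID controller: requests are capped at the firmware-reported MDTS, merging is disabled, and a virtual boundary equal to the NVMe page size is set so that no scatter/gather element straddles a page (a PRP-list requirement). A condensed sketch under the same assumptions (helper name is illustrative, and the nomerges flag is omitted):

	static void example_set_nvme_limits(struct scsi_device *sdev,
					    u32 max_io_size, u32 nvme_page_size)
	{
		/* largest single request, expressed in 512-byte sectors */
		blk_queue_max_hw_sectors(sdev->request_queue, max_io_size / 512);

		/* no element of an SG list may cross an NVMe page boundary */
		blk_queue_virt_boundary(sdev->request_queue, nvme_page_size - 1);
	}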
@@ -1817,11 +1899,12 @@ static int megasas_slave_configure(struct scsi_device *sdev)
{
u16 pd_index = 0;
struct megasas_instance *instance;
+ int ret_target_prop = DCMD_FAILED;
+ bool is_target_prop = false;
instance = megasas_lookup_instance(sdev->host->host_no);
if (instance->pd_list_not_supported) {
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
- sdev->type == TYPE_DISK) {
+ if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
if (instance->pd_list[pd_index].driveState !=
@@ -1829,14 +1912,25 @@ static int megasas_slave_configure(struct scsi_device *sdev)
return -ENXIO;
}
}
- megasas_set_device_queue_depth(sdev);
- megasas_update_sdev_properties(sdev);
- /*
- * The RAID firmware may require extended timeouts.
+ mutex_lock(&instance->hba_mutex);
+ /* Send DCMD to Firmware and cache the information */
+ if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
+ megasas_get_pd_info(instance, sdev);
+
+ /* Some ventura firmware may not have instance->nvme_page_size set.
+ * Do not send MR_DCMD_DRV_GET_TARGET_PROP
*/
- blk_queue_rq_timeout(sdev->request_queue,
- scmd_timeout * HZ);
+ if ((instance->tgt_prop) && (instance->nvme_page_size))
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_static_target_properties(sdev, is_target_prop);
+
+ mutex_unlock(&instance->hba_mutex);
+
+ /* This sdev property may change post OCR */
+ megasas_set_dynamic_target_properties(sdev);
return 0;
}
@@ -1848,7 +1942,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
struct MR_PRIV_DEVICE *mr_device_priv_data;
instance = megasas_lookup_instance(sdev->host->host_no);
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ if (!MEGASAS_IS_LOGICAL(sdev)) {
/*
* Open the OS scan to the SYSTEM PD
*/
@@ -2483,7 +2577,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
struct megasas_cmd, list);
list_del_init(&reset_cmd->list);
if (reset_cmd->scmd) {
- reset_cmd->scmd->result = DID_RESET << 16;
+ reset_cmd->scmd->result = DID_REQUEUE << 16;
dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
reset_index, reset_cmd,
reset_cmd->scmd->cmnd[0]);
@@ -2651,6 +2745,24 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
}
/**
+ * megasas_dump_frame - This function will dump MPT/MFI frame
+ */
+static inline void
+megasas_dump_frame(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ printk(KERN_INFO "IO request frame:\n\t");
+ for (i = 0; i < sz / sizeof(__le32); i++) {
+ if (i && ((i % 8) == 0))
+ printk("\n\t");
+ printk("%08x ", le32_to_cpu(mfp[i]));
+ }
+ printk("\n");
+}
+
+/**
* megasas_reset_bus_host - Bus & host reset handler entry point
*/
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
@@ -2660,12 +2772,26 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ scmd_printk(KERN_INFO, scmd,
+ "Controller reset is requested due to IO timeout\n"
+ "SCSI command pointer: (%p)\t SCSI host state: %d\t"
+ " SCSI host busy: %d\t FW outstanding: %d\n",
+ scmd, scmd->device->host->shost_state,
+ atomic_read((atomic_t *)&scmd->device->host->host_busy),
+ atomic_read(&instance->fw_outstanding));
+
/*
* First wait for all commands to complete
*/
- if (instance->ctrl_context)
- ret = megasas_reset_fusion(scmd->device->host, 1);
- else
+ if (instance->ctrl_context) {
+ struct megasas_cmd_fusion *cmd;
+ cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+ if (cmd)
+ megasas_dump_frame(cmd->io_request,
+ sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ ret = megasas_reset_fusion(scmd->device->host,
+ SCSIIO_TIMEOUT_OCR);
+ } else
ret = megasas_generic_reset(scmd);
return ret;
@@ -3343,7 +3469,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
{
struct megasas_cmd *cmd;
int i;
- u32 max_cmd = instance->max_fw_cmds;
+ u16 max_cmd = instance->max_fw_cmds;
u32 defer_index;
unsigned long flags;
@@ -3719,7 +3845,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
int i;
- u32 max_cmd = instance->max_mfi_cmds;
+ u16 max_cmd = instance->max_mfi_cmds;
struct megasas_cmd *cmd;
if (!instance->frame_dma_pool)
@@ -3763,9 +3889,8 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
int i;
- u32 max_cmd;
+ u16 max_cmd;
u32 sge_sz;
- u32 total_sz;
u32 frame_count;
struct megasas_cmd *cmd;
@@ -3793,12 +3918,13 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
* Total 192 byte (3 MFI frame of 64 byte)
*/
frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
- total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+ instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
*/
instance->frame_dma_pool = pci_pool_create("megasas frame pool",
- instance->pdev, total_sz, 256, 0);
+ instance->pdev, instance->mfi_frame_size,
+ 256, 0);
if (!instance->frame_dma_pool) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
@@ -3842,7 +3968,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
return -ENOMEM;
}
- memset(cmd->frame, 0, total_sz);
+ memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if (!instance->ctrl_context && reset_devices)
@@ -3897,7 +4023,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
{
int i;
int j;
- u32 max_cmd;
+ u16 max_cmd;
struct megasas_cmd *cmd;
struct fusion_context *fusion;
@@ -3974,18 +4100,22 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
return INITIATE_OCR;
}
-static int
-megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
+static void
+megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
{
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ u16 device_id = 0;
+
+ device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
- return -ENOMEM;
+ return;
}
dcmd = &cmd->frame->dcmd;
@@ -4012,7 +4142,9 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
switch (ret) {
case DCMD_SUCCESS:
- instance->pd_list[device_id].interface =
+ mr_device_priv_data = sdev->hostdata;
+ le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
+ mr_device_priv_data->interface_type =
instance->pd_info->state.ddf.pdType.intf;
break;
@@ -4039,7 +4171,7 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
- return ret;
+ return;
}
/*
* megasas_get_pd_list_info - Returns FW's pd_list structure
@@ -4418,8 +4550,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
static void megasas_update_ext_vd_details(struct megasas_instance *instance)
{
struct fusion_context *fusion;
- u32 old_map_sz;
- u32 new_map_sz;
+ u32 ventura_map_sz = 0;
fusion = instance->ctrl_context;
/* For MFI based controllers return dummy success */
@@ -4449,21 +4580,27 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
"Legacy(64 VD) firmware");
- old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->fw_supported_vd_count - 1));
- new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
- fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->drv_supported_vd_count - 1));
-
- fusion->max_map_sz = max(old_map_sz, new_map_sz);
+ if (instance->max_raid_mapsize) {
+ ventura_map_sz = instance->max_raid_mapsize *
+ MR_MIN_MAP_SIZE; /* 64k */
+ fusion->current_map_sz = ventura_map_sz;
+ fusion->max_map_sz = ventura_map_sz;
+ } else {
+ fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->fw_supported_vd_count - 1));
+ fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->max_map_sz =
+ max(fusion->old_map_sz, fusion->new_map_sz);
- if (instance->supportmax256vd)
- fusion->current_map_sz = new_map_sz;
- else
- fusion->current_map_sz = old_map_sz;
+ if (instance->supportmax256vd)
+ fusion->current_map_sz = fusion->new_map_sz;
+ else
+ fusion->current_map_sz = fusion->old_map_sz;
+ }
+ /* irrespective of FW raid maps, driver raid map is constant */
+ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
}
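/*
 * Worked example of the sizing above (illustrative, not part of the patch):
 * with MR_MIN_MAP_SIZE at 64 KB, a Ventura controller reporting
 * max_raid_mapsize = 4 in scratch pad 3 gets
 * current_map_sz = max_map_sz = 4 * 64 KB = 256 KB. Pre-Ventura firmware
 * keeps the old sizeof()-based calculation, while the driver-side map is
 * now always sizeof(struct MR_DRV_RAID_MAP_ALL), whatever map format the
 * firmware exposes.
 */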
/**
@@ -4533,6 +4670,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+ le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4542,6 +4680,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ instance->support_morethan256jbod =
+ ctrl_info->adapter_operations4.support_pd_map_target_id;
/*Check whether controller is iMR or MR */
instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
@@ -4989,13 +5129,13 @@ skip_alloc:
static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
- u32 max_sectors_2;
- u32 tmp_sectors, msix_enable, scratch_pad_2;
+ u32 max_sectors_2, tmp_sectors, msix_enable;
+ u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
- int i, loop, fw_msix_count = 0;
+ int i, j, loop, fw_msix_count = 0;
struct IOV_111 *iovPtr;
struct fusion_context *fusion;
@@ -5020,34 +5160,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
reg_set = instance->reg_set;
- switch (instance->pdev->device) {
- case PCI_DEVICE_ID_LSI_FUSION:
- case PCI_DEVICE_ID_LSI_PLASMA:
- case PCI_DEVICE_ID_LSI_INVADER:
- case PCI_DEVICE_ID_LSI_FURY:
- case PCI_DEVICE_ID_LSI_INTRUDER:
- case PCI_DEVICE_ID_LSI_INTRUDER_24:
- case PCI_DEVICE_ID_LSI_CUTLASS_52:
- case PCI_DEVICE_ID_LSI_CUTLASS_53:
+ if (fusion)
instance->instancet = &megasas_instance_template_fusion;
- break;
- case PCI_DEVICE_ID_LSI_SAS1078R:
- case PCI_DEVICE_ID_LSI_SAS1078DE:
- instance->instancet = &megasas_instance_template_ppc;
- break;
- case PCI_DEVICE_ID_LSI_SAS1078GEN2:
- case PCI_DEVICE_ID_LSI_SAS0079GEN2:
- instance->instancet = &megasas_instance_template_gen2;
- break;
- case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
- case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
- instance->instancet = &megasas_instance_template_skinny;
- break;
- case PCI_DEVICE_ID_LSI_SAS1064R:
- case PCI_DEVICE_ID_DELL_PERC5:
- default:
- instance->instancet = &megasas_instance_template_xscale;
- break;
+ else {
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_SAS1078R:
+ case PCI_DEVICE_ID_LSI_SAS1078DE:
+ instance->instancet = &megasas_instance_template_ppc;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+ case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+ instance->instancet = &megasas_instance_template_gen2;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+ case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+ instance->instancet = &megasas_instance_template_skinny;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1064R:
+ case PCI_DEVICE_ID_DELL_PERC5:
+ default:
+ instance->instancet = &megasas_instance_template_xscale;
+ instance->pd_list_not_supported = 1;
+ break;
+ }
}
if (megasas_transition_to_ready(instance, 0)) {
@@ -5066,13 +5201,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ready_state;
}
- /*
- * MSI-X host index 0 is common for all adapter.
- * It is used for all MPT based Adapters.
- */
- instance->reply_post_host_index_addr[0] =
- (u32 __iomem *)((u8 __iomem *)instance->reg_set +
- MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+ if (instance->is_ventura) {
+ scratch_pad_3 =
+ readl(&instance->reg_set->outbound_scratch_pad_3);
+ instance->max_raid_mapsize = ((scratch_pad_3 >>
+ MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
+ MR_MAX_RAID_MAP_SIZE_MASK);
+ }
/* Check if MSI-X is supported while in ready state */
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
@@ -5092,6 +5227,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors = ((scratch_pad_2
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ if (instance->msix_vectors > 16)
+ instance->msix_combined = true;
+
if (rdpq_enable)
instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
@@ -5125,6 +5263,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
else
instance->msix_vectors = 0;
}
+ /*
+ * MSI-X host index 0 is common for all adapters.
+ * It is used for all MPT based Adapters.
+ */
+ if (instance->msix_combined) {
+ instance->reply_post_host_index_addr[0] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
+ } else {
+ instance->reply_post_host_index_addr[0] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+ }
+
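/*
 * Rationale for the selection above (inferred from the code, not stated in
 * the patch): msix_combined is set earlier in this function when firmware
 * exposes more than 16 reply queues. In that case the common host index
 * register lives in the supplemental register block at
 * MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET instead of the single
 * MPI2_REPLY_POST_HOST_INDEX_OFFSET used by smaller configurations.
 */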
i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
if (i < 0)
goto fail_setup_irqs;
@@ -5155,6 +5307,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
+ if (instance->is_ventura) {
+ scratch_pad_4 =
+ readl(&instance->reg_set->outbound_scratch_pad_4);
+ if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+ MR_DEFAULT_NVME_PAGE_SHIFT)
+ instance->nvme_page_size =
+ (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+
+ dev_info(&instance->pdev->dev,
+ "NVME page size\t: (%d)\n", instance->nvme_page_size);
+ }
+
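/*
 * Illustrative decode of the scratch pad 4 read above (a sketch; the mask
 * and default shift values are assumptions based on the driver headers,
 * MR_NVME_PAGE_SIZE_MASK = 0xFF and MR_DEFAULT_NVME_PAGE_SHIFT = 12):
 *
 *	shift = scratch_pad_4 & 0xFF;
 *	nvme_page_size = (shift >= 12) ? (1U << shift) : 0;	// 12 -> 4096
 *
 * A reported shift below the default leaves nvme_page_size at 0, and the
 * slave_configure path above then skips the MR_DCMD_DRV_GET_TARGET_PROP
 * query entirely.
 */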
if (instance->msix_vectors ?
megasas_setup_irqs_msix(instance, 1) :
megasas_setup_irqs_ioapic(instance))
@@ -5173,13 +5337,43 @@ static int megasas_init_fw(struct megasas_instance *instance)
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
if (megasas_get_pd_list(instance) < 0) {
dev_err(&instance->pdev->dev, "failed to get PD list\n");
- goto fail_get_pd_list;
+ goto fail_get_ld_pd_list;
}
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+ /* stream detection initialization */
+ if (instance->is_ventura && fusion) {
+ fusion->stream_detect_by_ld =
+ kzalloc(sizeof(struct LD_STREAM_DETECT *)
+ * MAX_LOGICAL_DRIVES_EXT,
+ GFP_KERNEL);
+ if (!fusion->stream_detect_by_ld) {
+ dev_err(&instance->pdev->dev,
+ "unable to allocate stream detection for pool of LDs\n");
+ goto fail_get_ld_pd_list;
+ }
+ for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
+ fusion->stream_detect_by_ld[i] =
+ kmalloc(sizeof(struct LD_STREAM_DETECT),
+ GFP_KERNEL);
+ if (!fusion->stream_detect_by_ld[i]) {
+ dev_err(&instance->pdev->dev,
+ "unable to allocate stream detect by LD\n ");
+ for (j = 0; j < i; ++j)
+ kfree(fusion->stream_detect_by_ld[j]);
+ kfree(fusion->stream_detect_by_ld);
+ fusion->stream_detect_by_ld = NULL;
+ goto fail_get_ld_pd_list;
+ }
+ fusion->stream_detect_by_ld[i]->mru_bit_map
+ = MR_STREAM_BITMAP;
+ }
+ }
+
if (megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
+ goto fail_get_ld_pd_list;
/*
* Compute the max allowed sectors per IO: The controller info has two
@@ -5296,7 +5490,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
return 0;
-fail_get_pd_list:
+fail_get_ld_pd_list:
instance->instancet->disable_intr(instance);
fail_init_adapter:
megasas_destroy_irqs(instance);
@@ -5309,9 +5503,11 @@ fail_ready_state:
instance->ctrl_info = NULL;
iounmap(instance->reg_set);
- fail_ioremap:
+fail_ioremap:
pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
return -EINVAL;
}
@@ -5531,6 +5727,98 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
return 0;
}
+/* megasas_get_target_prop - Send DCMD with below details to firmware.
+ *
+ * This DCMD fetches a few properties of the LD/system PD defined
+ * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
+ *
+ * The DCMD is sent by the driver whenever a new target is added to the OS.
+ *
+ * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
+ * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
+ * 0 = system PD, 1 = LD.
+ * dcmd.mbox.s[1] - TargetID for LD/system PD.
+ * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
+ *
+ * @instance: Adapter soft state
+ * @sdev: OS provided scsi device
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int
+megasas_get_target_prop(struct megasas_instance *instance,
+ struct scsi_device *sdev)
+{
+ int ret;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ u16 targetId = (sdev->channel % 2) + sdev->id;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get cmd %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
+
+ dcmd->mbox.s[1] = cpu_to_le16(targetId);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len =
+ cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(instance->tgt_prop_h);
+ dcmd->sgl.sge32[0].length =
+ cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance,
+ cmd, MFI_IO_TIMEOUT_SECS);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ switch (ret) {
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev,
+ "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ break;
+
+ default:
+ megasas_return_cmd(instance, cmd);
+ }
+ if (ret != DCMD_SUCCESS)
+ dev_err(&instance->pdev->dev,
+ "return from %s %d return value %d\n",
+ __func__, __LINE__, ret);
+
+ return ret;
+}
+
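/*
 * Example of the mailbox encoding used above (illustrative only): for a
 * logical drive MEGASAS_IS_LOGICAL(sdev) is 1, so firmware sees
 * mbox.b[0] = 1 and mbox.s[1] = the LD target id; for a system PD the
 * first byte is 0 and the target id addresses the PD. The caller,
 * megasas_slave_configure() earlier in this patch, issues this DCMD only
 * when instance->nvme_page_size is known, and on DCMD_SUCCESS feeds the
 * returned MR_TARGET_PROPERTIES into
 * megasas_set_static_target_properties() to derive the per-device queue
 * depth and NVMe I/O size.
 */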
/**
* megasas_start_aen - Subscribes to AEN during driver load time
* @instance: Adapter soft state
@@ -5714,6 +6002,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->pdev = pdev;
switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_VENTURA:
+ case PCI_DEVICE_ID_LSI_HARPOON:
+ case PCI_DEVICE_ID_LSI_TOMCAT:
+ case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+ case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+ instance->is_ventura = true;
case PCI_DEVICE_ID_LSI_FUSION:
case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
@@ -5723,21 +6017,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
case PCI_DEVICE_ID_LSI_CUTLASS_52:
case PCI_DEVICE_ID_LSI_CUTLASS_53:
{
- instance->ctrl_context_pages =
- get_order(sizeof(struct fusion_context));
- instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
- instance->ctrl_context_pages);
- if (!instance->ctrl_context) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
- "memory for Fusion context info\n");
+ if (megasas_alloc_fusion_context(instance)) {
+ megasas_free_fusion_context(instance);
goto fail_alloc_dma_buf;
}
fusion = instance->ctrl_context;
- memset(fusion, 0,
- ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
+
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
fusion->adapter_type = THUNDERBOLT_SERIES;
+ else if (instance->is_ventura)
+ fusion->adapter_type = VENTURA_SERIES;
else
fusion->adapter_type = INVADER_SERIES;
}
@@ -5799,9 +6089,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->pd_info = pci_alloc_consistent(pdev,
sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ instance->pd_info = pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ instance->tgt_prop = pci_alloc_consistent(pdev,
+ sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+
if (!instance->pd_info)
dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+ if (!instance->tgt_prop)
+ dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+
instance->crash_dump_buf = pci_alloc_consistent(pdev,
CRASH_DMA_BUF_SIZE,
&instance->crash_dump_h);
@@ -5823,6 +6121,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
mutex_init(&instance->reset_mutex);
@@ -5945,6 +6244,10 @@ fail_alloc_dma_buf:
pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
instance->pd_info,
instance->pd_info_h);
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop,
+ instance->tgt_prop_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -6217,6 +6520,10 @@ fail_init_mfi:
pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
instance->pd_info,
instance->pd_info_h);
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop,
+ instance->tgt_prop_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -6330,6 +6637,14 @@ skip_firing_dcmds:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
+ if (instance->is_ventura) {
+ for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
+ kfree(fusion->stream_detect_by_ld[i]);
+ kfree(fusion->stream_detect_by_ld);
+ fusion->stream_detect_by_ld = NULL;
+ }
+
if (instance->ctrl_context) {
megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
@@ -6350,8 +6665,7 @@ skip_firing_dcmds:
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
- free_pages((ulong)instance->ctrl_context,
- instance->ctrl_context_pages);
+ megasas_free_fusion_context(instance);
} else {
megasas_release_mfi(instance);
pci_free_consistent(pdev, sizeof(u32),
@@ -6367,11 +6681,14 @@ skip_firing_dcmds:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
-
if (instance->pd_info)
pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
instance->pd_info,
instance->pd_info_h);
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop,
+ instance->tgt_prop_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6570,6 +6887,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
+ megasas_return_cmd(instance, cmd);
+ return -1;
+ }
+ }
+
if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
error = megasas_set_crash_dump_params_ioctl(cmd);
megasas_return_cmd(instance, cmd);
@@ -6678,7 +7002,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
ioc->sense_off);
- if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ if (copy_to_user((void __user *)((unsigned long)
+ get_unaligned((unsigned long *)sense_ptr)),
sense, ioc->sense_len)) {
dev_err(&instance->pdev->dev, "Failed to copy out to user "
"sense data\n");
@@ -7047,6 +7372,13 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
+static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
+{
+ sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+}
+
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -7151,10 +7483,8 @@ megasas_aen_polling(struct work_struct *work)
else
scsi_device_put(sdev1);
} else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
}
}
}
@@ -7171,10 +7501,8 @@ megasas_aen_polling(struct work_struct *work)
else
scsi_device_put(sdev1);
} else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
}
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index f237d0003df3..62affa76133d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -77,7 +77,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
#endif
#define TRUE 1
-#define SPAN_DEBUG 0
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
#define SPAN_INVALID 0xff
@@ -155,12 +154,17 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
+static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].interfaceType;
+}
+
u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldTgtIdToLd[ldTgtId];
}
@@ -179,18 +183,108 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
- int i;
+ int i, j;
u16 ld_count;
+ struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+ struct MR_FW_RAID_MAP_EXT *fw_map_ext;
+ struct MR_RAID_MAP_DESC_TABLE *desc_table;
struct MR_DRV_RAID_MAP_ALL *drv_map =
fusion->ld_drv_map[(instance->map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+ void *raid_map_data = NULL;
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ memset(pDrvRaidMap->ldTgtIdToLd,
+ 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+
+ if (instance->max_raid_mapsize) {
+ fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ desc_table =
+ (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
+ if (desc_table != fw_map_dyn->raid_map_desc_table)
+ dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
+ desc_table, fw_map_dyn->raid_map_desc_table);
+
+ ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
+ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ pDrvRaidMap->fpPdIoTimeoutSec =
+ fw_map_dyn->fp_pd_io_timeout_sec;
+ pDrvRaidMap->totalSize =
+ cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
+ /* point to actual data starting point*/
+ raid_map_data = (void *)fw_map_dyn +
+ le32_to_cpu(fw_map_dyn->desc_table_offset) +
+ le32_to_cpu(fw_map_dyn->desc_table_size);
+
+ for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
+ switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
+ case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+ fw_map_dyn->dev_hndl_info =
+ (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+ memcpy(pDrvRaidMap->devHndlInfo,
+ fw_map_dyn->dev_hndl_info,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
+ break;
+ case RAID_MAP_DESC_TYPE_TGTID_INFO:
+ fw_map_dyn->ld_tgt_id_to_ld =
+ (u16 *)(raid_map_data +
+ le32_to_cpu(desc_table->raid_map_desc_offset));
+ for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+ pDrvRaidMap->ldTgtIdToLd[j] =
+ le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
+ }
+ break;
+ case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+ fw_map_dyn->ar_map_info =
+ (struct MR_ARRAY_INFO *)
+ (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+ memcpy(pDrvRaidMap->arMapInfo,
+ fw_map_dyn->ar_map_info,
+ sizeof(struct MR_ARRAY_INFO) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
+ break;
+ case RAID_MAP_DESC_TYPE_SPAN_INFO:
+ fw_map_dyn->ld_span_map =
+ (struct MR_LD_SPAN_MAP *)
+ (raid_map_data +
+ le32_to_cpu(desc_table->raid_map_desc_offset));
+ memcpy(pDrvRaidMap->ldSpanMap,
+ fw_map_dyn->ld_span_map,
+ sizeof(struct MR_LD_SPAN_MAP) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
+ break;
+ default:
+ dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
+ fw_map_dyn->desc_table_num_elements);
+ }
+ ++desc_table;
+ }
+
+ } else if (instance->supportmax256vd) {
+ fw_map_ext =
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+ dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
+ return;
+ }
+
+ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+ for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u16)fw_map_ext->ldTgtIdToLd[i];
+ memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
+ sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+ memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+ sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+ memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
- if (instance->supportmax256vd) {
- memcpy(fusion->ld_drv_map[instance->map_id & 1],
- fusion->ld_map[instance->map_id & 1],
- fusion->current_map_sz);
/* New Raid map will not set totalSize, so keep expected value
* for legacy code in ValidateMapInfo
*/
@@ -201,50 +295,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
fusion->ld_map[(instance->map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
-
-#if VD_EXT_DEBUG
- for (i = 0; i < ld_count; i++) {
- dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
- "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
- instance->unique_id, i,
- fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
- fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
- fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
- }
-#endif
-
- memset(drv_map, 0, fusion->drv_map_sz);
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
pDrvRaidMap->ldTgtIdToLd[i] =
(u8)pFwRaidMap->ldTgtIdToLd[i];
- for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
- i < MAX_LOGICAL_DRIVES_EXT; i++)
- pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
for (i = 0; i < ld_count; i++) {
pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
-#if VD_EXT_DEBUG
- dev_dbg(&instance->pdev->dev,
- "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
- "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
- "size 0x%x\n", i, i,
- pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
- pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
- (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
- dev_dbg(&instance->pdev->dev,
- "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
- "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
- "size 0x%x\n", i, i,
- pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
- pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
- (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
- dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
- "raid map %p LD RAID MAP %p/%p\n", drv_map,
- pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
- &pDrvRaidMap->ldSpanMap[i].ldRaid);
-#endif
}
memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
@@ -265,7 +323,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
struct LD_LOAD_BALANCE_INFO *lbInfo;
PLD_SPAN_INFO ldSpanInfo;
struct MR_LD_RAID *raid;
- u16 ldCount, num_lds;
+ u16 num_lds, i;
u16 ld;
u32 expected_size;
@@ -279,7 +337,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
lbInfo = fusion->load_balance_info;
ldSpanInfo = fusion->log_to_span;
- if (instance->supportmax256vd)
+ if (instance->max_raid_mapsize)
+ expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
+ else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
expected_size =
@@ -287,8 +347,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
- dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
- (unsigned int) expected_size);
+ dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
+ le32_to_cpu(pDrvRaidMap->totalSize));
+ dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
+ (unsigned int)expected_size);
dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
le32_to_cpu(pDrvRaidMap->totalSize));
@@ -298,15 +360,23 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
if (instance->UnevenSpanSupport)
mr_update_span_set(drv_map, ldSpanInfo);
- mr_update_load_balance_params(drv_map, lbInfo);
+ if (lbInfo)
+ mr_update_load_balance_params(drv_map, lbInfo);
num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
/*Convert Raid capability values to CPU arch */
- for (ldCount = 0; ldCount < num_lds; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
+ ld = MR_TargetIdToLdGet(i, drv_map);
+
+ /* For non-existent VDs, skip to the next VD */
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+
raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
+
+ num_lds--;
}
return 1;
@@ -348,91 +418,6 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
/*
******************************************************************************
*
-* Function to print info about span set created in driver from FW raid map
-*
-* Inputs :
-* map - LD map
-* ldSpanInfo - ldSpanInfo per HBA instance
-*/
-#if SPAN_DEBUG
-static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
- PLD_SPAN_INFO ldSpanInfo)
-{
-
- u8 span;
- u32 element;
- struct MR_LD_RAID *raid;
- LD_SPAN_SET *span_set;
- struct MR_QUAD_ELEMENT *quad;
- int ldCount;
- u16 ld;
-
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
- continue;
- raid = MR_LdRaidGet(ld, map);
- dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
- ld, raid->spanDepth);
- for (span = 0; span < raid->spanDepth; span++)
- dev_dbg(&instance->pdev->dev, "Span=%x,"
- " number of quads=%x\n", span,
- le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements));
- for (element = 0; element < MAX_QUAD_DEPTH; element++) {
- span_set = &(ldSpanInfo[ld].span_set[element]);
- if (span_set->span_row_data_width == 0)
- break;
-
- dev_dbg(&instance->pdev->dev, "Span Set %x:"
- "width=%x, diff=%x\n", element,
- (unsigned int)span_set->span_row_data_width,
- (unsigned int)span_set->diff);
- dev_dbg(&instance->pdev->dev, "logical LBA"
- "start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)span_set->log_start_lba,
- (long unsigned int)span_set->log_end_lba);
- dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
- " end=0x%08lx\n",
- (long unsigned int)span_set->span_row_start,
- (long unsigned int)span_set->span_row_end);
- dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
- " end=0x%08lx\n",
- (long unsigned int)span_set->data_row_start,
- (long unsigned int)span_set->data_row_end);
- dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
- " end=0x%08lx\n",
- (long unsigned int)span_set->data_strip_start,
- (long unsigned int)span_set->data_strip_end);
-
- for (span = 0; span < raid->spanDepth; span++) {
- if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements) >=
- element + 1) {
- quad = &map->raidMap.ldSpanMap[ld].
- spanBlock[span].block_span_info.
- quad[element];
- dev_dbg(&instance->pdev->dev, "Span=%x,"
- "Quad=%x, diff=%x\n", span,
- element, le32_to_cpu(quad->diff));
- dev_dbg(&instance->pdev->dev,
- "offset_in_span=0x%08lx\n",
- (long unsigned int)le64_to_cpu(quad->offsetInSpan));
- dev_dbg(&instance->pdev->dev,
- "logical start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)le64_to_cpu(quad->logStart),
- (long unsigned int)le64_to_cpu(quad->logEnd));
- }
- }
- }
- }
- return 0;
-}
-#endif
-
-/*
-******************************************************************************
-*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
@@ -543,19 +528,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
else
break;
}
-#if SPAN_DEBUG
- dev_info(&instance->pdev->dev, "Strip 0x%llx,"
- "span_set_Strip 0x%llx, span_set_Row 0x%llx"
- "data width 0x%llx span offset 0x%x\n", strip,
- (unsigned long long)span_set_Strip,
- (unsigned long long)span_set_Row,
- (unsigned long long)span_set->span_row_data_width,
- span_offset);
- dev_info(&instance->pdev->dev, "For strip 0x%llx"
- "row is 0x%llx\n", strip,
- (unsigned long long) span_set->data_row_start +
- (unsigned long long) span_set_Row + (span_offset - 1));
-#endif
+
retval = (span_set->data_row_start + span_set_Row +
(span_offset - 1));
return retval;
@@ -672,11 +645,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
else
break;
}
-#if SPAN_DEBUG
- dev_info(&instance->pdev->dev, "get_arm_from_strip:"
- "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
- (long unsigned int)strip, (strip_offset - span_offset));
-#endif
+
retval = (strip_offset - span_offset);
return retval;
}
@@ -737,16 +706,18 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- u32 pd, arRef;
+ u32 pd, arRef, r1_alt_pd;
u8 physArm, span;
u64 row;
u8 retval = TRUE;
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
+ u8 *pPdInterface = &io_info->pd_interface;
u32 logArm, rowMod, armQ, arm;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
+ *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
/*Get row and span from io_info for Uneven Span IO.*/
row = io_info->start_row;
@@ -772,27 +743,46 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
arRef = MR_LdSpanArrayGet(ld, span, map);
pd = MR_ArPdGet(arRef, physArm, map);
- if (pd != MR_PD_INVALID)
+ if (pd != MR_PD_INVALID) {
*pDevHandle = MR_PdDevHandleGet(pd, map);
- else {
- *pDevHandle = cpu_to_le16(MR_PD_INVALID);
+ *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+ /* get second pd also for raid 1/10 fast path writes*/
+ if (instance->is_ventura &&
+ (raid->level == 1) &&
+ !io_info->isRead) {
+ r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (r1_alt_pd != MR_PD_INVALID)
+ io_info->r1_alt_dev_handle =
+ MR_PdDevHandleGet(r1_alt_pd, map);
+ }
+ } else {
if ((raid->level >= 5) &&
((fusion->adapter_type == THUNDERBOLT_SERIES) ||
((fusion->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
- pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
physArm = physArm + 1;
pd = MR_ArPdGet(arRef, physArm, map);
- if (pd != MR_PD_INVALID)
+ if (pd != MR_PD_INVALID) {
*pDevHandle = MR_PdDevHandleGet(pd, map);
+ *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+ }
}
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
- physArm;
- io_info->span_arm = pRAID_Context->spanArm;
+ if (instance->is_ventura) {
+ ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ io_info->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ } else {
+ pRAID_Context->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ io_info->span_arm = pRAID_Context->span_arm;
+ }
+ io_info->pd_after_lb = pd;
return retval;
}
@@ -819,16 +809,17 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
- u32 pd, arRef;
+ u32 pd, arRef, r1_alt_pd;
u8 physArm, span;
u64 row;
u8 retval = TRUE;
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
+ u8 *pPdInterface = &io_info->pd_interface;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
-
+ *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
row = mega_div64_32(stripRow, raid->rowDataSize);
@@ -867,31 +858,49 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
arRef = MR_LdSpanArrayGet(ld, span, map);
pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
- if (pd != MR_PD_INVALID)
+ if (pd != MR_PD_INVALID) {
/* Get dev handle from Pd. */
*pDevHandle = MR_PdDevHandleGet(pd, map);
- else {
- /* set dev handle as invalid. */
- *pDevHandle = cpu_to_le16(MR_PD_INVALID);
+ *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+ /* get second pd also for raid 1/10 fast path writes*/
+ if (instance->is_ventura &&
+ (raid->level == 1) &&
+ !io_info->isRead) {
+ r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (r1_alt_pd != MR_PD_INVALID)
+ io_info->r1_alt_dev_handle =
+ MR_PdDevHandleGet(r1_alt_pd, map);
+ }
+ } else {
if ((raid->level >= 5) &&
((fusion->adapter_type == THUNDERBOLT_SERIES) ||
((fusion->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
- pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
physArm = physArm + 1;
pd = MR_ArPdGet(arRef, physArm, map);
- if (pd != MR_PD_INVALID)
+ if (pd != MR_PD_INVALID) {
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
+ *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+ }
}
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
- physArm;
- io_info->span_arm = pRAID_Context->spanArm;
+ if (instance->is_ventura) {
+ ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ io_info->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ } else {
+ pRAID_Context->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ io_info->span_arm = pRAID_Context->span_arm;
+ }
+ io_info->pd_after_lb = pd;
return retval;
}
@@ -912,7 +921,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
{
struct fusion_context *fusion;
struct MR_LD_RAID *raid;
- u32 ld, stripSize, stripe_mask;
+ u32 stripSize, stripe_mask;
u64 endLba, endStrip, endRow, start_row, start_strip;
u64 regStart;
u32 regSize;
@@ -924,6 +933,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
u8 retval = 0;
u8 startlba_span = SPAN_INVALID;
u64 *pdBlock = &io_info->pdBlock;
+ u16 ld;
ldStartBlock = io_info->ldStartBlock;
numBlocks = io_info->numBlocks;
@@ -935,6 +945,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
ld = MR_TargetIdToLdGet(ldTgtId, map);
raid = MR_LdRaidGet(ld, map);
+ /*check read ahead bit*/
+ io_info->ra_capable = raid->capability.ra_capable;
/*
* if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
@@ -996,17 +1008,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
}
io_info->start_span = startlba_span;
io_info->start_row = start_row;
-#if SPAN_DEBUG
- dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
- "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
- " span 0x%x\n", __func__, __LINE__,
- (unsigned long long)start_row,
- (unsigned long long)start_strip,
- (unsigned long long)endStrip, startlba_span);
- dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
- "Start span 0x%x\n", (unsigned long long)start_row,
- (unsigned long long)endRow, startlba_span);
-#endif
} else {
start_row = mega_div64_32(start_strip, raid->rowDataSize);
endRow = mega_div64_32(endStrip, raid->rowDataSize);
@@ -1093,20 +1094,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize;
}
- pRAID_Context->timeoutValue =
+ pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd ?
raid->fpIoTimeoutForLd :
map->raidMap.fpPdIoTimeoutSec);
if (fusion->adapter_type == INVADER_SERIES)
- pRAID_Context->regLockFlags = (isRead) ?
+ pRAID_Context->reg_lock_flags = (isRead) ?
raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
- else
- pRAID_Context->regLockFlags = (isRead) ?
+ else if (!instance->is_ventura)
+ pRAID_Context->reg_lock_flags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
- pRAID_Context->VirtualDiskTgtId = raid->targetId;
- pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
- pRAID_Context->regLockLength = cpu_to_le32(regSize);
- pRAID_Context->configSeqNum = raid->seqNum;
+ pRAID_Context->virtual_disk_tgt_id = raid->targetId;
+ pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
+ pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
+ pRAID_Context->config_seq_num = raid->seqNum;
/* save pointer to raid->LUN array */
*raidLUN = raid->LUN;
@@ -1122,7 +1123,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
ref_in_start_stripe, io_info,
pRAID_Context, map);
/* If IO on an invalid Pd, then FP is not possible.*/
- if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
+ if (io_info->devHandle == MR_DEVHANDLE_INVALID)
io_info->fpOkForIo = FALSE;
return retval;
} else if (isRead) {
@@ -1140,12 +1141,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
return TRUE;
}
}
-
-#if SPAN_DEBUG
- /* Just for testing what arm we get for strip.*/
- if (io_info->IoforUnevenSpan)
- get_arm_from_strip(instance, ld, start_strip, map);
-#endif
return TRUE;
}
@@ -1259,10 +1254,6 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
break;
}
}
-#if SPAN_DEBUG
- getSpanInfo(map, ldSpanInfo);
-#endif
-
}
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
@@ -1293,11 +1284,12 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
}
u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
- struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+ struct LD_LOAD_BALANCE_INFO *lbInfo,
+ struct IO_REQUEST_INFO *io_info,
+ struct MR_DRV_RAID_MAP_ALL *drv_map)
{
- struct fusion_context *fusion;
struct MR_LD_RAID *raid;
- struct MR_DRV_RAID_MAP_ALL *drv_map;
+ u16 pd1_dev_handle;
u16 pend0, pend1, ld;
u64 diff0, diff1;
u8 bestArm, pd0, pd1, span, arm;
@@ -1310,9 +1302,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
>> RAID_CTX_SPANARM_SPAN_SHIFT);
arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
-
- fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
raid = MR_LdRaidGet(ld, drv_map);
span_row_size = instance->UnevenSpanSupport ?
@@ -1323,47 +1312,52 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
(arm + 1 - span_row_size) : arm + 1, drv_map);
- /* get the pending cmds for the data and mirror arms */
- pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
- pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+ /* Get PD1 Dev Handle */
+
+ pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
- /* Determine the disk whose head is nearer to the req. block */
- diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
- diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
- bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+ if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
+ bestArm = arm;
+ } else {
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
- if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
- (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
- bestArm ^= 1;
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+ bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+ /* Make balance count from 16 to 4 to
+ * keep driver in sync with Firmware
+ */
+ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+ bestArm ^= 1;
+
+ /* Update the last accessed block on the correct pd */
+ io_info->span_arm =
+ (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+ io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+ }
- /* Update the last accessed block on the correct pd */
- io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
- io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
-#if SPAN_DEBUG
- if (arm != bestArm)
- dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
- "occur - span 0x%x arm 0x%x bestArm 0x%x "
- "io_info->span_arm 0x%x\n",
- span, arm, bestArm, io_info->span_arm);
-#endif
return io_info->pd_after_lb;
}
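/*
 * Worked example of the arm selection above (a sketch, not part of the
 * patch): suppose the request starts at block 1000, pd0 last accessed
 * block 900 and pd1 block 5000. diff0 = 100 < diff1 = 4000, so the
 * nearer-head heuristic keeps the current arm. If pd0 already has nine
 * commands pending and pd1 only two, pend0 > pend1 + lb_pending_cmds
 * (default 4 after this patch), so the choice flips to the mirror arm.
 * When the mirror arm's device handle is invalid (e.g. the drive was
 * pulled), the current arm is used as-is and no balancing is attempted.
 */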
__le16 get_updated_dev_handle(struct megasas_instance *instance,
- struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+ struct LD_LOAD_BALANCE_INFO *lbInfo,
+ struct IO_REQUEST_INFO *io_info,
+ struct MR_DRV_RAID_MAP_ALL *drv_map)
{
u8 arm_pd;
__le16 devHandle;
- struct fusion_context *fusion;
- struct MR_DRV_RAID_MAP_ALL *drv_map;
-
- fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
/* get best new arm (PD ID) */
- arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+ io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
+
return devHandle;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 24778ba4b6e8..29650ba669da 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -47,6 +47,7 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -181,32 +182,44 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
cmd->scmd = NULL;
- memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+ cmd->cmd_completed = false;
}
/**
* megasas_fire_cmd_fusion - Sends command to the FW
+ * @instance: Adapter soft state
+ * @req_desc: 32bit or 64bit Request descriptor
+ *
+ * Perform PCI Write. Ventura supports 32 bit Descriptor.
+ * Prior to Ventura (12G), MR controllers support a 64 bit Descriptor.
*/
+
static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
+ if (instance->is_ventura)
+ writel(le32_to_cpu(req_desc->u.low),
+ &instance->reg_set->inbound_single_queue_port);
+ else {
#if defined(writeq) && defined(CONFIG_64BIT)
- u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
- le32_to_cpu(req_desc->u.low));
+ u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
+ le32_to_cpu(req_desc->u.low));
- writeq(req_data, &instance->reg_set->inbound_low_queue_port);
+ writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
- unsigned long flags;
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- writel(le32_to_cpu(req_desc->u.low),
- &instance->reg_set->inbound_low_queue_port);
- writel(le32_to_cpu(req_desc->u.high),
- &instance->reg_set->inbound_high_queue_port);
- mmiowb();
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(le32_to_cpu(req_desc->u.low),
+ &instance->reg_set->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc->u.high),
+ &instance->reg_set->inbound_high_queue_port);
+ mmiowb();
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
+ }
}
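/*
 * Note on the descriptor write above (rationale inferred from the code):
 * Ventura takes the request descriptor through the 32-bit
 * inbound_single_queue_port, so a single writel() suffices and no locking
 * is needed. Older controllers take a 64-bit descriptor; with writeq()
 * available the two halves are posted atomically, otherwise the paired
 * writel() calls are serialized under hba_lock so halves of concurrent
 * descriptors cannot interleave. IOC INIT is the exception and still uses
 * the 64-bit ports even on Ventura (see megasas_ioc_init_fusion later in
 * this patch).
 */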
/**
@@ -229,7 +242,10 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
reg_set = instance->reg_set;
- cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+ /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+ if (!instance->is_ventura)
+ cur_max_fw_cmds =
+ readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
if (dual_qdepth_disable || !cur_max_fw_cmds)
cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
@@ -243,7 +259,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
if (fw_boot_context == OCR_CONTEXT) {
cur_max_fw_cmds = cur_max_fw_cmds - 1;
- if (cur_max_fw_cmds <= instance->max_fw_cmds) {
+ if (cur_max_fw_cmds < instance->max_fw_cmds) {
instance->cur_can_queue =
cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
MEGASAS_FUSION_IOCTL_CMDS);
@@ -255,7 +271,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->ldio_threshold = ldio_threshold;
if (!instance->is_rdpq)
- instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
+ instance->max_fw_cmds =
+ min_t(u16, instance->max_fw_cmds, 1024);
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
@@ -271,7 +288,14 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
(MEGASAS_FUSION_INTERNAL_CMDS +
MEGASAS_FUSION_IOCTL_CMDS);
instance->cur_can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
}
+
+ if (instance->is_ventura)
+ instance->max_mpt_cmds =
+ instance->max_fw_cmds * RAID_1_PEER_CMDS;
+ else
+ instance->max_mpt_cmds = instance->max_fw_cmds;
}
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -285,7 +309,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
struct megasas_cmd_fusion *cmd;
/* SG, Sense */
- for (i = 0; i < instance->max_fw_cmds; i++) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
if (cmd) {
if (cmd->sg_frame)
@@ -329,7 +353,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
/* cmd_list */
- for (i = 0; i < instance->max_fw_cmds; i++)
+ for (i = 0; i < instance->max_mpt_cmds; i++)
kfree(fusion->cmd_list[i]);
kfree(fusion->cmd_list);
@@ -343,7 +367,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
int i;
- u32 max_cmd;
+ u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
@@ -353,7 +377,8 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
fusion->sg_dma_pool =
pci_pool_create("mr_sg", instance->pdev,
- instance->max_chain_frame_sz, 4, 0);
+ instance->max_chain_frame_sz,
+ MR_DEFAULT_NVME_PAGE_SIZE, 0);
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
pci_pool_create("mr_sense", instance->pdev,
@@ -381,33 +406,47 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return -ENOMEM;
}
}
+
+ /* create sense buffer for the raid 1/10 fp */
+ for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+ if (!cmd->sense) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
return 0;
}
int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
- u32 max_cmd, i;
+ u32 max_mpt_cmd, i;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
- max_cmd = instance->max_fw_cmds;
+ max_mpt_cmd = instance->max_mpt_cmds;
/*
* fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
- fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
- GFP_KERNEL);
+ fusion->cmd_list =
+ kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
+ GFP_KERNEL);
if (!fusion->cmd_list) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < max_cmd; i++) {
+ for (i = 0; i < max_mpt_cmd; i++) {
fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
GFP_KERNEL);
if (!fusion->cmd_list[i]) {
@@ -539,7 +578,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
}
fusion->rdpq_virt[i].RDPQBaseAddress =
- fusion->reply_frames_desc_phys[i];
+ cpu_to_le64(fusion->reply_frames_desc_phys[i]);
reply_desc = fusion->reply_frames_desc[i];
for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
@@ -642,13 +681,14 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
*/
/* SMID 0 is reserved. Set SMID/index from 1 */
- for (i = 0; i < instance->max_fw_cmds; i++) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
cmd->index = i + 1;
cmd->scmd = NULL;
- cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
+ cmd->sync_cmd_idx =
+ (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
(i - instance->max_scsi_cmds) :
(u32)ULONG_MAX; /* Set to Invalid */
cmd->instance = instance;
@@ -658,6 +698,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
memset(cmd->io_request, 0,
sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
cmd->io_request_phys_addr = io_req_base_phys + offset;
+ cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
}
if (megasas_create_sg_sense_fusion(instance))
@@ -725,6 +766,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
+ unsigned long flags;
fusion = instance->ctrl_context;
@@ -781,6 +823,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
@@ -796,7 +839,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
/* driver support Extended MSIX */
- if (fusion->adapter_type == INVADER_SERIES)
+ if (fusion->adapter_type >= INVADER_SERIES)
drv_ops->mfi_capabilities.support_additional_msix = 1;
/* driver supports HA / Remote LUN over Fast Path interface */
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -813,6 +856,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
drv_ops->mfi_capabilities.support_qd_throttling = 1;
+ drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -850,7 +894,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
break;
}
- megasas_fire_cmd_fusion(instance, &req_desc);
+ /* For Ventura, IOC INIT also requires a 64-bit descriptor write. */
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(le32_to_cpu(req_desc.u.low),
+ &instance->reg_set->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc.u.high),
+ &instance->reg_set->inbound_high_queue_port);
+ mmiowb();
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
@@ -1009,11 +1060,6 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
memset(ci, 0, fusion->max_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-#if VD_EXT_DEBUG
- dev_dbg(&instance->pdev->dev,
- "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
- __func__, cpu_to_le32(size_map_info));
-#endif
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -1065,10 +1111,11 @@ megasas_get_map_info(struct megasas_instance *instance)
int
megasas_sync_map_info(struct megasas_instance *instance)
{
- int ret = 0, i;
+ int i;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- u32 size_sync_info, num_lds;
+ u16 num_lds;
+ u32 size_sync_info;
struct fusion_context *fusion;
struct MR_LD_TARGET_SYNC *ci = NULL;
struct MR_DRV_RAID_MAP_ALL *map;
@@ -1134,7 +1181,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
instance->instancet->issue_dcmd(instance, cmd);
- return ret;
+ return 0;
}
/*
@@ -1220,7 +1267,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u32 max_cmd, scratch_pad_2;
+ u16 max_cmd;
+ u32 scratch_pad_2;
int i = 0, count;
fusion = instance->ctrl_context;
@@ -1230,13 +1278,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
/*
- * Reduce the max supported cmds by 1. This is to ensure that the
- * reply_q_sz (1 more than the max cmd that driver may send)
- * does not exceed max cmds that the FW can support
- */
- instance->max_fw_cmds = instance->max_fw_cmds-1;
-
- /*
* Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
*/
instance->max_mfi_cmds =
@@ -1247,12 +1288,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
fusion->request_alloc_sz =
- sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
*(fusion->reply_q_depth);
fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
- (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
- (max_cmd + 1)); /* Extra 1 for SMID 0 */
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+ * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1302,7 +1343,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fusion->last_reply_idx[i] = 0;
/*
- * For fusion adapters, 3 commands for IOCTL and 5 commands
+ * For fusion adapters, 3 commands for IOCTL and 8 commands
* for driver's internal DCMDs.
*/
instance->max_scsi_cmds = instance->max_fw_cmds -
@@ -1331,6 +1372,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
}
instance->flag_ieee = 1;
+ instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
fusion->fast_path_io = 0;
fusion->drv_map_pages = get_order(fusion->drv_map_sz);
@@ -1388,96 +1430,348 @@ fail_alloc_mfi_cmds:
*/
void
-map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+map_cmd_status(struct fusion_context *fusion,
+ struct scsi_cmnd *scmd, u8 status, u8 ext_status,
+ u32 data_length, u8 *sense)
{
+ u8 cmd_type;
+ int resid;
+ cmd_type = megasas_cmd_type(scmd);
switch (status) {
case MFI_STAT_OK:
- cmd->scmd->result = DID_OK << 16;
+ scmd->result = DID_OK << 16;
break;
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
- cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+ scmd->result = (DID_ERROR << 16) | ext_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
- cmd->scmd->result = (DID_OK << 16) | ext_status;
+ scmd->result = (DID_OK << 16) | ext_status;
if (ext_status == SAM_STAT_CHECK_CONDITION) {
- memset(cmd->scmd->sense_buffer, 0,
+ memset(scmd->sense_buffer, 0,
SCSI_SENSE_BUFFERSIZE);
- memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ memcpy(scmd->sense_buffer, sense,
SCSI_SENSE_BUFFERSIZE);
- cmd->scmd->result |= DRIVER_SENSE << 24;
+ scmd->result |= DRIVER_SENSE << 24;
}
+
+ /*
+ * If the IO request is partially completed, then MR FW will
+ * update the "io_request->DataLength" field with the actual number
+ * of bytes transferred. The driver will set the residual byte count
+ * in the SCSI command structure.
+ */
+ resid = (scsi_bufflen(scmd) - data_length);
+ scsi_set_resid(scmd, resid);
+
+ if (resid &&
+ ((cmd_type == READ_WRITE_LDIO) ||
+ (cmd_type == READ_WRITE_SYSPDIO)))
+ scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
+ " requested/completed 0x%x/0x%x\n",
+ status, scsi_bufflen(scmd), data_length);
break;
case MFI_STAT_LD_OFFLINE:
case MFI_STAT_DEVICE_NOT_FOUND:
- cmd->scmd->result = DID_BAD_TARGET << 16;
+ scmd->result = DID_BAD_TARGET << 16;
break;
case MFI_STAT_CONFIG_SEQ_MISMATCH:
- cmd->scmd->result = DID_IMM_RETRY << 16;
+ scmd->result = DID_IMM_RETRY << 16;
break;
default:
- dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
- cmd->scmd->result = DID_ERROR << 16;
+ scmd->result = DID_ERROR << 16;
break;
}
}
/**
+ * megasas_is_prp_possible -
+ * Checks if native NVMe PRPs can be built for the IO
+ *
+ * @instance: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sge_count: scatter gather element count.
+ *
+ * Returns: true: PRPs can be built
+ * false: IEEE SGLs needs to be built
+ *		false: IEEE SGLs need to be built
+static bool
+megasas_is_prp_possible(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd, int sge_count)
+{
+ struct fusion_context *fusion;
+ int i;
+ u32 data_length = 0;
+ struct scatterlist *sg_scmd;
+ bool build_prp = false;
+ u32 mr_nvme_pg_size;
+
+ mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+ MR_DEFAULT_NVME_PAGE_SIZE);
+ fusion = instance->ctrl_context;
+ data_length = scsi_bufflen(scmd);
+ sg_scmd = scsi_sglist(scmd);
+
+ /*
+ * NVMe uses one PRP for each page (or part of a page).
+ * Look at the data length: if it is 4 pages or less, IEEE SGLs are OK;
+ * if it is more than 5 pages, build a native PRP list;
+ * if it is more than 4 and at most 5 pages, check the first SG entry -
+ * if its size is >= the residual beyond 4 pages, use IEEE SGLs,
+ * otherwise build a native PRP list.
+ */
+
+ if (data_length > (mr_nvme_pg_size * 5)) {
+ build_prp = true;
+ } else if ((data_length > (mr_nvme_pg_size * 4)) &&
+ (data_length <= (mr_nvme_pg_size * 5))) {
+ /* check if 1st SG entry size is < residual beyond 4 pages */
+ if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
+ build_prp = true;
+ }
+
+/*
+ * The code below detects gaps/holes in IO data buffers.
+ * What do holes/gaps mean?
+ * Any SGE except the first one in an SGL that starts at a non NVME page
+ * size aligned address, OR any SGE except the last one in an SGL that
+ * ends at a non NVME page size boundary.
+ *
+ * The driver has already informed the block layer of the boundary rules
+ * for bio merging at the NVME page size boundary by calling the kernel
+ * API blk_queue_virt_boundary inside slave_config.
+ * IOs with holes can still reach the driver because of IO merging done
+ * by the IO scheduler.
+ *
+ * With SCSI BLK MQ enabled there is no IO scheduling, hence no IO
+ * merging, so no IOs with holes.
+ *
+ * With SCSI BLK MQ disabled, the IO scheduler may merge IOs and then
+ * send IOs with holes.
+ *
+ * Though the driver can ask the block layer to disable IO merging by
+ * calling queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue),
+ * the user may tune the sysfs parameter nomerges back to 0 or 1.
+ *
+ * If IO scheduling is enabled with SCSI BLK MQ in the future, this
+ * hole-detection algorithm will be required in the driver for the
+ * SCSI BLK MQ enabled case as well.
+ */
+ scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
+ if ((i != 0) && (i != (sge_count - 1))) {
+ if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
+ mega_mod64(sg_dma_address(sg_scmd),
+ mr_nvme_pg_size)) {
+ build_prp = false;
+ atomic_inc(&instance->sge_holes_type1);
+ break;
+ }
+ }
+
+ if ((sge_count > 1) && (i == 0)) {
+ if ((mega_mod64((sg_dma_address(sg_scmd) +
+ sg_dma_len(sg_scmd)),
+ mr_nvme_pg_size))) {
+ build_prp = false;
+ atomic_inc(&instance->sge_holes_type2);
+ break;
+ }
+ }
+
+ if ((sge_count > 1) && (i == (sge_count - 1))) {
+ if (mega_mod64(sg_dma_address(sg_scmd),
+ mr_nvme_pg_size)) {
+ build_prp = false;
+ atomic_inc(&instance->sge_holes_type3);
+ break;
+ }
+ }
+ }
+
+ return build_prp;
+}
+
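The eligibility rule spelled out in the comments above can be tried in isolation. The following standalone sketch (not part of the patch; the 4 KiB page size, the struct seg layout and the helper names are illustrative assumptions) applies the same 4/5-page heuristic and the same hole checks to a plain array of DMA segments:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_PAGE_SIZE 4096u

struct seg {
	uint64_t addr;	/* DMA address of the segment */
	uint32_t len;	/* length in bytes */
};

static bool prp_possible(const struct seg *sg, int cnt, uint32_t data_len)
{
	bool build_prp = false;
	int i;

	/* 4/5 page heuristic: small transfers stay on IEEE SGLs */
	if (data_len > NVME_PAGE_SIZE * 5)
		build_prp = true;
	else if (data_len > NVME_PAGE_SIZE * 4 &&
		 sg[0].len < data_len - NVME_PAGE_SIZE * 4)
		build_prp = true;

	if (!build_prp)
		return false;

	/*
	 * Reject buffers with holes: every middle segment must be page
	 * sized and page aligned, the first must end on a page boundary,
	 * the last must start on one.
	 */
	for (i = 0; i < cnt; i++) {
		if (i != 0 && i != cnt - 1 &&
		    ((sg[i].addr | sg[i].len) & (NVME_PAGE_SIZE - 1)))
			return false;
		if (cnt > 1 && i == 0 &&
		    ((sg[i].addr + sg[i].len) & (NVME_PAGE_SIZE - 1)))
			return false;
		if (cnt > 1 && i == cnt - 1 &&
		    (sg[i].addr & (NVME_PAGE_SIZE - 1)))
			return false;
	}
	return true;
}

int main(void)
{
	struct seg contig[] = { { 0x10000, 24576 } };	/* 6 pages, no hole */
	struct seg hole[]   = { { 0x10000, 4096 }, { 0x20100, 20480 } };

	printf("contiguous: %d\n", prp_possible(contig, 1, 24576));
	printf("with hole : %d\n", prp_possible(hole, 2, 24576));
	return 0;
}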
+/**
+ * megasas_make_prp_nvme -
+ * Prepare PRPs (Physical Region Page) - SGLs specific to NVMe drives only
+ *
+ * @instance: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sgl_ptr: SGL to be filled in
+ * @cmd: Fusion command frame
+ * @sge_count: scatter gather element count.
+ *
+ * Returns: true: PRPs are built
+ * false: IEEE SGLs needs to be built
+ *		false: IEEE SGLs need to be built
+static bool
+megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+ struct megasas_cmd_fusion *cmd, int sge_count)
+{
+ int sge_len, offset, num_prp_in_chain = 0;
+ struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
+ u64 *ptr_sgl;
+ dma_addr_t ptr_sgl_phys;
+ u64 sge_addr;
+ u32 page_mask, page_mask_result;
+ struct scatterlist *sg_scmd;
+ u32 first_prp_len;
+ bool build_prp = false;
+ int data_len = scsi_bufflen(scmd);
+ struct fusion_context *fusion;
+ u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+ MR_DEFAULT_NVME_PAGE_SIZE);
+
+ fusion = instance->ctrl_context;
+
+ build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
+
+ if (!build_prp)
+ return false;
+
+ /*
+ * NVMe has a very convoluted PRP format. One PRP is required
+ * for each page or partial page. The driver needs to split up OS
+ * sg_list entries if they are longer than one page or cross a page
+ * boundary. The driver also has to insert a PRP list pointer entry
+ * as the last entry in each physical page of the PRP list.
+ *
+ * NOTE: The first PRP "entry" is actually placed in the first
+ * SGL entry in the main message as IEEE 64 format. The 2nd
+ * entry in the main message is the chain element, and the rest
+ * of the PRP entries are built in the contiguous pcie buffer.
+ */
+ page_mask = mr_nvme_pg_size - 1;
+ ptr_sgl = (u64 *)cmd->sg_frame;
+ ptr_sgl_phys = cmd->sg_frame_phys_addr;
+ memset(ptr_sgl, 0, instance->max_chain_frame_sz);
+
+ /* Build the chain frame element which holds all PRPs except the first */
+ main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
+ ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
+
+ main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
+ main_chain_element->NextChainOffset = 0;
+ main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+ /* Build the first PRP; the SGE need not be page aligned */
+ ptr_first_sgl = sgl_ptr;
+ sg_scmd = scsi_sglist(scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+
+ offset = (u32)(sge_addr & page_mask);
+ first_prp_len = mr_nvme_pg_size - offset;
+
+ ptr_first_sgl->Address = cpu_to_le64(sge_addr);
+ ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
+
+ data_len -= first_prp_len;
+
+ if (sge_len > first_prp_len) {
+ sge_addr += first_prp_len;
+ sge_len -= first_prp_len;
+ } else if (sge_len == first_prp_len) {
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ for (;;) {
+ offset = (u32)(sge_addr & page_mask);
+
+ /* Insert a PRP list pointer entry at the page boundary */
+ page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
+ if (unlikely(!page_mask_result)) {
+ scmd_printk(KERN_NOTICE,
+ scmd, "page boundary ptr_sgl: 0x%p\n",
+ ptr_sgl);
+ ptr_sgl_phys += 8;
+ *ptr_sgl = cpu_to_le64(ptr_sgl_phys);
+ ptr_sgl++;
+ num_prp_in_chain++;
+ }
+
+ *ptr_sgl = cpu_to_le64(sge_addr);
+ ptr_sgl++;
+ ptr_sgl_phys += 8;
+ num_prp_in_chain++;
+
+ sge_addr += mr_nvme_pg_size;
+ sge_len -= mr_nvme_pg_size;
+ data_len -= mr_nvme_pg_size;
+
+ if (data_len <= 0)
+ break;
+
+ if (sge_len > 0)
+ continue;
+
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ main_chain_element->Length =
+ cpu_to_le32(num_prp_in_chain * sizeof(u64));
+
+ atomic_inc(&instance->prp_sgl);
+ return build_prp;
+}
+
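As a rough companion to the PRP layout described in the NOTE above, the sketch below (again not driver code; the names and the fixed 4 KiB page size are assumptions) counts only the data PRPs a transfer needs, one for the leading partial page plus one per remaining page, and deliberately ignores the PRP-list pointer entries the driver inserts whenever the PRP list itself crosses a page:

#include <stdint.h>
#include <stdio.h>

#define NVME_PAGE_SIZE 4096u

static unsigned int prp_entries_needed(uint64_t addr, uint32_t len)
{
	/* the first PRP covers the buffer up to the next page boundary */
	uint32_t first = NVME_PAGE_SIZE - (uint32_t)(addr & (NVME_PAGE_SIZE - 1));
	unsigned int n = 1;	/* first PRP lives in the main message */

	if (len <= first)
		return n;

	len -= first;
	n += (len + NVME_PAGE_SIZE - 1) / NVME_PAGE_SIZE;	/* one per page */
	return n;
}

int main(void)
{
	/* 64 KiB starting 512 bytes into a page:
	 * 1 leading partial + 15 full pages + 1 trailing partial = 17
	 */
	printf("%u PRP entries\n", prp_entries_needed(0x1000200, 65536));
	return 0;
}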
+/**
* megasas_make_sgl_fusion - Prepares 32-bit SGL
* @instance: Adapter soft state
* @scp: SCSI command from the mid-layer
* @sgl_ptr: SGL to be filled in
* @cmd: cmd we are working on
+ * @sge_count:		sge count
*
- * If successful, this function returns the number of SG elements.
*/
-static int
+static void
megasas_make_sgl_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
- struct megasas_cmd_fusion *cmd)
+ struct megasas_cmd_fusion *cmd, int sge_count)
{
- int i, sg_processed, sge_count;
+ int i, sg_processed;
struct scatterlist *os_sgl;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (fusion->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
}
- sge_count = scsi_dma_map(scp);
-
- BUG_ON(sge_count < 0);
-
- if (sge_count > instance->max_num_sge || !sge_count)
- return sge_count;
-
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
- if (fusion->adapter_type == INVADER_SERIES)
+ if (fusion->adapter_type >= INVADER_SERIES)
if (i == sge_count - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
sgl_ptr++;
-
sg_processed = i + 1;
if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (fusion->adapter_type >= INVADER_SERIES) {
if ((le16_to_cpu(cmd->io_request->IoFlags) &
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1493,7 +1787,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- if (fusion->adapter_type == INVADER_SERIES)
+ if (fusion->adapter_type >= INVADER_SERIES)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags =
@@ -1507,6 +1801,45 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
memset(sgl_ptr, 0, instance->max_chain_frame_sz);
}
}
+ atomic_inc(&instance->ieee_sgl);
+}
+
+/**
+ * megasas_make_sgl - Build Scatter Gather List(SGLs)
+ * @scp: SCSI command pointer
+ * @instance: Soft instance of controller
+ * @cmd: Fusion command pointer
+ *
+ * This function builds SGLs based on the device type.
+ * For NVMe drives, SGLs are built in the NVMe native format -
+ * PRPs (Physical Region Page).
+ *
+ * Returns the number of SG elements actually used, zero if the SG list
+ * is NULL, or -ENOMEM if the mapping failed
+ */
+static
+int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ int sge_count;
+ bool build_prp = false;
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
+
+ sge_count = scsi_dma_map(scp);
+
+ if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
+ return sge_count;
+
+ sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
+ (cmd->pd_interface == NVME_PD))
+ build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
+ cmd, sge_count);
+
+ if (!build_prp)
+ megasas_make_sgl_fusion(instance, scp, sgl_chain64,
+ cmd, sge_count);
return sge_count;
}
@@ -1525,7 +1858,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
struct MR_LD_RAID *raid;
- u32 ld;
+ u16 ld;
u64 start_blk = io_info->pdBlock;
u8 *cdb = io_request->CDB.CDB32;
u32 num_blocks = io_info->numBlocks;
@@ -1574,6 +1907,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
} else {
io_request->EEDPFlags = cpu_to_le16(
@@ -1688,6 +2022,166 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
}
/**
+ * megasas_stream_detect -	stream detection on read and write IOs
+ * @instance: Adapter soft state
+ * @cmd: Command to be prepared
+ * @io_info: IO Request info
+ *
+ */
+
+/** stream detection on read and write IOs */
+static void megasas_stream_detect(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd,
+ struct IO_REQUEST_INFO *io_info)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ u32 device_id = io_info->ldTgtId;
+ struct LD_STREAM_DETECT *current_ld_sd
+ = fusion->stream_detect_by_ld[device_id];
+ u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
+ u32 shifted_values, unshifted_values;
+ u32 index_value_mask, shifted_values_mask;
+ int i;
+ bool is_read_ahead = false;
+ struct STREAM_DETECT *current_sd;
+ /* find possible stream */
+ for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
+ stream_num = (*track_stream >>
+ (i * BITS_PER_INDEX_STREAM)) &
+ STREAM_MASK;
+ current_sd = &current_ld_sd->stream_track[stream_num];
+ /* if we found a stream, update the raid
+ * context and also update the mruBitMap
+ */
+ /* boundary condition */
+ if ((current_sd->next_seq_lba) &&
+ (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
+ (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
+ (current_sd->is_read == io_info->isRead)) {
+
+ if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
+ ((!io_info->isRead) || (!is_read_ahead)))
+ /*
+ * Once the API is available, we need to change this.
+ * At this point we are not allowing any gap.
+ */
+ continue;
+
+ SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
+ current_sd->next_seq_lba =
+ io_info->ldStartBlock + io_info->numBlocks;
+ /*
+ * update the mruBitMap LRU
+ */
+ shifted_values_mask =
+ (1 << i * BITS_PER_INDEX_STREAM) - 1;
+ shifted_values = ((*track_stream & shifted_values_mask)
+ << BITS_PER_INDEX_STREAM);
+ index_value_mask =
+ STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+ unshifted_values =
+ *track_stream & ~(shifted_values_mask |
+ index_value_mask);
+ *track_stream =
+ unshifted_values | shifted_values | stream_num;
+ return;
+ }
+ }
+ /*
+ * if we did not find any stream, create a new one
+ * from the least recently used
+ */
+ stream_num = (*track_stream >>
+ ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
+ STREAM_MASK;
+ current_sd = &current_ld_sd->stream_track[stream_num];
+ current_sd->is_read = io_info->isRead;
+ current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
+ *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
+ return;
+}
+
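The MRU bookkeeping above packs one stream index per 4 bits of a u32, with slot 0 holding the most recently used stream. Here is a standalone sketch of the promote-to-front step, assuming 8 tracked streams and the same shifted/unshifted masking as the code above (all names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_INDEX_STREAM	4
#define STREAM_MASK		0xFu

/* move the stream sitting at position i to the most-recently-used slot */
static uint32_t promote_stream(uint32_t track, unsigned int i)
{
	uint32_t stream_num = (track >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	uint32_t shifted_mask = (1u << (i * BITS_PER_INDEX_STREAM)) - 1;
	uint32_t index_mask = STREAM_MASK << (i * BITS_PER_INDEX_STREAM);
	uint32_t shifted = (track & shifted_mask) << BITS_PER_INDEX_STREAM;
	uint32_t unshifted = track & ~(shifted_mask | index_mask);

	return unshifted | shifted | stream_num;
}

int main(void)
{
	/* slots (MRU..LRU) hold stream numbers 0,1,2,...,7 */
	uint32_t track = 0x76543210;

	/* a hit on the stream currently in slot 3 yields 0x76542103 */
	printf("0x%08x -> 0x%08x\n", track, promote_stream(track, 3));
	return 0;
}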
+/**
+ * megasas_set_raidflag_cpu_affinity - This function sets the cpu
+ * affinity (cpu of the controller) and raid_flags in the raid context
+ * based on IO type.
+ *
+ * @praid_context: IO RAID context
+ * @raid: LD raid map
+ * @fp_possible: Is fast path possible?
+ * @is_read:		Is read IO?
+ * @scsi_buff_len:	SCSI payload length
+ *
+ */
+static void
+megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
+ struct MR_LD_RAID *raid, bool fp_possible,
+ u8 is_read, u32 scsi_buff_len)
+{
+ u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
+ struct RAID_CONTEXT_G35 *rctx_g35;
+
+ rctx_g35 = &praid_context->raid_context_g35;
+ if (fp_possible) {
+ if (is_read) {
+ if ((raid->cpuAffinity.pdRead.cpu0) &&
+ (raid->cpuAffinity.pdRead.cpu1))
+ cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+ else if (raid->cpuAffinity.pdRead.cpu1)
+ cpu_sel = MR_RAID_CTX_CPUSEL_1;
+ } else {
+ if ((raid->cpuAffinity.pdWrite.cpu0) &&
+ (raid->cpuAffinity.pdWrite.cpu1))
+ cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+ else if (raid->cpuAffinity.pdWrite.cpu1)
+ cpu_sel = MR_RAID_CTX_CPUSEL_1;
+ /* Fast path cache bypass capable R0/R1 VD */
+ if ((raid->level <= 1) &&
+ (raid->capability.fp_cache_bypass_capable)) {
+ rctx_g35->routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
+ rctx_g35->raid_flags =
+ (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+ }
+ }
+ } else {
+ if (is_read) {
+ if ((raid->cpuAffinity.ldRead.cpu0) &&
+ (raid->cpuAffinity.ldRead.cpu1))
+ cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+ else if (raid->cpuAffinity.ldRead.cpu1)
+ cpu_sel = MR_RAID_CTX_CPUSEL_1;
+ } else {
+ if ((raid->cpuAffinity.ldWrite.cpu0) &&
+ (raid->cpuAffinity.ldWrite.cpu1))
+ cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+ else if (raid->cpuAffinity.ldWrite.cpu1)
+ cpu_sel = MR_RAID_CTX_CPUSEL_1;
+
+ if (is_stream_detected(rctx_g35) &&
+ (raid->level == 5) &&
+ (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
+ (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
+ cpu_sel = MR_RAID_CTX_CPUSEL_0;
+ }
+ }
+
+ rctx_g35->routing_flags |=
+ (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+
+ /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+ * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
+ * The IO subtype is not a bitmap.
+ */
+ if ((raid->level == 1) && (!is_read)) {
+ if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+ praid_context->raid_context_g35.raid_flags =
+ (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+ }
+}
+
+/**
* megasas_build_ldio_fusion - Prepares IOs to devices
* @instance: Adapter soft state
* @scp: SCSI command
@@ -1701,29 +2195,36 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
{
- u8 fp_possible;
+ bool fp_possible;
+ u16 ld;
u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+ u32 scsi_buff_len;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
+ unsigned long spinlock_flags;
+ union RAID_CONTEXT_UNION *praid_context;
+ struct MR_LD_RAID *raid = NULL;
+ struct MR_PRIV_DEVICE *mrdev_priv;
device_id = MEGASAS_DEV_INDEX(scp);
fusion = instance->ctrl_context;
io_request = cmd->io_request;
- io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
- io_request->RaidContext.status = 0;
- io_request->RaidContext.exStatus = 0;
+ io_request->RaidContext.raid_context.virtual_disk_tgt_id =
+ cpu_to_le16(device_id);
+ io_request->RaidContext.raid_context.status = 0;
+ io_request->RaidContext.raid_context.ex_status = 0;
req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
start_lba_lo = 0;
start_lba_hi = 0;
- fp_possible = 0;
+ fp_possible = false;
/*
* 6-byte READ(0x08) or WRITE(0x0A) cdb
@@ -1779,22 +2280,27 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
io_info.numBlocks = datalength;
io_info.ldTgtId = device_id;
- io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
+ io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+ scsi_buff_len = scsi_bufflen(scp);
+ io_request->DataLength = cpu_to_le32(scsi_buff_len);
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
io_info.isRead = 1;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
- instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
- io_request->RaidContext.regLockFlags = 0;
- fp_possible = 0;
+ if (ld < instance->fw_supported_vd_count)
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ if (!raid || (!fusion->fast_path_io)) {
+ io_request->RaidContext.raid_context.reg_lock_flags = 0;
+ fp_possible = false;
} else {
if (MR_BuildRaidContext(instance, &io_info,
- &io_request->RaidContext,
+ &io_request->RaidContext.raid_context,
local_map_ptr, &raidLUN))
- fp_possible = io_info.fpOkForIo;
+ fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
/* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
@@ -1803,6 +2309,54 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
raw_smp_processor_id() % instance->msix_vectors : 0;
+ praid_context = &io_request->RaidContext;
+
+ if (instance->is_ventura) {
+ spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
+ /* On Ventura, if a stream is detected for a read and the LD is
+ * read ahead capable, issue this IO as an LD IO.
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
+ io_info.isRead && io_info.ra_capable)
+ fp_possible = false;
+
+ /* FP for Optimal raid level 1.
+ * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
+ * are built by the driver as LD I/Os.
+ * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
+ * (there is never a reason to process these as buffered writes)
+ * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
+ * with the SLD bit asserted.
+ */
+ if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+ mrdev_priv = scp->device->hostdata;
+
+ if (atomic_inc_return(&instance->fw_outstanding) >
+ (instance->host->can_queue)) {
+ fp_possible = false;
+ atomic_dec(&instance->fw_outstanding);
+ } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+ atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+ fp_possible = false;
+ atomic_dec(&instance->fw_outstanding);
+ if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+ atomic_set(&mrdev_priv->r1_ldio_hint,
+ instance->r1_ldio_hint_default);
+ }
+ }
+
+ /* If raid is NULL, set CPU affinity to default CPU0 */
+ if (raid)
+ megasas_set_raidflag_cpu_affinity(praid_context,
+ raid, fp_possible, io_info.isRead,
+ scsi_buff_len);
+ else
+ praid_context->raid_context_g35.routing_flags |=
+ (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+ }
+
if (fp_possible) {
megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
local_map_ptr, start_lba_lo);
@@ -1811,29 +2365,52 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.regLockFlags ==
+ if (io_request->RaidContext.raid_context.reg_lock_flags ==
REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.Type = MPI2_TYPE_CUDA;
- io_request->RaidContext.nseg = 0x1;
+ io_request->RaidContext.raid_context.type
+ = MPI2_TYPE_CUDA;
+ io_request->RaidContext.raid_context.nseg = 0x1;
io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- io_request->RaidContext.regLockFlags |=
+ io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ } else if (instance->is_ventura) {
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (1 << RAID_CONTEXT_NSEG_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ io_request->RaidContext.raid_context_g35.routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ io_request->IoFlags |=
+ cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
}
- if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
- (io_info.isRead)) {
+ if (fusion->load_balance_info &&
+ (fusion->load_balance_info[device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
io_info.devHandle =
get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
- &io_info);
+ &io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
+ if (instance->is_ventura)
+ io_request->RaidContext.raid_context_g35.span_arm
+ = io_info.span_arm;
+ else
+ io_request->RaidContext.raid_context.span_arm
+ = io_info.span_arm;
+
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ if (instance->is_ventura)
+ cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
+ else
+ cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+
if ((raidLUN[0] == 1) &&
(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
instance->dev_handle = !(instance->dev_handle);
@@ -1843,28 +2420,39 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
io_request->DevHandle = io_info.devHandle;
+ cmd->pd_interface = io_info.pd_interface;
/* populate the LUN field */
memcpy(io_request->LUN, raidLUN, 8);
} else {
- io_request->RaidContext.timeoutValue =
+ io_request->RaidContext.raid_context.timeout_value =
cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
- (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
+ (io_request->RaidContext.raid_context.reg_lock_flags
+ == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.Type = MPI2_TYPE_CUDA;
- io_request->RaidContext.regLockFlags |=
+ io_request->RaidContext.raid_context.type
+ = MPI2_TYPE_CUDA;
+ io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- io_request->RaidContext.nseg = 0x1;
+ io_request->RaidContext.raid_context.nseg = 0x1;
+ } else if (instance->is_ventura) {
+ io_request->RaidContext.raid_context_g35.routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (1 << RAID_CONTEXT_NSEG_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
}
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = cpu_to_le16(device_id);
+
} /* Not FP */
}
@@ -1881,27 +2469,26 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
{
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
- u16 pd_index = 0;
+ u16 ld;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
u8 span, physArm;
__le16 devHandle;
- u32 ld, arRef, pd;
+ u32 arRef, pd;
struct MR_LD_RAID *raid;
struct RAID_CONTEXT *pRAID_Context;
u8 fp_possible = 1;
io_request = cmd->io_request;
device_id = MEGASAS_DEV_INDEX(scmd);
- pd_index = MEGASAS_PD_INDEX(scmd);
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
/* get RAID_Context pointer */
- pRAID_Context = &io_request->RaidContext;
+ pRAID_Context = &io_request->RaidContext.raid_context;
/* Check with FW team */
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
- pRAID_Context->regLockRowLBA = 0;
- pRAID_Context->regLockLength = 0;
+ pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+ pRAID_Context->reg_lock_row_lba = 0;
+ pRAID_Context->reg_lock_length = 0;
if (fusion->fast_path_io && (
device_id < instance->fw_supported_vd_count)) {
@@ -1909,10 +2496,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
if (ld >= instance->fw_supported_vd_count)
fp_possible = 0;
-
- raid = MR_LdRaidGet(ld, local_map_ptr);
- if (!(raid->capability.fpNonRWCapable))
- fp_possible = 0;
+ else {
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (!(raid->capability.fpNonRWCapable))
+ fp_possible = 0;
+ }
} else
fp_possible = 0;
@@ -1920,7 +2508,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
- pRAID_Context->timeoutValue =
+ pRAID_Context->timeout_value =
cpu_to_le16 (scmd->request->timeout / HZ);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
@@ -1928,9 +2516,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
} else {
/* set RAID context values */
- pRAID_Context->configSeqNum = raid->seqNum;
- pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
- pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+ pRAID_Context->config_seq_num = raid->seqNum;
+ if (!instance->is_ventura)
+ pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeout_value =
+ cpu_to_le16(raid->fpIoTimeoutForLd);
/* get the DevHandle for the PD (since this is
fpNonRWCapable, this is a single disk RAID0) */
@@ -1965,7 +2555,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
*/
static void
megasas_build_syspd_fusion(struct megasas_instance *instance,
- struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
+ bool fp_possible)
{
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
@@ -1975,22 +2566,25 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct RAID_CONTEXT *pRAID_Context;
struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
struct fusion_context *fusion = instance->ctrl_context;
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
device_id = MEGASAS_DEV_INDEX(scmd);
pd_index = MEGASAS_PD_INDEX(scmd);
os_timeout_value = scmd->request->timeout / HZ;
+ mr_device_priv_data = scmd->device->hostdata;
+ cmd->pd_interface = mr_device_priv_data->interface_type;
io_request = cmd->io_request;
/* get RAID_Context pointer */
- pRAID_Context = &io_request->RaidContext;
- pRAID_Context->regLockFlags = 0;
- pRAID_Context->regLockRowLBA = 0;
- pRAID_Context->regLockLength = 0;
+ pRAID_Context = &io_request->RaidContext.raid_context;
+ pRAID_Context->reg_lock_flags = 0;
+ pRAID_Context->reg_lock_row_lba = 0;
+ pRAID_Context->reg_lock_length = 0;
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
io_request->LUN[1] = scmd->device->lun;
- pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
/* If FW supports PD sequence number */
@@ -1999,24 +2593,38 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
/* TgtId must be incremented by 255 as jbod seq number is index
* below raid map
*/
- pRAID_Context->VirtualDiskTgtId =
- cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
- pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
+ /* More than 256 PD/JBOD support for Ventura */
+ if (instance->support_morethan256jbod)
+ pRAID_Context->virtual_disk_tgt_id =
+ pd_sync->seq[pd_index].pd_target_id;
+ else
+ pRAID_Context->virtual_disk_tgt_id =
+ cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
+ pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- pRAID_Context->regLockFlags |=
- (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
- pRAID_Context->Type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
+ if (instance->is_ventura) {
+ io_request->RaidContext.raid_context_g35.routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (1 << RAID_CONTEXT_NSEG_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ } else {
+ pRAID_Context->type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
+ pRAID_Context->reg_lock_flags |=
+ (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ }
} else if (fusion->fast_path_io) {
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
- pRAID_Context->configSeqNum = 0;
+ pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+ pRAID_Context->config_seq_num = 0;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
} else {
/* Want to send all IO via FW path */
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
- pRAID_Context->configSeqNum = 0;
+ pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+ pRAID_Context->config_seq_num = 0;
io_request->DevHandle = cpu_to_le16(0xFFFF);
}
@@ -2032,17 +2640,17 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
+ pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
} else {
/* system pd Fast Path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
timeout_limit = (scmd->device->type == TYPE_DISK) ?
255 : 0xFFFF;
- pRAID_Context->timeoutValue =
+ pRAID_Context->timeout_value =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type == INVADER_SERIES)
+ if (fusion->adapter_type >= INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
@@ -2066,9 +2674,11 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
{
- u16 sge_count;
+ int sge_count;
u8 cmd_type;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scp->device->hostdata;
/* Zero out some fields so they don't get reused */
memset(io_request->LUN, 0x0, 8);
@@ -2078,9 +2688,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->Control = 0;
io_request->EEDPBlockSize = 0;
io_request->ChainOffset = 0;
- io_request->RaidContext.RAIDFlags = 0;
- io_request->RaidContext.Type = 0;
- io_request->RaidContext.nseg = 0;
+ io_request->RaidContext.raid_context.raid_flags = 0;
+ io_request->RaidContext.raid_context.type = 0;
+ io_request->RaidContext.raid_context.nseg = 0;
memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
/*
@@ -2097,12 +2707,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
megasas_build_ld_nonrw_fusion(instance, scp, cmd);
break;
case READ_WRITE_SYSPDIO:
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
+ break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support &&
- (cmd_type == NON_READ_WRITE_SYSPDIO))
- megasas_build_syspd_fusion(instance, scp, cmd, 0);
+ if (instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable)
+ megasas_build_syspd_fusion(instance, scp, cmd, false);
else
- megasas_build_syspd_fusion(instance, scp, cmd, 1);
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
default:
break;
@@ -2112,23 +2724,27 @@ megasas_build_io_fusion(struct megasas_instance *instance,
* Construct SGL
*/
- sge_count =
- megasas_make_sgl_fusion(instance, scp,
- (struct MPI25_IEEE_SGE_CHAIN64 *)
- &io_request->SGL, cmd);
+ sge_count = megasas_make_sgl(instance, scp, cmd);
- if (sge_count > instance->max_num_sge) {
- dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
- "max (0x%x) allowed\n", sge_count,
- instance->max_num_sge);
+ if (sge_count > instance->max_num_sge || (sge_count < 0)) {
+ dev_err(&instance->pdev->dev,
+ "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
+ __func__, __LINE__, sge_count, instance->max_num_sge);
return 1;
}
- /* numSGE store lower 8 bit of sge_count.
- * numSGEExt store higher 8 bit of sge_count
- */
- io_request->RaidContext.numSGE = sge_count;
- io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
+ if (instance->is_ventura) {
+ set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
+ cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
+ cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
+ } else {
+ /* numSGE stores the lower 8 bits of sge_count.
+ * numSGEExt stores the upper 8 bits of sge_count.
+ */
+ io_request->RaidContext.raid_context.num_sge = sge_count;
+ io_request->RaidContext.raid_context.num_sge_ext =
+ (u8)(sge_count >> 8);
+ }
io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
@@ -2149,25 +2765,61 @@ megasas_build_io_fusion(struct megasas_instance *instance,
return 0;
}
-union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
{
u8 *p;
struct fusion_context *fusion;
- if (index >= instance->max_fw_cmds) {
- dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
- "descriptor for scsi%d\n", index,
- instance->host->host_no);
- return NULL;
- }
fusion = instance->ctrl_context;
- p = fusion->req_frames_desc
- +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
+ p = fusion->req_frames_desc +
+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
}
+
+/* megasas_prepare_secondRaid1_IO
+ * Prepares the second IO of a RAID 1 fast path write pair.
+ */
+void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd,
+ struct megasas_cmd_fusion *r1_cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
+ req_desc = cmd->request_desc;
+ /* Copy the IO request frame as well as 8 SGEs worth of data for the r1 command */
+ memcpy(r1_cmd->io_request, cmd->io_request,
+ (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
+ memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+ (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
+ /* The sense buffer is different for the r1 command */
+ r1_cmd->io_request->SenseBufferLowAddress =
+ cpu_to_le32(r1_cmd->sense_phys_addr);
+ r1_cmd->scmd = cmd->scmd;
+ req_desc2 = megasas_get_request_descriptor(instance,
+ (r1_cmd->index - 1));
+ req_desc2->Words = 0;
+ r1_cmd->request_desc = req_desc2;
+ req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
+ req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
+ r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
+ r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
+ r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
+ cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ cpu_to_le16(r1_cmd->index);
+ r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ cpu_to_le16(cmd->index);
+ /* The MSIxIndex of both commands' request descriptors should be the same */
+ r1_cmd->request_desc->SCSIIO.MSIxIndex =
+ cmd->request_desc->SCSIIO.MSIxIndex;
+ /* The span arm is different for the r1 cmd */
+ r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
+ cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
+}
+
/**
* megasas_build_and_issue_cmd_fusion -Main routine for building and
* issuing non IOCTL cmd
@@ -2178,7 +2830,7 @@ static u32
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scmd)
{
- struct megasas_cmd_fusion *cmd;
+ struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 index;
struct fusion_context *fusion;
@@ -2193,13 +2845,22 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ if (atomic_inc_return(&instance->fw_outstanding) >
+ instance->host->can_queue) {
+ atomic_dec(&instance->fw_outstanding);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+ if (!cmd) {
+ atomic_dec(&instance->fw_outstanding);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
index = cmd->index;
req_desc = megasas_get_request_descriptor(instance, index-1);
- if (!req_desc)
- return SCSI_MLQUEUE_HOST_BUSY;
req_desc->Words = 0;
cmd->request_desc = req_desc;
@@ -2208,6 +2869,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
megasas_return_cmd_fusion(instance, cmd);
dev_err(&instance->pdev->dev, "Error building command\n");
cmd->request_desc = NULL;
+ atomic_dec(&instance->fw_outstanding);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -2218,18 +2880,92 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
cmd->io_request->ChainOffset != 0xF)
dev_err(&instance->pdev->dev, "The chain offset value is not "
"correct : %x\n", cmd->io_request->ChainOffset);
+ /*
+ * If the IO is RAID 1/10 fast path write capable, try to get a
+ * second command from the pool and construct it. FW has confirmed
+ * that the LBA values of the two PDs corresponding to a single
+ * R1/10 LD are always the same.
+ */
+ /* The driver side count should always be less than max_fw_cmds
+ * to get a new command.
+ */
+ if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+ r1_cmd = megasas_get_cmd_fusion(instance,
+ (scmd->request->tag + instance->max_fw_cmds));
+ megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
+ }
+
/*
* Issue the command to the FW
*/
- atomic_inc(&instance->fw_outstanding);
megasas_fire_cmd_fusion(instance, req_desc);
+ if (r1_cmd)
+ megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
+
+
return 0;
}
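The R1 fast path write pairing used above is easiest to see in isolation. Below is a minimal model (illustrative only, none of these are the driver's structures) of two commands that hold each other's SMID and complete the SCSI command only after both halves have returned, reporting a failing status if either half failed:

#include <stdbool.h>
#include <stdio.h>

struct r1_cmd {
	unsigned int smid;
	unsigned int peer_smid;
	bool completed;
	int status;		/* 0 == MFI_STAT_OK */
};

static struct r1_cmd pool[8];

/* returns true once both halves are done and reports the final status */
static bool complete_half(struct r1_cmd *cmd, int *final_status)
{
	struct r1_cmd *peer = &pool[cmd->peer_smid - 1];

	cmd->completed = true;
	if (!peer->completed)
		return false;		/* wait for the other half */

	*final_status = cmd->status ? cmd->status : peer->status;
	return true;
}

int main(void)
{
	int status;

	pool[0] = (struct r1_cmd){ .smid = 1, .peer_smid = 2 };
	pool[1] = (struct r1_cmd){ .smid = 2, .peer_smid = 1, .status = 0x2d };

	if (!complete_half(&pool[0], &status))
		printf("first half done, waiting for peer\n");
	if (complete_half(&pool[1], &status))
		printf("pair complete, status 0x%x\n", status);
	return 0;
}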
/**
+ * megasas_complete_r1_command -
+ * completes R1 FP write commands which have a valid peer SMID
+ * @instance:			Adapter soft state
+ * @cmd:			MPT command frame
+ *
+ */
+static inline void
+megasas_complete_r1_command(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
+{
+ u8 *sense, status, ex_status;
+ u32 data_length;
+ u16 peer_smid;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *r1_cmd = NULL;
+ struct scsi_cmnd *scmd_local = NULL;
+ struct RAID_CONTEXT_G35 *rctx_g35;
+
+ rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
+ fusion = instance->ctrl_context;
+ peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+
+ r1_cmd = fusion->cmd_list[peer_smid - 1];
+ scmd_local = cmd->scmd;
+ status = rctx_g35->status;
+ ex_status = rctx_g35->ex_status;
+ data_length = cmd->io_request->DataLength;
+ sense = cmd->sense;
+
+ cmd->cmd_completed = true;
+
+ /* Check whether the peer command has completed */
+ if (r1_cmd->cmd_completed) {
+ rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
+ if (rctx_g35->status != MFI_STAT_OK) {
+ status = rctx_g35->status;
+ ex_status = rctx_g35->ex_status;
+ data_length = r1_cmd->io_request->DataLength;
+ sense = r1_cmd->sense;
+ }
+
+ megasas_return_cmd_fusion(instance, r1_cmd);
+ map_cmd_status(fusion, scmd_local, status, ex_status,
+ le32_to_cpu(data_length), sense);
+ if (instance->ldio_threshold &&
+ megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
+ scmd_local->SCp.ptr = NULL;
+ megasas_return_cmd_fusion(instance, cmd);
+ scsi_dma_unmap(scmd_local);
+ scmd_local->scsi_done(scmd_local);
+ }
+}
+
+/**
* complete_cmd_fusion - Completes command
* @instance: Adapter soft state
* Completes all commands that are in the reply descriptor queue
@@ -2244,8 +2980,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
struct megasas_cmd *cmd_mfi;
struct megasas_cmd_fusion *cmd_fusion;
u16 smid, num_completed;
- u8 reply_descript_type;
- u32 status, extStatus, device_id;
+ u8 reply_descript_type, *sense, status, extStatus;
+ u32 device_id, data_length;
union desc_value d_val;
struct LD_LOAD_BALANCE_INFO *lbinfo;
int threshold_reply_count = 0;
@@ -2275,20 +3011,17 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
d_val.u.high != cpu_to_le32(UINT_MAX)) {
- smid = le16_to_cpu(reply_desc->SMID);
+ smid = le16_to_cpu(reply_desc->SMID);
cmd_fusion = fusion->cmd_list[smid - 1];
-
- scsi_io_req =
- (struct MPI2_RAID_SCSI_IO_REQUEST *)
- cmd_fusion->io_request;
-
- if (cmd_fusion->scmd)
- cmd_fusion->scmd->SCp.ptr = NULL;
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
scmd_local = cmd_fusion->scmd;
- status = scsi_io_req->RaidContext.status;
- extStatus = scsi_io_req->RaidContext.exStatus;
+ status = scsi_io_req->RaidContext.raid_context.status;
+ extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
+ sense = cmd_fusion->sense;
+ data_length = scsi_io_req->DataLength;
switch (scsi_io_req->Function) {
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -2303,37 +3036,33 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
break;
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
- device_id = MEGASAS_DEV_INDEX(scmd_local);
- lbinfo = &fusion->load_balance_info[device_id];
- if (cmd_fusion->scmd->SCp.Status &
- MEGASAS_LOAD_BALANCE_FLAG) {
+ if (fusion->load_balance_info &&
+ (cmd_fusion->scmd->SCp.Status &
+ MEGASAS_LOAD_BALANCE_FLAG)) {
+ device_id = MEGASAS_DEV_INDEX(scmd_local);
+ lbinfo = &fusion->load_balance_info[device_id];
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
- cmd_fusion->scmd->SCp.Status &=
- ~MEGASAS_LOAD_BALANCE_FLAG;
+ cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
- if (reply_descript_type ==
- MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
- if (megasas_dbg_lvl == 5)
- dev_err(&instance->pdev->dev, "\nFAST Path "
- "IO Success\n");
- }
- /* Fall thru and complete IO */
+ /* Fall through and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
- /* Map the FW Cmd Status */
- map_cmd_status(cmd_fusion, status, extStatus);
- scsi_io_req->RaidContext.status = 0;
- scsi_io_req->RaidContext.exStatus = 0;
- if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
- atomic_dec(&instance->ldio_outstanding);
- megasas_return_cmd_fusion(instance, cmd_fusion);
- scsi_dma_unmap(scmd_local);
- scmd_local->scsi_done(scmd_local);
atomic_dec(&instance->fw_outstanding);
-
+ if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
+ map_cmd_status(fusion, scmd_local, status,
+ extStatus, le32_to_cpu(data_length),
+ sense);
+ if (instance->ldio_threshold &&
+ (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
+ atomic_dec(&instance->ldio_outstanding);
+ scmd_local->SCp.ptr = NULL;
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ scsi_dma_unmap(scmd_local);
+ scmd_local->scsi_done(scmd_local);
+ } else /* Optimal VD - R1 FP command completion. */
+ megasas_complete_r1_command(instance, cmd_fusion);
break;
case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
-
/* Poll mode. Dummy free.
* In case of Interrupt mode, caller has reverse check.
*/
@@ -2376,7 +3105,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
* pending to be completed
*/
if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
- if (fusion->adapter_type == INVADER_SERIES)
+ if (instance->msix_combined)
writel(((MSIxIndex & 0x7) << 24) |
fusion->last_reply_idx[MSIxIndex],
instance->reply_post_host_index_addr[MSIxIndex/8]);
@@ -2392,7 +3121,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
return IRQ_NONE;
wmb();
- if (fusion->adapter_type == INVADER_SERIES)
+ if (instance->msix_combined)
writel(((MSIxIndex & 0x7) << 24) |
fusion->last_reply_idx[MSIxIndex],
instance->reply_post_host_index_addr[MSIxIndex/8]);
@@ -2405,6 +3134,22 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
}
/**
+ * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
+ * @instance: Adapter soft state
+ */
+void megasas_sync_irqs(unsigned long instance_addr)
+{
+ u32 count, i;
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ for (i = 0; i < count; i++)
+ synchronize_irq(pci_irq_vector(instance->pdev, i));
+}
+
+/**
* megasas_complete_cmd_dpc_fusion - Completes command
* @instance: Adapter soft state
*
@@ -2489,7 +3234,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
* mfi_cmd: megasas_cmd pointer
*
*/
-u8
+void
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd *mfi_cmd)
{
@@ -2518,7 +3263,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
io_req = cmd->io_request;
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (fusion->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -2539,8 +3284,6 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
-
- return 0;
}
/**
@@ -2552,21 +3295,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *
build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
u16 index;
- if (build_mpt_mfi_pass_thru(instance, cmd)) {
- dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
- return NULL;
- }
-
+ build_mpt_mfi_pass_thru(instance, cmd);
index = cmd->context.smid;
req_desc = megasas_get_request_descriptor(instance, index - 1);
- if (!req_desc)
- return NULL;
-
req_desc->Words = 0;
req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2582,21 +3318,16 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
* @cmd: mfi cmd pointer
*
*/
-int
+void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
req_desc = build_mpt_cmd(instance, cmd);
- if (!req_desc) {
- dev_info(&instance->pdev->dev, "Failed from %s %d\n",
- __func__, __LINE__);
- return DCMD_NOT_FIRED;
- }
megasas_fire_cmd_fusion(instance, req_desc);
- return DCMD_SUCCESS;
+ return;
}
/**
@@ -2771,6 +3502,14 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
" will reset adapter scsi%d.\n",
instance->host->host_no);
megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+ if (instance->requestorId && reason) {
+ dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
+ " state while polling during"
+ " I/O timeout handling for %d\n",
+ instance->host->host_no);
+ *convert = 1;
+ }
+
retval = 1;
goto out;
}
@@ -2790,7 +3529,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
}
/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
- if (instance->requestorId && reason) {
+ if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
instance->hb_host_mem->HB.driverCounter =
@@ -3030,12 +3769,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
req_desc = megasas_get_request_descriptor(instance,
(cmd_fusion->index - 1));
- if (!req_desc) {
- dev_err(&instance->pdev->dev, "Failed from %s %d\n",
- __func__, __LINE__);
- megasas_return_cmd(instance, cmd_mfi);
- return -ENOMEM;
- }
cmd_fusion->request_desc = req_desc;
req_desc->Words = 0;
@@ -3092,7 +3825,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
break;
else {
instance->instancet->disable_intr(instance);
- msleep(1000);
+ megasas_sync_irqs((unsigned long)instance);
megasas_complete_cmd_dpc_fusion
((unsigned long)instance);
instance->instancet->enable_intr(instance);
@@ -3173,13 +3906,13 @@ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
instance = (struct megasas_instance *)sdev->host->hostdata;
fusion = instance->ctrl_context;
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ if (!MEGASAS_IS_LOGICAL(sdev)) {
if (instance->use_seqnum_jbod_fp) {
- pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
- sdev->id;
- pd_sync = (void *)fusion->pd_seq_sync
- [(instance->pd_seq_map_id - 1) & 1];
- devhandle = pd_sync->seq[pd_index].devHandle;
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+ + sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ devhandle = pd_sync->seq[pd_index].devHandle;
} else
sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
" without JBOD MAP support from %s %d\n", __func__, __LINE__);
@@ -3212,6 +3945,9 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
fusion = instance->ctrl_context;
+ scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
"SCSI host:%d\n", instance->host->host_no);
@@ -3292,6 +4028,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
fusion = instance->ctrl_context;
+ sdev_printk(KERN_INFO, scmd->device,
+ "target reset called for scmd(%p)\n", scmd);
+
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
"SCSI host:%d\n", instance->host->host_no);
@@ -3362,7 +4101,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
struct scsi_cmnd *scmd)
{
struct megasas_instance *peer_instance = NULL;
- int retval = (DID_RESET << 16);
+ int retval = (DID_REQUEUE << 16);
if (instance->peerIsPresent) {
peer_instance = megasas_get_peer_instance(instance);
@@ -3377,9 +4116,9 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
/* Core fusion reset function */
int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
{
- int retval = SUCCESS, i, convert = 0;
+ int retval = SUCCESS, i, j, convert = 0;
struct megasas_instance *instance;
- struct megasas_cmd_fusion *cmd_fusion;
+ struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
struct fusion_context *fusion;
u32 abs_state, status_reg, reset_adapter;
u32 io_timeout_in_crash_mode = 0;
@@ -3440,7 +4179,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
- msleep(1000);
+ megasas_sync_irqs((unsigned long)instance);
/* First try waiting for commands to complete */
if (megasas_wait_for_outstanding_fusion(instance, reason,
@@ -3451,23 +4190,40 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (convert)
reason = 0;
+ if (megasas_dbg_lvl & OCR_LOGS)
+ dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
+
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
+ /* check for extra commands issued by driver */
+ if (instance->is_ventura) {
+ r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
+ megasas_return_cmd_fusion(instance, r1_cmd);
+ }
scmd_local = cmd_fusion->scmd;
if (cmd_fusion->scmd) {
+ if (megasas_dbg_lvl & OCR_LOGS) {
+ sdev_printk(KERN_INFO,
+ cmd_fusion->scmd->device, "SMID: 0x%x\n",
+ cmd_fusion->index);
+ scsi_print_command(cmd_fusion->scmd);
+ }
+
scmd_local->result =
megasas_check_mpio_paths(instance,
scmd_local);
- if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ if (instance->ldio_threshold &&
+ megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
scmd_local->scsi_done(scmd_local);
- atomic_dec(&instance->fw_outstanding);
}
}
+ atomic_set(&instance->fw_outstanding, 0);
+
status_reg = instance->instancet->read_fw_status_reg(
instance->reg_set);
abs_state = status_reg & MFI_STATE_MASK;
@@ -3528,11 +4284,13 @@ transition_to_ready:
__func__, __LINE__);
megaraid_sas_kill_hba(instance);
retval = FAILED;
+ goto out;
}
/* Reset load balance info */
- memset(fusion->load_balance_info, 0,
- sizeof(struct LD_LOAD_BALANCE_INFO)
- *MAX_LOGICAL_DRIVES_EXT);
+ if (fusion->load_balance_info)
+ memset(fusion->load_balance_info, 0,
+ (sizeof(struct LD_LOAD_BALANCE_INFO) *
+ MAX_LOGICAL_DRIVES_EXT));
if (!megasas_get_map_info(instance))
megasas_sync_map_info(instance);
@@ -3540,7 +4298,17 @@ transition_to_ready:
megasas_setup_jbod_map(instance);
shost_for_each_device(sdev, shost)
- megasas_update_sdev_properties(sdev);
+ megasas_set_dynamic_target_properties(sdev);
+
+ /* reset stream detection array */
+ if (instance->is_ventura) {
+ for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
+ memset(fusion->stream_detect_by_ld[j],
+ 0, sizeof(struct LD_STREAM_DETECT));
+ fusion->stream_detect_by_ld[j]->mru_bit_map
+ = MR_STREAM_BITMAP;
+ }
+ }
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
@@ -3676,6 +4444,64 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
megasas_reset_fusion(instance->host, 0);
}
+/* Allocate fusion context */
+int
+megasas_alloc_fusion_context(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
+ instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ instance->ctrl_context_pages);
+ if (!instance->ctrl_context) {
+ /* fall back to using vmalloc for fusion_context */
+ instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
+ if (!instance->ctrl_context) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ fusion = instance->ctrl_context;
+
+ fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(struct LD_LOAD_BALANCE_INFO));
+ fusion->load_balance_info =
+ (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ fusion->load_balance_info_pages);
+ if (!fusion->load_balance_info) {
+ fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(struct LD_LOAD_BALANCE_INFO));
+ if (!fusion->load_balance_info)
+ dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
+ "continuing without Load Balance support\n");
+ }
+
+ return 0;
+}
+
+void
+megasas_free_fusion_context(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ if (fusion) {
+ if (fusion->load_balance_info) {
+ if (is_vmalloc_addr(fusion->load_balance_info))
+ vfree(fusion->load_balance_info);
+ else
+ free_pages((ulong)fusion->load_balance_info,
+ fusion->load_balance_info_pages);
+ }
+
+ if (is_vmalloc_addr(fusion))
+ vfree(fusion);
+ else
+ free_pages((ulong)fusion,
+ instance->ctrl_context_pages);
+ }
+}
+
struct megasas_instance_template megasas_instance_template_fusion = {
.enable_intr = megasas_enable_intr_fusion,
.disable_intr = megasas_disable_intr_fusion,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e3bee04c1eb1..d78d76112501 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -59,6 +59,8 @@
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+#define MR_RL_WRITE_THROUGH_MODE 0x00
+#define MR_RL_WRITE_BACK_MODE 0x01
/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
@@ -81,6 +83,11 @@
enum MR_RAID_FLAGS_IO_SUB_TYPE {
MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+ MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
+ MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
+ MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
+ MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
+ MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
};
/*
@@ -94,11 +101,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define THRESHOLD_REPLY_COUNT 50
+#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
enum MR_FUSION_ADAPTER_TYPE {
THUNDERBOLT_SERIES = 0,
INVADER_SERIES = 1,
+ VENTURA_SERIES = 2,
};
/*
@@ -108,29 +117,133 @@ enum MR_FUSION_ADAPTER_TYPE {
struct RAID_CONTEXT {
#if defined(__BIG_ENDIAN_BITFIELD)
- u8 nseg:4;
- u8 Type:4;
+ u8 nseg:4;
+ u8 type:4;
#else
- u8 Type:4;
- u8 nseg:4;
+ u8 type:4;
+ u8 nseg:4;
#endif
- u8 resvd0;
- __le16 timeoutValue;
- u8 regLockFlags;
- u8 resvd1;
- __le16 VirtualDiskTgtId;
- __le64 regLockRowLBA;
- __le32 regLockLength;
- __le16 nextLMId;
- u8 exStatus;
- u8 status;
- u8 RAIDFlags;
- u8 numSGE;
- __le16 configSeqNum;
- u8 spanArm;
- u8 priority;
- u8 numSGEExt;
- u8 resvd2;
+ u8 resvd0;
+ __le16 timeout_value;
+ u8 reg_lock_flags;
+ u8 resvd1;
+ __le16 virtual_disk_tgt_id;
+ __le64 reg_lock_row_lba;
+ __le32 reg_lock_length;
+ __le16 next_lmid;
+ u8 ex_status;
+ u8 status;
+ u8 raid_flags;
+ u8 num_sge;
+ __le16 config_seq_num;
+ u8 span_arm;
+ u8 priority;
+ u8 num_sge_ext;
+ u8 resvd2;
+};
+
+/*
+ * RAID context structure which describes Ventura MegaRAID specific
+ * I/O parameters. This resides at offset 0x60, where the SGL normally
+ * starts in MPT I/O frames.
+ */
+struct RAID_CONTEXT_G35 {
+ #define RAID_CONTEXT_NSEG_MASK 0x00F0
+ #define RAID_CONTEXT_NSEG_SHIFT 4
+ #define RAID_CONTEXT_TYPE_MASK 0x000F
+ #define RAID_CONTEXT_TYPE_SHIFT 0
+ u16 nseg_type;
+ u16 timeout_value; /* 0x02 -0x03 */
+ u16 routing_flags; /* 0x04 - 0x05 routing flags */
+ u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
+ u64 reg_lock_row_lba; /* 0x08 - 0x0F */
+ u32 reg_lock_length; /* 0x10 - 0x13 */
+ union {
+ u16 next_lmid; /* 0x14 - 0x15 */
+ u16 peer_smid; /* used for the raid 1/10 fp writes */
+ } smid;
+ u8 ex_status; /* 0x16 : OUT */
+ u8 status; /* 0x17 status */
+ u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
+ * resvd[3:1], preferredCpu[0]
+ */
+ u8 span_arm; /* 0x1C span[7:5], arm[4:0] */
+ u16 config_seq_num; /* 0x1A -0x1B */
+ union {
+ /*
+ * Bit format:
+ * ---------------------------------
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * ---------------------------------
+ * Byte0 | numSGE[7]- numSGE[0] |
+ * ---------------------------------
+ * Byte1 |SD | resvd | numSGE 8-11 |
+ * --------------------------------
+ */
+ #define NUM_SGE_MASK_LOWER 0xFF
+ #define NUM_SGE_MASK_UPPER 0x0F
+ #define NUM_SGE_SHIFT_UPPER 8
+ #define STREAM_DETECT_SHIFT 7
+ #define STREAM_DETECT_MASK 0x80
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
+ u16 stream_detected:1;
+ u16 reserved:3;
+ u16 num_sge:12;
+#else
+ u16 num_sge:12;
+ u16 reserved:3;
+ u16 stream_detected:1;
+#endif
+ } bits;
+ u8 bytes[2];
+ } u;
+ u8 resvd2[2]; /* 0x1E-0x1F */
+};
+
+#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
+#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
+#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
+#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
+#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
+#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
+#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000
+
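/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * this patch. It shows one way the routing_flags field of struct
 * RAID_CONTEXT_G35 could be composed from the shift/mask definitions above;
 * the helper name is hypothetical, and the meaning attributed to SQN
 * (sequence-number enable, by analogy with MR_RL_FLAGS_SEQ_NUM_ENABLE) is
 * an assumption.
 */
static inline void example_set_routing_flags(struct RAID_CONTEXT_G35 *rctx_g35,
 u8 cpu_sel)
{
 u16 flags = 0;

 /* set the single-bit SQN flag (assumed: enable sequence-number checks) */
 flags |= 1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT;
 /* place the CPU selection value into the 4-bit CPUSEL field */
 flags |= (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT) &
 MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK;
 rctx_g35->routing_flags = flags;
}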
+static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
+ u16 sge_count)
+{
+ rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
+ rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
+ & NUM_SGE_MASK_UPPER);
+}
+
+static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+ u16 sge_count;
+
+ sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
+ << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
+ return sge_count;
+}
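/*
 * Editor's worked example (not part of this patch): how set_num_sge() and
 * get_num_sge() above pack a 12-bit SGE count into u.bytes[]. For a
 * hypothetical sge_count of 0x234:
 *
 *   bytes[0]  = 0x234 & NUM_SGE_MASK_LOWER                          = 0x34
 *   bytes[1] |= (0x234 >> NUM_SGE_SHIFT_UPPER) & NUM_SGE_MASK_UPPER = 0x02
 *
 * get_num_sge() then reassembles (0x02 << 8) | 0x34 = 0x234. Bit 7 of
 * bytes[1] is left untouched; it carries the stream-detected flag handled
 * by the SET_STREAM_DETECTED()/CLEAR_STREAM_DETECTED() macros below.
 */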
+
+#define SET_STREAM_DETECTED(rctx_g35) \
+ (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
+
+#define CLEAR_STREAM_DETECTED(rctx_g35) \
+ (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
+
+static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+ return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
+}
+
+union RAID_CONTEXT_UNION {
+ struct RAID_CONTEXT raid_context;
+ struct RAID_CONTEXT_G35 raid_context_g35;
};
#define RAID_CTX_SPANARM_ARM_SHIFT (0)
@@ -139,6 +252,14 @@ struct RAID_CONTEXT {
#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+/* number of bits per index in U32 TrackStream */
+#define BITS_PER_INDEX_STREAM 4
+#define INVALID_STREAM_NUM 16
+#define MR_STREAM_BITMAP 0x76543210
+#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
+#define ZERO_LAST_STREAM 0x0fffffff
+#define MAX_STREAMS_TRACKED 8
+
/*
* define region lock types
*/
@@ -175,6 +296,8 @@ enum REGION_TYPE {
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+/* EEDP escape mode */
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
@@ -407,7 +530,7 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
u8 LUN[8]; /* 0x34 */
__le32 Control; /* 0x3C */
union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
- struct RAID_CONTEXT RaidContext; /* 0x60 */
+ union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
union MPI2_SGE_IO_UNION SGL; /* 0x80 */
};
@@ -563,7 +686,7 @@ struct MPI2_IOC_INIT_REQUEST {
__le16 HeaderVersion; /* 0x0E */
u32 Reserved5; /* 0x10 */
__le16 Reserved6; /* 0x14 */
- u8 Reserved7; /* 0x16 */
+ u8 HostPageSize; /* 0x16 */
u8 HostMSIxVectors; /* 0x17 */
__le16 Reserved8; /* 0x18 */
__le16 SystemRequestFrameSize; /* 0x1A */
@@ -579,6 +702,7 @@ struct MPI2_IOC_INIT_REQUEST {
/* mrpriv defines */
#define MR_PD_INVALID 0xFFFF
+#define MR_DEVHANDLE_INVALID 0xFFFF
#define MAX_SPAN_DEPTH 8
#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
@@ -586,16 +710,20 @@ struct MPI2_IOC_INIT_REQUEST {
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
#define MAX_LOGICAL_DRIVES_EXT 256
+#define MAX_LOGICAL_DRIVES_DYN 512
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
#define MAX_ARRAYS_EXT 256
#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
+#define MAX_API_ARRAYS_DYN 512
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
+#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
@@ -603,7 +731,7 @@ struct MPI2_IOC_INIT_REQUEST {
struct MR_DEV_HANDLE_INFO {
__le16 curDevHdl;
u8 validHandles;
- u8 reserved;
+ u8 interfaceType;
__le16 devHandle[2];
};
@@ -640,10 +768,56 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_SPAN_INFO block_span_info;
};
+#define MR_RAID_CTX_CPUSEL_0 0
+#define MR_RAID_CTX_CPUSEL_1 1
+#define MR_RAID_CTX_CPUSEL_2 2
+#define MR_RAID_CTX_CPUSEL_3 3
+#define MR_RAID_CTX_CPUSEL_FCFS 0xF
+
+struct MR_CPU_AFFINITY_MASK {
+ union {
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ u8 hw_path:1;
+ u8 cpu0:1;
+ u8 cpu1:1;
+ u8 cpu2:1;
+ u8 cpu3:1;
+ u8 reserved:3;
+#else
+ u8 reserved:3;
+ u8 cpu3:1;
+ u8 cpu2:1;
+ u8 cpu1:1;
+ u8 cpu0:1;
+ u8 hw_path:1;
+#endif
+ };
+ u8 core_mask;
+ };
+};
+
+struct MR_IO_AFFINITY {
+ union {
+ struct {
+ struct MR_CPU_AFFINITY_MASK pdRead;
+ struct MR_CPU_AFFINITY_MASK pdWrite;
+ struct MR_CPU_AFFINITY_MASK ldRead;
+ struct MR_CPU_AFFINITY_MASK ldWrite;
+ };
+ u32 word;
+ };
+ u8 maxCores; /* Total cores + HW Path in ROC */
+ u8 reserved[3];
+};
+
struct MR_LD_RAID {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved4:5;
+ u32 reserved4:2;
+ u32 fp_cache_bypass_capable:1;
+ u32 fp_rmw_capable:1;
+ u32 disable_coalescing:1;
u32 fpBypassRegionLock:1;
u32 tmCapable:1;
u32 fpNonRWCapable:1;
@@ -654,11 +828,13 @@ struct MR_LD_RAID {
u32 encryptionType:8;
u32 pdPiMode:4;
u32 ldPiMode:4;
- u32 reserved5:3;
+ u32 reserved5:2;
+ u32 ra_capable:1;
u32 fpCapable:1;
#else
u32 fpCapable:1;
- u32 reserved5:3;
+ u32 ra_capable:1;
+ u32 reserved5:2;
u32 ldPiMode:4;
u32 pdPiMode:4;
u32 encryptionType:8;
@@ -669,7 +845,10 @@ struct MR_LD_RAID {
u32 fpNonRWCapable:1;
u32 tmCapable:1;
u32 fpBypassRegionLock:1;
- u32 reserved4:5;
+ u32 disable_coalescing:1;
+ u32 fp_rmw_capable:1;
+ u32 fp_cache_bypass_capable:1;
+ u32 reserved4:2;
#endif
} capability;
__le32 reserved6;
@@ -696,7 +875,36 @@ struct MR_LD_RAID {
u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
- u8 reserved3[0x80-0x2D]; /* 0x2D */
+ /* 0x2D This LD accepts priority boost of this type */
+ u8 ld_accept_priority_type;
+ u8 reserved2[2]; /* 0x2E - 0x2F */
+ /* 0x30 - 0x33, Logical block size for the LD */
+ u32 logical_block_length;
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+ u32 ld_pi_exp:4;
+ /* 0x34, LOGICAL BLOCKS PER PHYSICAL
+ * BLOCK EXPONENT from READ CAPACITY 16
+ */
+ u32 ld_logical_block_exp:4;
+ u32 reserved1:24; /* 0x34 */
+#else
+ u32 reserved1:24; /* 0x34 */
+ /* 0x34, LOGICAL BLOCKS PER PHYSICAL
+ * BLOCK EXPONENT from READ CAPACITY 16
+ */
+ u32 ld_logical_block_exp:4;
+ /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+ u32 ld_pi_exp:4;
+#endif
+ }; /* 0x34 - 0x37 */
+ /* 0x38 - 0x3f, This will determine which
+ * core will process LD IO and PD IO.
+ */
+ struct MR_IO_AFFINITY cpuAffinity;
+ /* Bit definitions are specified by MR_IO_AFFINITY */
+ u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
};
struct MR_LD_SPAN_MAP {
@@ -735,6 +943,7 @@ struct IO_REQUEST_INFO {
u16 ldTgtId;
u8 isRead;
__le16 devHandle;
+ u8 pd_interface;
u64 pdBlock;
u8 fpOkForIo;
u8 IoforUnevenSpan;
@@ -743,6 +952,8 @@ struct IO_REQUEST_INFO {
u64 start_row;
u8 span_arm; /* span[7:5], arm[4:0] */
u8 pd_after_lb;
+ u16 r1_alt_dev_handle; /* raid 1/10 only */
+ bool ra_capable;
};
struct MR_LD_TARGET_SYNC {
@@ -751,6 +962,91 @@ struct MR_LD_TARGET_SYNC {
__le16 seqNum;
};
+/*
+ * RAID Map descriptor Types.
+ * Each element should uniquely identify one data structure in the RAID map.
+ */
+enum MR_RAID_MAP_DESC_TYPE {
+ /* MR_DEV_HANDLE_INFO data */
+ RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
+ /* target to Ld num Index map */
+ RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
+ /* MR_ARRAY_INFO data */
+ RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
+ /* MR_LD_SPAN_MAP data */
+ RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
+ RAID_MAP_DESC_TYPE_COUNT,
+};
+
+/*
+ * This table defines the offset, size and num elements of each descriptor
+ * type in the RAID Map buffer
+ */
+struct MR_RAID_MAP_DESC_TABLE {
+ /* Raid map descriptor type */
+ u32 raid_map_desc_type;
+ /* Offset into the RAID map buffer where
+ * descriptor data is saved
+ */
+ u32 raid_map_desc_offset;
+ /* total size of the
+ * descriptor buffer
+ */
+ u32 raid_map_desc_buffer_size;
+ /* Number of elements contained in the
+ * descriptor buffer
+ */
+ u32 raid_map_desc_elements;
+};
+
+/*
+ * Dynamic Raid Map Structure.
+ */
+struct MR_FW_RAID_MAP_DYNAMIC {
+ u32 raid_map_size; /* total size of RAID Map structure */
+ u32 desc_table_offset;/* Offset of desc table into RAID map*/
+ u32 desc_table_size; /* Total Size of desc table */
+ /* Total Number of elements in the desc table */
+ u32 desc_table_num_elements;
+ u64 reserved1;
+ u32 reserved2[3]; /*future use */
+ /* timeout value used by driver in FP IOs */
+ u8 fp_pd_io_timeout_sec;
+ u8 reserved3[3];
+ /* when this seqNum increments, driver needs to
+ * release RMW buffers asap
+ */
+ u32 rmw_fp_seq_num;
+ u16 ld_count; /* count of lds. */
+ u16 ar_count; /* count of arrays */
+ u16 span_count; /* count of spans */
+ u16 reserved4[3];
+/*
+ * The below structure of pointers is only to be used by the driver.
+ * This is added to the API to reduce the amount of code changes
+ * needed in the driver to support the dynamic RAID map. Firmware should
+ * not update these pointers while preparing the RAID map.
+ */
+ union {
+ struct {
+ struct MR_DEV_HANDLE_INFO *dev_hndl_info;
+ u16 *ld_tgt_id_to_ld;
+ struct MR_ARRAY_INFO *ar_map_info;
+ struct MR_LD_SPAN_MAP *ld_span_map;
+ };
+ u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
+ };
+/*
+ * RAID Map descriptor table defines the layout of data in the RAID Map.
+ * The size of the descriptor table itself could change.
+ */
+ /* Variable Size descriptor Table. */
+ struct MR_RAID_MAP_DESC_TABLE
+ raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
+ /* Variable Size buffer containing all data */
+ u32 raid_map_desc_data[1];
+}; /* Dynamically sized RAID map structure */
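/*
 * Editor's sketch (hypothetical, not part of this patch): one way a driver
 * could locate a descriptor's payload in a dynamic RAID map. Per the field
 * comments above, both desc_table_offset and raid_map_desc_offset are taken
 * here as offsets from the start of the map buffer; the helper name and the
 * use of plain u32 fields without endian conversion are illustrative only.
 */
static inline void *example_get_desc_data(struct MR_FW_RAID_MAP_DYNAMIC *map,
 u32 desc_type)
{
 struct MR_RAID_MAP_DESC_TABLE *desc;
 u32 i;

 desc = (struct MR_RAID_MAP_DESC_TABLE *)((u8 *)map + map->desc_table_offset);
 for (i = 0; i < map->desc_table_num_elements; i++, desc++) {
 if (desc->raid_map_desc_type == desc_type)
 return (u8 *)map + desc->raid_map_desc_offset;
 }
 return NULL;
}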
+
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
@@ -759,6 +1055,16 @@ struct MR_LD_TARGET_SYNC {
#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+#define MPI2_SGE_FLAGS_SHIFT (0x02)
+#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
+#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
+#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
+
struct megasas_register_set;
struct megasas_instance;
@@ -795,6 +1101,10 @@ struct megasas_cmd_fusion {
u32 index;
u8 pd_r1_lb;
struct completion done;
+ u8 pd_interface;
+ u16 r1_alt_dev_handle; /* raid 1/10 only*/
+ bool cmd_completed; /* raid 1/10 fp writes status holder */
+
};
struct LD_LOAD_BALANCE_INFO {
@@ -856,9 +1166,10 @@ struct MR_DRV_RAID_MAP {
__le16 spanCount;
__le16 reserve3;
- struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
- u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
- struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_DEV_HANDLE_INFO
+ devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
+ u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
struct MR_LD_SPAN_MAP ldSpanMap[1];
};
@@ -870,7 +1181,7 @@ struct MR_DRV_RAID_MAP {
struct MR_DRV_RAID_MAP_ALL {
struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
} __packed;
@@ -919,7 +1230,8 @@ struct MR_PD_CFG_SEQ {
u8 reserved:7;
#endif
} capability;
- u8 reserved[3];
+ u8 reserved;
+ u16 pd_target_id;
} __packed;
struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -928,6 +1240,30 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
struct MR_PD_CFG_SEQ seq[1];
} __packed;
+/* stream detection */
+struct STREAM_DETECT {
+ u64 next_seq_lba; /* next LBA to match sequential access */
+ struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
+ struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
+ u32 count_cmds_in_stream; /* count of host commands in this stream */
+ u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
+ u8 is_read; /* SCSI OpCode for this stream */
+ u8 group_depth; /* total number of host commands in group */
+ /* TRUE if cannot add any more commands to this group */
+ bool group_flush;
+ u8 reserved[7]; /* pad to 64-bit alignment */
+};
+
+struct LD_STREAM_DETECT {
+ bool write_back; /* TRUE if WB, FALSE if WT */
+ bool fp_write_enabled;
+ bool members_ssds;
+ bool fp_cache_bypass_capable;
+ u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
+ /* this is the array of stream detect structures (one per stream) */
+ struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
+};
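/*
 * Editor's illustration (not part of this patch): mru_bit_map packs eight
 * 4-bit stream indices (MAX_STREAMS_TRACKED entries of BITS_PER_INDEX_STREAM
 * bits each). The reset value MR_STREAM_BITMAP (0x76543210) orders indices
 * 0..7 from most- to least-recently used, so the most-recently-used index
 * sits in the low nibble; this reading of the layout is an assumption, and
 * how entries are promoted is left to the I/O path.
 */
static inline u8 example_mru_stream_index(struct LD_STREAM_DETECT *ld_stream)
{
 /* low BITS_PER_INDEX_STREAM bits hold the most-recently-used index */
 return ld_stream->mru_bit_map & STREAM_MASK;
}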
+
struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
u64 RDPQBaseAddress;
u32 Reserved1;
@@ -965,7 +1301,7 @@ struct fusion_context {
u8 chain_offset_io_request;
u8 chain_offset_mfi_pthru;
- struct MR_FW_RAID_MAP_ALL *ld_map[2];
+ struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
dma_addr_t ld_map_phys[2];
/*Non dma-able memory. Driver local copy.*/
@@ -973,14 +1309,18 @@ struct fusion_context {
u32 max_map_sz;
u32 current_map_sz;
+ u32 old_map_sz;
+ u32 new_map_sz;
u32 drv_map_sz;
u32 drv_map_pages;
struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
u8 fast_path_io;
- struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ struct LD_LOAD_BALANCE_INFO *load_balance_info;
+ u32 load_balance_info_pages;
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
u8 adapter_type;
+ struct LD_STREAM_DETECT **stream_detect_by_ld;
};
union desc_value {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 8bae305bc156..af4be403582e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -624,6 +624,8 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
/* defines for ReasonCode field */
#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
+#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
+#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
/*Hard Reset Received Event data */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index f00ef88a378a..a3fe1fb55c17 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1040,6 +1040,25 @@ _base_interrupt(int irq, void *bus_id)
reply_q->reply_post_free[reply_q->reply_post_host_index].
Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
completed_cmds++;
+ /* Update the reply post host index after continuously
+ * processing the threshold number of Reply Descriptors,
+ * so that FW can find enough entries to post the Reply
+ * Descriptors in the reply descriptor post queue.
+ */
+ if (completed_cmds > ioc->hba_queue_depth/3) {
+ if (ioc->combined_reply_queue) {
+ writel(reply_q->reply_post_host_index |
+ ((msix_index & 7) <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT),
+ ioc->replyPostRegisterIndex[msix_index/8]);
+ } else {
+ writel(reply_q->reply_post_host_index |
+ (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT),
+ &ioc->chip->ReplyPostHostIndex);
+ }
+ completed_cmds = 1;
+ }
if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
goto out;
if (!reply_q->reply_post_host_index)
@@ -5522,6 +5541,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
ioc->non_operational_loop = 0;
+ ioc->got_task_abort_from_ioctl = 0;
return 0;
out_free_resources:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe1338d09..4ab634fc27df 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "14.101.00.00"
-#define MPT3SAS_MAJOR_VERSION 14
-#define MPT3SAS_MINOR_VERSION 101
+#define MPT3SAS_DRIVER_VERSION "15.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 15
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
* @eedp_enable: eedp support enable bit
* @eedp_type: 0(type_1), 1(type_2), 2(type_3)
* @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
*/
struct MPT3SAS_DEVICE {
struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
u8 ignore_delay_remove;
/* Iopriority Command Handling */
u8 ncq_prio_enable;
+ /*
+ * Bug workaround for SATL handling: the mpt2/3sas firmware
+ * doesn't return BUSY or TASK_SET_FULL for subsequent
+ * commands, as the spec requires, while a SATL pass through is
+ * in operation; it simply does nothing with them until the
+ * pass through completes, possibly causing them to time out if
+ * the passthrough is a long-running command (like format or
+ * secure erase). This variable allows us to do the right
+ * thing while a SATL command is pending.
+ */
+ unsigned long ata_command_pending;
};
@@ -988,6 +1000,7 @@ struct MPT3SAS_ADAPTER {
u8 broadcast_aen_busy;
u16 broadcast_aen_pending;
u8 shost_recovery;
+ u8 got_task_abort_from_ioctl;
struct mutex reset_in_progress_mutex;
spinlock_t ioc_reset_in_progress_lock;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 95f0f24bac05..02fe1c4aae2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -826,16 +826,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
"TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
ioc->name,
le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
-
+ ioc->got_task_abort_from_ioctl = 1;
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
mpt3sas_base_free_smid(ioc, smid);
+ ioc->got_task_abort_from_ioctl = 0;
goto out;
}
}
+ ioc->got_task_abort_from_ioctl = 0;
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
dtmprintk(ioc, pr_info(MPT3SAS_FMT
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e319d3..46e866c36c8a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,6 +51,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
@@ -1074,6 +1075,26 @@ _scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * __scsih_scsi_lookup_get_clear - returns scmd entry without
+ * holding any lock.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then clears the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid)
+{
+ struct scsi_cmnd *scmd = NULL;
+
+ swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
+
+ return scmd;
+}
+
+/**
* _scsih_scsi_lookup_get_clear - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -1088,8 +1109,7 @@ _scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
struct scsi_cmnd *scmd;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = ioc->scsi_lookup[smid - 1].scmd;
- ioc->scsi_lookup[smid - 1].scmd = NULL;
+ scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return scmd;
@@ -3899,9 +3919,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
}
/**
@@ -3925,9 +3954,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device,
- SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -4063,13 +4090,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
- /*
- * Lock the device for any subsequent command until command is
- * done.
- */
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_block(scmd->device);
-
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4103,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
+ /*
+ * Bug workaround for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point.
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4644,14 +4677,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
+ unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+
+ if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
+ ioc->got_task_abort_from_ioctl)
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ else
+ scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
+
if (scmd == NULL)
return 1;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4703,6 +4742,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+ /* In case of bogus fw or device, we could end up having
+ * unaligned partial completion. We can force alignment here,
+ * then scsi-ml does not need to handle this misbehavior.
+ */
+ sector_sz = scmd->device->sector_size;
+ if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
+ xfer_cnt % sector_sz)) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+ xfer_cnt, sector_sz);
+ xfer_cnt = round_down(xfer_cnt, sector_sz);
+ }
+
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
@@ -8016,15 +8069,24 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
ActiveCableEventData =
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
- if (ActiveCableEventData->ReasonCode ==
- MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
- pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
- ioc->name, ActiveCableEventData->ReceptacleID);
- pr_info("cannot be powered and devices connected to this active cable");
- pr_info("will not be seen. This active cable");
- pr_info("requires %d mW of power",
- ActiveCableEventData->ActiveCablePowerRequirement);
+ switch (ActiveCableEventData->ReasonCode) {
+ case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
+ pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
+ " requires %d mW of power\n", ioc->name,
+ ActiveCableEventData->ReceptacleID,
+ ActiveCableEventData->ActiveCablePowerRequirement);
+ pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
+ " to this active cable will not be seen\n",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ break;
+
+ case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
+ pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_notice(" is not running at an optimal speed(12 Gb/s)\n");
+ break;
}
+
break;
default: /* ignore the rest */
@@ -8734,6 +8796,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
switch (hba_mpi_version) {
case MPI2_VERSION:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
/* Use mpt2sas driver host template for SAS 2.0 HBA's */
shost = scsi_host_alloc(&mpt2sas_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 7f1d5785bc30..e7a7a704a315 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2057,10 +2057,10 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->name, __func__,
le16_to_cpu(mpi_reply->ResponseDataLength)));
- memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
- req->sense_len = sizeof(*mpi_reply);
- req->resid_len = 0;
- rsp->resid_len -=
+ memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
+ scsi_req(req)->sense_len = sizeof(*mpi_reply);
+ scsi_req(req)->resid_len = 0;
+ scsi_req(rsp)->resid_len -=
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 39285070f3b5..247df5e79b71 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2225,15 +2225,12 @@ static struct scsi_host_template mvumi_template = {
.name = "Marvell Storage Controller",
.slave_configure = mvumi_slave_configure,
.queuecommand = mvumi_queue_command,
+ .eh_timed_out = mvumi_timed_out,
.eh_host_reset_handler = mvumi_host_reset,
.bios_param = mvumi_bios_param,
.this_id = -1,
};
-static struct scsi_transport_template mvumi_transport_template = {
- .eh_timed_out = mvumi_timed_out,
-};
-
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
{
void *base = NULL;
@@ -2451,7 +2448,6 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
host->max_id = mhba->max_target_id;
host->max_cmd_len = MAX_COMMAND_SIZE;
- host->transportt = &mvumi_transport_template;
ret = scsi_add_host(host, &mhba->pdev->dev);
if (ret) {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index ef99f62831fb..30b905080c61 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -48,6 +48,7 @@
#include <scsi/osd_sense.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_request.h>
#include "osd_debug.h"
@@ -477,11 +478,13 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
{
or->async_error = error;
or->req_errors = req->errors ? : error;
- or->sense_len = req->sense_len;
+ or->sense_len = scsi_req(req)->sense_len;
+ if (or->sense_len)
+ memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
if (or->out.req)
- or->out.residual = or->out.req->resid_len;
+ or->out.residual = scsi_req(or->out.req)->resid_len;
if (or->in.req)
- or->in.residual = or->in.req->resid_len;
+ or->in.residual = scsi_req(or->in.req)->resid_len;
}
int osd_execute_request(struct osd_request *or)
@@ -1562,10 +1565,11 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
struct bio *bio = oii->bio;
int ret;
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ flags);
if (IS_ERR(req))
return req;
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
for_each_bio(bio) {
struct bio *bounce_bio = bio;
@@ -1599,8 +1603,6 @@ static int _init_blk_request(struct osd_request *or,
req->timeout = or->timeout;
req->retries = or->retries;
- req->sense = or->sense;
- req->sense_len = 0;
if (has_out) {
or->out.req = req;
@@ -1612,7 +1614,7 @@ static int _init_blk_request(struct osd_request *or,
ret = PTR_ERR(req);
goto out;
}
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
or->in.req = or->request->next_rq = req;
}
} else if (has_in)
@@ -1699,8 +1701,8 @@ int osd_finalize_request(struct osd_request *or,
osd_sec_sign_cdb(&or->cdb, cap_key);
- or->request->cmd = or->cdb.buff;
- or->request->cmd_len = _osd_req_cdb_len(or);
+ scsi_req(or->request)->cmd = or->cdb.buff;
+ scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);
return 0;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index e8196c55b633..451de6c5e3c9 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -322,6 +322,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
/* Wakeup from interrupt */
static void osst_end_async(struct request *req, int update)
{
+ struct scsi_request *rq = scsi_req(req);
struct osst_request *SRpnt = req->end_io_data;
struct osst_tape *STp = SRpnt->stp;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
@@ -330,6 +331,8 @@ static void osst_end_async(struct request *req, int update)
#if DEBUG
STp->write_pending = 0;
#endif
+ if (rq->sense_len)
+ memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -357,17 +360,20 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
int use_sg, int timeout, int retries)
{
struct request *req;
+ struct scsi_request *rq;
struct page **pages = NULL;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
int write = (data_direction == DMA_TO_DEVICE);
- req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
SRpnt->bio = NULL;
@@ -404,11 +410,9 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
goto free_req;
}
- req->cmd_len = cmd_len;
- memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = SRpnt->sense;
- req->sense_len = 0;
+ rq->cmd_len = cmd_len;
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
req->retries = retries;
req->end_io_data = SRpnt;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 9fc675f57e33..417368ccb686 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -888,7 +888,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
u32 i = 0, j = 0;
u32 number_of_intr;
int flag = 0;
- u32 max_entry;
int rc;
static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
@@ -900,18 +899,14 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
flag &= ~IRQF_SHARED;
}
- max_entry = sizeof(pm8001_ha->msix_entries) /
- sizeof(pm8001_ha->msix_entries[0]);
- for (i = 0; i < max_entry ; i++)
- pm8001_ha->msix_entries[i].entry = i;
- rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries,
- number_of_intr);
- pm8001_ha->number_of_intr = number_of_intr;
- if (rc)
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
+ number_of_intr, PCI_IRQ_MSIX);
+ if (rc < 0)
return rc;
+ pm8001_ha->number_of_intr = number_of_intr;
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "pci_enable_msix_exact request ret:%d no of intr %d\n",
+ "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++) {
@@ -920,15 +915,15 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
- rc = request_irq(pm8001_ha->msix_entries[i].vector,
+ rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
pm8001_interrupt_handler_msix, flag,
intr_drvname[i], &(pm8001_ha->irq_vector[i]));
if (rc) {
for (j = 0; j < i; j++) {
- free_irq(pm8001_ha->msix_entries[j].vector,
+ free_irq(pci_irq_vector(pm8001_ha->pdev, i),
&(pm8001_ha->irq_vector[i]));
}
- pci_disable_msix(pm8001_ha->pdev);
+ pci_free_irq_vectors(pm8001_ha->pdev);
break;
}
}
@@ -1102,11 +1097,10 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
- synchronize_irq(pm8001_ha->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(pdev, i));
for (i = 0; i < pm8001_ha->number_of_intr; i++)
- free_irq(pm8001_ha->msix_entries[i].vector,
- &(pm8001_ha->irq_vector[i]));
- pci_disable_msix(pdev);
+ free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+ pci_free_irq_vectors(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
@@ -1152,11 +1146,10 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
- synchronize_irq(pm8001_ha->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(pdev, i));
for (i = 0; i < pm8001_ha->number_of_intr; i++)
- free_irq(pm8001_ha->msix_entries[i].vector,
- &(pm8001_ha->irq_vector[i]));
- pci_disable_msix(pdev);
+ free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+ pci_free_irq_vectors(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6628cc38316c..e81a8fa7ef1a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -521,8 +521,6 @@ struct pm8001_hba_info {
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
- struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
- /*for msi-x interrupt*/
int number_of_intr;/*will be used in remove()*/
#endif
#ifdef PM8001_USE_TASKLET
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 337982cf3d63..49e70a383afa 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4587,16 +4587,14 @@ static void pmcraid_tasklet_function(unsigned long instance)
static
void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
{
+ struct pci_dev *pdev = pinstance->pdev;
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
- free_irq(pinstance->hrrq_vector[i].vector,
- &(pinstance->hrrq_vector[i]));
+ free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
- if (pinstance->interrupt_mode) {
- pci_disable_msix(pinstance->pdev);
- pinstance->interrupt_mode = 0;
- }
+ pinstance->interrupt_mode = 0;
+ pci_free_irq_vectors(pdev);
}
/**
@@ -4609,60 +4607,52 @@ void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
static int
pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
{
- int rc;
struct pci_dev *pdev = pinstance->pdev;
+ unsigned int irq_flag = PCI_IRQ_LEGACY, flag;
+ int num_hrrq, rc, i;
+ irq_handler_t isr;
- if ((pmcraid_enable_msix) &&
- (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
- int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
- struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
- int i;
- for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
- entries[i].entry = i;
-
- num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
- if (num_hrrq < 0)
- goto pmcraid_isr_legacy;
-
- for (i = 0; i < num_hrrq; i++) {
- pinstance->hrrq_vector[i].hrrq_id = i;
- pinstance->hrrq_vector[i].drv_inst = pinstance;
- pinstance->hrrq_vector[i].vector = entries[i].vector;
- rc = request_irq(pinstance->hrrq_vector[i].vector,
- pmcraid_isr_msix, 0,
- PMCRAID_DRIVER_NAME,
- &(pinstance->hrrq_vector[i]));
-
- if (rc) {
- int j;
- for (j = 0; j < i; j++)
- free_irq(entries[j].vector,
- &(pinstance->hrrq_vector[j]));
- pci_disable_msix(pdev);
- goto pmcraid_isr_legacy;
- }
- }
+ if (pmcraid_enable_msix)
+ irq_flag |= PCI_IRQ_MSIX;
- pinstance->num_hrrq = num_hrrq;
+ num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS,
+ irq_flag);
+ if (num_hrrq < 0)
+ return num_hrrq;
+
+ if (pdev->msix_enabled) {
+ flag = 0;
+ isr = pmcraid_isr_msix;
+ } else {
+ flag = IRQF_SHARED;
+ isr = pmcraid_isr;
+ }
+
+ for (i = 0; i < num_hrrq; i++) {
+ struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i];
+
+ vec->hrrq_id = i;
+ vec->drv_inst = pinstance;
+ rc = request_irq(pci_irq_vector(pdev, i), isr, flag,
+ PMCRAID_DRIVER_NAME, vec);
+ if (rc)
+ goto out_unwind;
+ }
+
+ pinstance->num_hrrq = num_hrrq;
+ if (pdev->msix_enabled) {
pinstance->interrupt_mode = 1;
iowrite32(DOORBELL_INTR_MODE_MSIX,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
- goto pmcraid_isr_out;
}
-pmcraid_isr_legacy:
- /* If MSI-X registration failed fallback to legacy mode, where
- * only one hrrq entry will be used
- */
- pinstance->hrrq_vector[0].hrrq_id = 0;
- pinstance->hrrq_vector[0].drv_inst = pinstance;
- pinstance->hrrq_vector[0].vector = pdev->irq;
- pinstance->num_hrrq = 1;
-
- rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
- PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
-pmcraid_isr_out:
+ return 0;
+
+out_unwind:
+ while (--i > 0)
+ free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
+ pci_free_irq_vectors(pdev);
return rc;
}
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index e1d150f3fd4d..568b18a2f47d 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -628,7 +628,6 @@ struct pmcraid_interrupts {
/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */
struct pmcraid_isr_param {
struct pmcraid_instance *drv_inst;
- u16 vector; /* allocated msi-x vector */
u8 hrrq_id; /* hrrq entry index */
};
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 2bdedb9c39bc..8fd28b056f73 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -52,7 +52,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
vaf.va = &va;
if (!(qedi_dbg_log & QEDI_LOG_WARN))
- return;
+ goto ret;
if (likely(qedi) && likely(qedi->pdev))
pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -60,6 +60,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
else
pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ret:
va_end(va);
}
@@ -80,7 +81,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
vaf.va = &va;
if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
- return;
+ goto ret;
if (likely(qedi) && likely(qedi->pdev))
pr_notice("[%s]:[%s:%d]:%d: %pV",
@@ -89,6 +90,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
else
pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ret:
va_end(va);
}
@@ -109,7 +111,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
vaf.va = &va;
if (!(qedi_dbg_log & level))
- return;
+ goto ret;
if (likely(qedi) && likely(qedi->pdev))
pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -117,6 +119,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
else
pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ret:
va_end(va);
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index d6a205433b66..b9f79d36142d 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -48,6 +48,7 @@ struct scsi_host_template qedi_host_template = {
.name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
.proc_name = QEDI_MODULE_NAME,
.queuecommand = iscsi_queuecommand,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
@@ -453,13 +454,9 @@ static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
if (rval) {
rval = -ENXIO;
QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
- goto update_conn_err;
}
kfree(conn_info);
- rval = 0;
-
-update_conn_err:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d545d13..f201f4099620 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
ssize_t rval = 0;
+ mutex_lock(&ha->optrom_mutex);
+
if (ha->optrom_state != QLA_SREADING)
- return 0;
+ goto out;
- mutex_lock(&ha->optrom_mutex);
rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
ha->optrom_region_size);
+
+out:
mutex_unlock(&ha->optrom_mutex);
return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
- if (ha->optrom_state != QLA_SWRITING)
+ mutex_lock(&ha->optrom_mutex);
+
+ if (ha->optrom_state != QLA_SWRITING) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
- if (off > ha->optrom_region_size)
+ }
+ if (off > ha->optrom_region_size) {
+ mutex_unlock(&ha->optrom_mutex);
return -ERANGE;
+ }
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
- mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
mutex_unlock(&ha->optrom_mutex);
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
int type;
- int rval = 0;
port_id_t did;
type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
- rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+ qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
return count;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 1bf8061ff803..40ca75bbcb9d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -921,7 +921,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, response, sizeof(response));
fw_sts_ptr += sizeof(response);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b76714..2f14adfab018 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
@@ -2247,7 +2248,7 @@ struct ct_fdmiv2_hba_attr {
uint32_t num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
- uint8_t vendor_indentifer[8];
+ uint8_t vendor_identifier[8];
} a;
};
@@ -2422,7 +2423,7 @@ struct ct_sns_req {
} rsnn_nn;
struct {
- uint8_t hba_indentifier[8];
+ uint8_t hba_identifier[8];
} ghat;
struct {
@@ -2732,7 +2733,7 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
+#define QLA_BASE_VECTORS 2 /* default + RSP */
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
uint16_t entry;
char name[30];
void *handle;
- struct irq_affinity_notify irq_notify;
int cpuid;
};
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 94e8a8592f69..ee3df8794806 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1939,15 +1939,15 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/* Vendor Identifier */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
- snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
+ snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
"%s", "QLGC");
- alen = strlen(eiter->a.vendor_indentifer);
+ alen = strlen(eiter->a.vendor_identifier);
alen += 4 - (alen & 3);
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
ql_dbg(ql_dbg_disc, vha, 0x20b1,
- "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+ "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f30386a..7b6317c8c2e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
/* Wait for soft-reset to complete. */
RD_REG_DWORD(&reg->ctrl_status);
- for (cnt = 0; cnt < 6000000; cnt++) {
+ for (cnt = 0; cnt < 60; cnt++) {
barrier();
if ((RD_REG_DWORD(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
RD_REG_WORD(&reg->mailbox0);
- for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9b02ec..edc2264db45b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
- const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -1472,7 +1468,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
+ sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
@@ -1486,7 +1483,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
+ sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
@@ -2496,6 +2494,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
@@ -2572,14 +2574,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
- if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
- /* if kernel does not notify qla of IRQ's CPU change,
- * then set it here.
- */
- rsp->msix->cpuid = smp_processor_id();
- ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
- }
-
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -3003,14 +2997,14 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry msix_entries[] = {
+static const struct qla_init_msix_entry msix_entries[] = {
{ "qla2xxx (default)", qla24xx_msix_default },
{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
{ "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla82xx_msix_entries[] = {
+static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
{ "qla2xxx (default)", qla82xx_msix_default },
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
@@ -3018,13 +3012,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
-#define MIN_MSIX_COUNT 2
int i, ret;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct irq_affinity desc = {
+ .pre_vectors = QLA_BASE_VECTORS,
+ };
+
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+ desc.pre_vectors++;
+
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+ ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
- ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3069,18 +3070,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 0;
qentry->in_use = 0;
qentry->handle = NULL;
- qentry->irq_notify.notify = qla_irq_affinity_notify;
- qentry->irq_notify.release = qla_irq_affinity_release;
- qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+ for (i = 0; i < QLA_BASE_VECTORS; i++) {
qentry = &ha->msix_entries[i];
qentry->handle = rsp;
rsp->msix = qentry;
scnprintf(qentry->name, sizeof(qentry->name),
- msix_entries[i].name);
+ "%s", msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3093,18 +3091,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
goto msix_register_fail;
qentry->have_irq = 1;
qentry->in_use = 1;
-
- /* Register for CPU affinity notification. */
- irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
- /* Schedule work (ie. trigger a notification) to read cpu
- * mask for this specific irq.
- * kref_get is required because
- * irq_affinity_notify() will do
- * kref_put().
- */
- kref_get(&qentry->irq_notify.kref);
- schedule_work(&qentry->irq_notify.work);
}
/*
@@ -3116,7 +3102,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
rsp->msix = qentry;
qentry->handle = rsp;
scnprintf(qentry->name, sizeof(qentry->name),
- msix_entries[QLA_ATIO_VECTOR].name);
+ "%s", msix_entries[QLA_ATIO_VECTOR].name);
qentry->in_use = 1;
ret = request_irq(qentry->vector,
msix_entries[QLA_ATIO_VECTOR].handler,
@@ -3258,7 +3244,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
* from a probe failure context.
*/
if (!ha->rsp_q_map || !ha->rsp_q_map[0])
- return;
+ goto free_irqs;
rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled) {
@@ -3278,13 +3264,14 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
free_irq(pci_irq_vector(ha->pdev, 0), rsp);
}
+free_irqs:
pci_free_irq_vectors(ha->pdev);
}
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
struct qla_msix_entry *msix, int vector_type)
{
- struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+ const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
@@ -3301,49 +3288,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
msix->handle = qpair;
return ret;
}
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct qla_hw_data *ha;
- struct scsi_qla_host *base_vha;
- struct rsp_que *rsp = e->handle;
-
- /* user is recommended to set mask to just 1 cpu */
- e->cpuid = cpumask_first(mask);
-
- ha = rsp->hw;
- base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host %ld : vector %d cpu %d \n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-
- if (e->have_irq) {
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
- (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
- ha->tgt.rspq_vector_cpuid = e->cpuid;
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: rspq vector %d cpu %d runtime change\n",
- __func__, base_vha->host_no, e->vector, e->cpuid);
- }
- }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
- struct irq_affinity_notify *notify =
- container_of(ref, struct irq_affinity_notify, kref);
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct rsp_que *rsp = e->handle;
- struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d\n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb96041..67f64db390b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
-struct rom_cmd {
+static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
{ MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- /* if PCI error, then avoid mbx processing.*/
- if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+ /* if PCI error, then avoid mbx processing.*/
+ if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x1191,
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
- }
+ }
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
} else {
- uint16_t mb0;
- uint32_t ictrl;
+ uint16_t mb[8];
+ uint32_t ictrl, host_status, hccr;
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+ mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+ mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+ mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+ host_status = RD_REG_DWORD(&reg->isp24.host_status);
+ hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+ ql_log(ql_log_warn, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+ command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+ mb[7], host_status, hccr);
+
} else {
- mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+ mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
- "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- int configured_count;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
"Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
} else {
- configured_count = mcp->mb[11];
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 02f1de18bc2b..96c33e292eba 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2244,7 +2244,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy(fstatus.reserved_3,
pkt->reserved_2, 20 * sizeof(uint8_t));
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..0a1723cc08cf 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+ 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC
+};
+
static void qla82xx_crb_addr_transform_setup(void)
{
qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce3553b..77624eac95a4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
- 0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d7bad8..dc1ec9b61027 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
#define TIMEOUT_100_MS 100
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
+
/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4cdf55..83c1b7e17c80 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
#define CRB_CMDPEG_CHECK_DELAY 500
-static const uint32_t qla8044_reg_tbl[] = {
- QLA8044_PEG_HALT_STATUS1,
- QLA8044_PEG_HALT_STATUS2,
- QLA8044_PEG_ALIVE_COUNTER,
- QLA8044_CRB_DRV_ACTIVE,
- QLA8044_CRB_DEV_STATE,
- QLA8044_CRB_DRV_STATE,
- QLA8044_CRB_DRV_SCRATCH,
- QLA8044_CRB_DEV_PART_INFO1,
- QLA8044_CRB_IDC_VER_MAJOR,
- QLA8044_FW_VER_MAJOR,
- QLA8044_FW_VER_MINOR,
- QLA8044_FW_VER_SUB,
- QLA8044_CMDPEG_STATE,
- QLA8044_ASIC_TEMP,
-};
-
/* MiniDump Structures */
/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe302e9..d01c90c7dd04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -262,6 +262,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla2xxx_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = qla2xxx_eh_abort,
.eh_device_reset_handler = qla2xxx_eh_device_reset,
.eh_target_reset_handler = qla2xxx_eh_target_reset,
@@ -466,7 +467,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
continue;
rsp = ha->rsp_q_map[cnt];
- clear_bit(cnt, ha->req_qid_map);
+ clear_bit(cnt, ha->rsp_qid_map);
ha->rsp_q_map[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
@@ -1616,7 +1617,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
/* Don't abort commands in adapter during EEH
* recovery as it's not accessible/responding.
*/
- if (!ha->flags.eeh_busy) {
+ if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
/* Get a reference to the sp and drop the lock.
* The reference ensures this sp->done() call
* - and not the call in qla2xxx_eh_abort() -
@@ -3662,7 +3663,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct6_dsd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ctx_cachep)
- goto fail_free_gid_list;
+ goto fail_free_srb_mempool;
}
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
ctx_cachep);
@@ -3815,7 +3816,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
GFP_KERNEL);
if (!ha->loop_id_map)
- goto fail_async_pd;
+ goto fail_loop_id_map;
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3825,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return 0;
+fail_loop_id_map:
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
@@ -3851,6 +3854,10 @@ fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+
+ if (ha->sns_cmd)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3875,12 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- mempool_destroy(ha->ctx_mempool);
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- mempool_destroy(ha->srb_mempool);
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..e4fda84b959e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
- uint32_t unpacked_lun, lun = 0;
uint16_t loop_id;
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
- struct atio_from_isp *a = (struct atio_from_isp *)iocb;
unsigned long flags;
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
"loop_id %d)\n", vha->host_no, sess, sess->port_name,
mcmd, loop_id);
- lun = a->u.isp24.fcp_cmnd.lun;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
- return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
- iocb, QLA24XX_MGMT_SEND_NACK);
+ return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
#if 0 /* Todo */
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+ if (rc) {
+ }
#endif
goto done;
}
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
if (!vha->flags.online)
return;
- while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+ fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
- ha_locked);
+ if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+ /*
+ * This packet is corrupted. The header + payload
+ * can not be trusted. There is no point in passing
+ * it further up.
+ */
+ ql_log(ql_log_warn, vha, 0xffff,
+ "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+ pkt->u.isp24.fcp_hdr.s_id,
+ be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+ le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+ adjust_corrupted_atio(pkt);
+ qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ } else {
+ qlt_24xx_atio_pkt_all_vps(vha,
+ (struct atio_from_isp *)pkt, ha_locked);
+ }
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
/* Disable Full Login after LIP */
nv->host_p &= cpu_to_le32(~BIT_10);
+
+ /*
+ * clear BIT 15 explicitly as we have seen at least
+ * a couple of instances where this was set and this
+ * was causing the firmware to not be initialized.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
/* Disable Full Login after LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+ /*
+ * clear BIT 15 explicitly as we have seen at
+ * least a couple of instances where this was set
+ * and this was causing the firmware to not be
+ * initialized.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
if (ql2xtgt_tape_enable)
/* Enable FC tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f60eedd..0824a8164a24 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
struct {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN 0x38
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
} __packed;
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+ if (atio->entry_type == ATIO_TYPE7 &&
+ (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
+ return 1;
+ else
+ return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+ atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+ atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
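For reference, the attr_n_length overlay above carves the first two bytes out of the former data[58] area: bits 0-11 of the little-endian word carry the FCP_CMND length, so a type-7 ATIO reporting less than FCP_CMD_LENGTH_MIN (0x38) bytes is treated as a corrupted frame, terminated, and skipped. A stand-alone restatement of that check follows (a sketch only, not part of the patch; the helper name is invented and the byte-swap is applied before masking for clarity):

	static bool atio7_length_valid(const struct atio *atio)
	{
		u16 len = le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK;

		/* only type-7 ATIOs carry an FCP_CMND whose length can be policed */
		return atio->entry_type != ATIO_TYPE7 || len >= FCP_CMD_LENGTH_MIN;
	}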
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9ed669..8a58ef3adab4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring;
+
+ if (atr || !buf) {
+ length = ha->tgt.atio_q_length;
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
"%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring_ptr;
+
+ if (atr || !buf) {
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(ha->tgt.atio_q_in ?
+ readl(ha->tgt.atio_q_in) : 0, buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
"%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6fc7795..3084983c1287 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -371,7 +371,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
*/
pr_debug("write_pending aborted cmd[%p] refcount %d "
"transport_state %x, t_state %x, se_cmd_flags %x\n",
- cmd,cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
@@ -584,7 +584,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
*/
pr_debug("queue_data_in aborted cmd[%p] refcount %d "
"transport_state %x, t_state %x, se_cmd_flags %x\n",
- cmd,cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
{
return sprintf(page,
"TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
}
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..cf8430be183b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
#include <target/target_core_base.h>
#include <linux/btree.h>
-#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
/*
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aeebefb1e9f8..fc233717355f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -408,9 +408,6 @@ struct qla4_8xxx_legacy_intr_set {
};
/* MSI-X Support */
-
-#define QLA_MSIX_DEFAULT 0
-#define QLA_MSIX_RSP_Q 1
#define QLA_MSIX_ENTRIES 2
/*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 9fbb33fc90c7..ac52150d1569 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -9539,15 +9539,15 @@ exit_host_reset:
* driver calls the following device driver's callbacks
*
* - Fatal Errors - link_reset
- * - Non-Fatal Errors - driver's pci_error_detected() which
+ * - Non-Fatal Errors - driver's error_detected() which
* returns CAN_RECOVER, NEED_RESET or DISCONNECT.
*
* PCI AER driver calls
- * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
+ * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
* returns RECOVERED or NEED_RESET if fw_hung
* NEED_RESET - driver's slot_reset()
* DISCONNECT - device is dead & cannot recover
- * RECOVERED - driver's pci_resume()
+ * RECOVERED - driver's resume()
*/
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 75455d4dab68..7bfbcfa7af40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -98,176 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain);
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
-struct scsi_host_cmd_pool {
- struct kmem_cache *cmd_slab;
- struct kmem_cache *sense_slab;
- unsigned int users;
- char *cmd_name;
- char *sense_name;
- unsigned int slab_flags;
- gfp_t gfp_mask;
-};
-
-static struct scsi_host_cmd_pool scsi_cmd_pool = {
- .cmd_name = "scsi_cmd_cache",
- .sense_name = "scsi_sense_cache",
- .slab_flags = SLAB_HWCACHE_ALIGN,
-};
-
-static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
- .cmd_name = "scsi_cmd_cache(DMA)",
- .sense_name = "scsi_sense_cache(DMA)",
- .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
- .gfp_mask = __GFP_DMA,
-};
-
-static DEFINE_MUTEX(host_cmd_pool_mutex);
-
-/**
- * scsi_host_free_command - internal function to release a command
- * @shost: host to free the command for
- * @cmd: command to release
- *
- * the command must previously have been allocated by
- * scsi_host_alloc_command.
- */
-static void
-scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- struct scsi_host_cmd_pool *pool = shost->cmd_pool;
-
- if (cmd->prot_sdb)
- kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
- kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
- kmem_cache_free(pool->cmd_slab, cmd);
-}
-
-/**
- * scsi_host_alloc_command - internal function to allocate command
- * @shost: SCSI host whose pool to allocate from
- * @gfp_mask: mask for the allocation
- *
- * Returns a fully allocated command with sense buffer and protection
- * data buffer (where applicable) or NULL on failure
- */
-static struct scsi_cmnd *
-scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
-{
- struct scsi_host_cmd_pool *pool = shost->cmd_pool;
- struct scsi_cmnd *cmd;
-
- cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
- if (!cmd)
- goto fail;
-
- cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
- gfp_mask | pool->gfp_mask);
- if (!cmd->sense_buffer)
- goto fail_free_cmd;
-
- if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
- cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
- if (!cmd->prot_sdb)
- goto fail_free_sense;
- }
-
- return cmd;
-
-fail_free_sense:
- kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
-fail_free_cmd:
- kmem_cache_free(pool->cmd_slab, cmd);
-fail:
- return NULL;
-}
-
-/**
- * __scsi_get_command - Allocate a struct scsi_cmnd
- * @shost: host to transmit command
- * @gfp_mask: allocation mask
- *
- * Description: allocate a struct scsi_cmd from host's slab, recycling from the
- * host's free_list if necessary.
- */
-static struct scsi_cmnd *
-__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
-
- if (unlikely(!cmd)) {
- unsigned long flags;
-
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (likely(!list_empty(&shost->free_list))) {
- cmd = list_entry(shost->free_list.next,
- struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
-
- if (cmd) {
- void *buf, *prot;
-
- buf = cmd->sense_buffer;
- prot = cmd->prot_sdb;
-
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- }
- }
-
- return cmd;
-}
-
-/**
- * scsi_get_command - Allocate and setup a scsi command block
- * @dev: parent scsi device
- * @gfp_mask: allocator flags
- *
- * Returns: The allocated scsi command structure.
- */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
- unsigned long flags;
-
- if (unlikely(cmd == NULL))
- return NULL;
-
- cmd->device = dev;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
- cmd->jiffies_at_alloc = jiffies;
- return cmd;
-}
-
-/**
- * __scsi_put_command - Free a struct scsi_cmnd
- * @shost: dev->host
- * @cmd: Command to free
- */
-static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- unsigned long flags;
-
- if (unlikely(list_empty(&shost->free_list))) {
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (list_empty(&shost->free_list)) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
- }
-
- if (likely(cmd != NULL))
- scsi_host_free_command(shost, cmd);
-}
-
/**
* scsi_put_command - Free a scsi command block
* @cmd: command block to free
@@ -287,188 +117,6 @@ void scsi_put_command(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
BUG_ON(delayed_work_pending(&cmd->abort_work));
-
- __scsi_put_command(cmd->device->host, cmd);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_find_host_cmd_pool(struct Scsi_Host *shost)
-{
- if (shost->hostt->cmd_size)
- return shost->hostt->cmd_pool;
- if (shost->unchecked_isa_dma)
- return &scsi_cmd_dma_pool;
- return &scsi_cmd_pool;
-}
-
-static void
-scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
-{
- kfree(pool->sense_name);
- kfree(pool->cmd_name);
- kfree(pool);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return NULL;
-
- pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
- pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
- if (!pool->cmd_name || !pool->sense_name) {
- scsi_free_host_cmd_pool(pool);
- return NULL;
- }
-
- pool->slab_flags = SLAB_HWCACHE_ALIGN;
- if (shost->unchecked_isa_dma) {
- pool->slab_flags |= SLAB_CACHE_DMA;
- pool->gfp_mask = __GFP_DMA;
- }
-
- if (hostt->cmd_size)
- hostt->cmd_pool = pool;
-
- return pool;
-}
-
-static struct scsi_host_cmd_pool *
-scsi_get_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *retval = NULL, *pool;
- size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
-
- /*
- * Select a command slab for this host and create it if not
- * yet existent.
- */
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
- if (!pool) {
- pool = scsi_alloc_host_cmd_pool(shost);
- if (!pool)
- goto out;
- }
-
- if (!pool->users) {
- pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
- pool->slab_flags, NULL);
- if (!pool->cmd_slab)
- goto out_free_pool;
-
- pool->sense_slab = kmem_cache_create(pool->sense_name,
- SCSI_SENSE_BUFFERSIZE, 0,
- pool->slab_flags, NULL);
- if (!pool->sense_slab)
- goto out_free_slab;
- }
-
- pool->users++;
- retval = pool;
-out:
- mutex_unlock(&host_cmd_pool_mutex);
- return retval;
-
-out_free_slab:
- kmem_cache_destroy(pool->cmd_slab);
-out_free_pool:
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- goto out;
-}
-
-static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
-
- /*
- * This may happen if a driver has a mismatched get and put
- * of the command pool; the driver should be implicated in
- * the stack trace
- */
- BUG_ON(pool->users == 0);
-
- if (!--pool->users) {
- kmem_cache_destroy(pool->cmd_slab);
- kmem_cache_destroy(pool->sense_slab);
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- }
- mutex_unlock(&host_cmd_pool_mutex);
-}
-
-/**
- * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
- * @shost: host to allocate the freelist for.
- *
- * Description: The command freelist protects against system-wide out of memory
- * deadlock by preallocating one SCSI command structure for each host, so the
- * system can always write to a swap file on a device associated with that host.
- *
- * Returns: Nothing.
- */
-int scsi_setup_command_freelist(struct Scsi_Host *shost)
-{
- const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
- struct scsi_cmnd *cmd;
-
- spin_lock_init(&shost->free_list_lock);
- INIT_LIST_HEAD(&shost->free_list);
-
- shost->cmd_pool = scsi_get_host_cmd_pool(shost);
- if (!shost->cmd_pool)
- return -ENOMEM;
-
- /*
- * Get one backup command for this host.
- */
- cmd = scsi_host_alloc_command(shost, gfp_mask);
- if (!cmd) {
- scsi_put_host_cmd_pool(shost);
- shost->cmd_pool = NULL;
- return -ENOMEM;
- }
- list_add(&cmd->list, &shost->free_list);
- return 0;
-}
-
-/**
- * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
- * @shost: host whose freelist is going to be destroyed
- */
-void scsi_destroy_command_freelist(struct Scsi_Host *shost)
-{
- /*
- * If cmd_pool is NULL the free list was not initialized, so
- * do not attempt to release resources.
- */
- if (!shost->cmd_pool)
- return;
-
- while (!list_empty(&shost->free_list)) {
- struct scsi_cmnd *cmd;
-
- cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- scsi_host_free_command(shost, cmd);
- }
- shost->cmd_pool = NULL;
- scsi_put_host_cmd_pool(shost);
}
#ifdef CONFIG_SCSI_LOGGING
@@ -590,7 +238,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 03051e12a072..17249c3650fe 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -125,6 +125,7 @@ static const char *sdebug_version_date = "20160430";
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
+#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
@@ -590,6 +591,7 @@ static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
+static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
@@ -1205,7 +1207,11 @@ static int inquiry_vpd_b0(unsigned char *arr)
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
/* Optimal transfer length granularity */
- gran = 1 << sdebug_physblk_exp;
+ if (sdebug_opt_xferlen_exp != 0 &&
+ sdebug_physblk_exp < sdebug_opt_xferlen_exp)
+ gran = 1 << sdebug_opt_xferlen_exp;
+ else
+ gran = 1 << sdebug_physblk_exp;
put_unaligned_be16(gran, arr + 2);
/* Maximum Transfer Length */
@@ -4161,6 +4167,7 @@ module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
@@ -4212,6 +4219,7 @@ MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 996e134d79fa..f2cafae150bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -279,9 +279,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
- if (host->transportt->eh_timed_out)
- rtn = host->transportt->eh_timed_out(scmd);
- else if (host->hostt->eh_timed_out)
+ if (host->hostt->eh_timed_out)
rtn = host->hostt->eh_timed_out(scmd);
if (rtn == BLK_EH_NOT_HANDLED) {
@@ -1106,7 +1104,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
{
- if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(scmd->request)) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1746,7 +1744,7 @@ check_type:
* the check condition was retryable.
*/
if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ blk_rq_is_passthrough(scmd->request))
return 1;
else
return 0;
@@ -1968,25 +1966,25 @@ static void eh_lock_door_done(struct request *req, int uptodate)
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
struct request *req;
+ struct scsi_request *rq;
/*
* blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
* request becomes available
*/
- req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return;
+ rq = scsi_req(req);
+ scsi_req_init(req);
- blk_rq_set_block_pc(req);
-
- req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- req->cmd[1] = 0;
- req->cmd[2] = 0;
- req->cmd[3] = 0;
- req->cmd[4] = SCSI_REMOVAL_PREVENT;
- req->cmd[5] = 0;
-
- req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+ rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ rq->cmd[1] = 0;
+ rq->cmd[2] = 0;
+ rq->cmd[3] = 0;
+ rq->cmd[4] = SCSI_REMOVAL_PREVENT;
+ rq->cmd[5] = 0;
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
@@ -2331,7 +2329,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
struct scsi_cmnd *scmd;
struct Scsi_Host *shost = dev->host;
- struct request req;
+ struct request *rq;
unsigned long flags;
int error = 0, rtn, val;
@@ -2346,14 +2344,16 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
return -EIO;
error = -EIO;
- scmd = scsi_get_command(dev, GFP_KERNEL);
- if (!scmd)
+ rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
+ shost->hostt->cmd_size, GFP_KERNEL);
+ if (!rq)
goto out_put_autopm_host;
+ blk_rq_init(NULL, rq);
- blk_rq_init(NULL, &req);
- scmd->request = &req;
-
- scmd->cmnd = req.cmd;
+ scmd = (struct scsi_cmnd *)(rq + 1);
+ scsi_init_command(dev, scmd);
+ scmd->request = rq;
+ scmd->cmnd = scsi_req(rq)->cmd;
scmd->scsi_done = scsi_reset_provider_done_command;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -2413,6 +2413,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scsi_run_host_queues(shost);
scsi_put_command(scmd);
+ kfree(rq);
out_put_autopm_host:
scsi_autopm_put_host(shost);
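The scsi_error.c changes above follow the new passthrough convention: requests are allocated with a REQ_OP_SCSI_* opcode, and the CDB, sense data and residual now live in the struct scsi_request embedded in the request, reached via scsi_req(), rather than in struct request itself. A condensed sketch of that convention (illustrative only; the function name and synchronous submission are assumptions, error handling trimmed):

	static void example_test_unit_ready(struct scsi_device *sdev)
	{
		struct request *req;
		struct scsi_request *rq;

		req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
		if (IS_ERR(req))
			return;

		rq = scsi_req(req);
		scsi_req_init(req);
		rq->cmd[0] = TEST_UNIT_READY;
		rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
		req->timeout = 10 * HZ;

		blk_execute_rq(req->q, NULL, req, 1);
		blk_put_request(req);
	}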
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9e1e141af9c..912fbc3b4543 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -37,8 +37,59 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+static struct kmem_cache *scsi_sdb_cache;
+static struct kmem_cache *scsi_sense_cache;
+static struct kmem_cache *scsi_sense_isadma_cache;
+static DEFINE_MUTEX(scsi_sense_cache_mutex);
-struct kmem_cache *scsi_sdb_cache;
+static inline struct kmem_cache *
+scsi_select_sense_cache(struct Scsi_Host *shost)
+{
+ return shost->unchecked_isa_dma ?
+ scsi_sense_isadma_cache : scsi_sense_cache;
+}
+
+static void scsi_free_sense_buffer(struct Scsi_Host *shost,
+ unsigned char *sense_buffer)
+{
+ kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+}
+
+static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+ gfp_t gfp_mask, int numa_node)
+{
+ return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
+ numa_node);
+}
+
+int scsi_init_sense_cache(struct Scsi_Host *shost)
+{
+ struct kmem_cache *cache;
+ int ret = 0;
+
+ cache = scsi_select_sense_cache(shost);
+ if (cache)
+ return 0;
+
+ mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ if (!scsi_sense_isadma_cache)
+ ret = -ENOMEM;
+ } else {
+ scsi_sense_cache =
+ kmem_cache_create("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+}
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
@@ -168,22 +219,23 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req_flags_t rq_flags, int *resid)
{
struct request *req;
- int write = (data_direction == DMA_TO_DEVICE);
+ struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
+ req = blk_get_request(sdev->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
goto out;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
@@ -200,11 +252,13 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
- memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+ if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
+ memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
if (resid)
- *resid = req->resid_len;
+ *resid = rq->resid_len;
+ if (sense && rq->sense_len)
+ memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
ret = req->errors;
out:
blk_put_request(req);
@@ -529,7 +583,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -645,14 +699,13 @@ static bool scsi_end_request(struct request *req, int error,
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
+ scsi_release_buffers(cmd);
+ scsi_put_command(cmd);
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_release_buffers(cmd);
-
- scsi_put_command(cmd);
scsi_run_queue(q);
}
@@ -754,18 +807,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
- if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
+ if (blk_rq_is_passthrough(req)) {
if (result) {
- if (sense_valid && req->sense) {
+ if (sense_valid) {
/*
* SG_IO wants current and deferred errors
*/
- int len = 8 + cmd->sense_buffer[7];
-
- if (len > SCSI_SENSE_BUFFERSIZE)
- len = SCSI_SENSE_BUFFERSIZE;
- memcpy(req->sense, cmd->sense_buffer, len);
- req->sense_len = len;
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
error = __scsi_error_from_host_byte(cmd, result);
@@ -775,14 +825,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
req->errors = cmd->result;
- req->resid_len = scsi_get_resid(cmd);
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
/*
* Bidi commands Must be complete as a whole,
* both sides at once.
*/
- req->next_rq->resid_len = scsi_in(cmd)->resid;
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
if (scsi_end_request(req, 0, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
@@ -790,15 +840,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
- * Certain non BLOCK_PC requests are commands that don't
- * actually transfer anything (FLUSH), so cannot use
+ * Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
error = __scsi_error_from_host_byte(cmd, result);
}
- /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+ /* no bidi support for !blk_rq_is_passthrough yet */
BUG_ON(blk_bidi_rq(req));
/*
@@ -810,8 +859,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Recovered errors need reporting, but they're always treated
- * as success, so fiddle the result code here. For BLOCK_PC
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
@@ -825,7 +874,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
- /* BLOCK_PC may have set error */
+ /* for passthrough error may be set */
error = 0;
}
@@ -1040,7 +1089,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
bool is_mq = (rq->mq_ctx != NULL);
int error;
- BUG_ON(!blk_rq_nr_phys_segments(rq));
+ if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
+ return -EINVAL;
error = scsi_init_sgtable(rq, &cmd->sdb);
if (error)
@@ -1109,42 +1159,33 @@ err_exit:
}
EXPORT_SYMBOL(scsi_init_io);
-static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
- struct request *req)
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd;
-
- if (!req->special) {
- /* Bail if we can't get a reference to the device */
- if (!get_device(&sdev->sdev_gendev))
- return NULL;
-
- cmd = scsi_get_command(sdev, GFP_ATOMIC);
- if (unlikely(!cmd)) {
- put_device(&sdev->sdev_gendev);
- return NULL;
- }
- req->special = cmd;
- } else {
- cmd = req->special;
- }
+ void *buf = cmd->sense_buffer;
+ void *prot = cmd->prot_sdb;
+ unsigned long flags;
- /* pull a tag out of the request if we have one */
- cmd->tag = req->tag;
- cmd->request = req;
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
- cmd->cmnd = req->cmd;
- cmd->prot_op = SCSI_PROT_NORMAL;
+ cmd->device = dev;
+ cmd->sense_buffer = buf;
+ cmd->prot_sdb = prot;
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ cmd->jiffies_at_alloc = jiffies;
- return cmd;
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
}
-static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
/*
- * BLOCK_PC requests may transfer data, in which case they must
+ * Passthrough requests may transfer data, in which case they must
* have a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
@@ -1159,14 +1200,15 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = req->cmd_len;
+ cmd->cmd_len = scsi_req(req)->cmd_len;
+ cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
/*
- * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * Setup a normal block command. These are simple requests from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1179,6 +1221,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return ret;
}
+ cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1194,14 +1237,10 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+ else
return scsi_setup_fs_cmnd(sdev, req);
- case REQ_TYPE_BLOCK_PC:
- return scsi_setup_blk_pc_cmnd(sdev, req);
- default:
- return BLKPREP_KILL;
- }
}
static int
@@ -1297,19 +1336,28 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
int ret;
ret = scsi_prep_state_check(sdev, req);
if (ret != BLKPREP_OK)
goto out;
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd)) {
- ret = BLKPREP_DEFER;
- goto out;
+ if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (unlikely(!get_device(&sdev->sdev_gendev))) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ scsi_init_command(sdev, cmd);
+ req->special = cmd;
}
+ cmd->tag = req->tag;
+ cmd->request = req;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
ret = scsi_setup_cmnd(sdev, req);
out:
return scsi_prep_return(q, req, ret);
@@ -1826,7 +1874,9 @@ static int scsi_mq_prep_fn(struct request *req)
unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
- memset(cmd, 0, sizeof(struct scsi_cmnd));
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
req->special = cmd;
@@ -1836,7 +1886,6 @@ static int scsi_mq_prep_fn(struct request *req)
cmd->tag = req->tag;
- cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL;
INIT_LIST_HEAD(&cmd->list);
@@ -1911,7 +1960,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
-
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret != BLK_MQ_RQ_QUEUE_OK)
@@ -1981,21 +2029,24 @@ static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
- numa_node);
+ cmd->sense_buffer =
+ scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
+ cmd->req.sense = cmd->sense_buffer;
return 0;
}
static void scsi_exit_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- kfree(cmd->sense_buffer);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2028,7 +2079,7 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return bounce_limit;
}
-static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
struct device *dev = shost->dma_dev;
@@ -2063,28 +2114,64 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
*/
blk_queue_dma_alignment(q, 0x03);
}
+EXPORT_SYMBOL_GPL(__scsi_init_queue);
-struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
- request_fn_proc *request_fn)
+static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
- struct request_queue *q;
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- q = blk_init_queue(request_fn, NULL);
- if (!q)
- return NULL;
- __scsi_init_queue(shost, q);
- return q;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+ if (!cmd->sense_buffer)
+ goto fail;
+ cmd->req.sense = cmd->sense_buffer;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
+ }
+
+ return 0;
+
+fail_free_sense:
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
+fail:
+ return -ENOMEM;
+}
+
+static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+{
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
-EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
+ struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
if (!q)
return NULL;
+ q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+ q->rq_alloc_data = shost;
+ q->request_fn = scsi_request_fn;
+ q->init_rq_fn = scsi_init_rq;
+ q->exit_rq_fn = scsi_exit_rq;
+
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return NULL;
+ }
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_unprep_rq(q, scsi_unprep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
@@ -2208,6 +2295,8 @@ int __init scsi_init_queue(void)
void scsi_exit_queue(void)
{
+ kmem_cache_destroy(scsi_sense_cache);
+ kmem_cache_destroy(scsi_sense_isadma_cache);
kmem_cache_destroy(scsi_sdb_cache);
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 193636a59adf..99bfc985e190 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -30,8 +30,8 @@ extern void scsi_exit_hosts(void);
/* scsi.c */
extern bool scsi_use_blk_mq;
-extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
-extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
+int scsi_init_sense_cache(struct Scsi_Host *shost);
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -96,7 +96,6 @@ extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
struct request_queue;
struct request;
-extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 03577bde6ac5..2d753c93e07a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2055,7 +2055,7 @@ static int fc_vport_match(struct attribute_container *cont,
/**
- * fc_timed_out - FC Transport I/O timeout intercept handler
+ * fc_eh_timed_out - FC Transport I/O timeout intercept handler
* @scmd: The SCSI command which timed out
*
* This routine protects against error handlers getting invoked while a
@@ -2076,8 +2076,8 @@ static int fc_vport_match(struct attribute_container *cont,
* Notes:
* This routine assumes no locks are held on entry.
*/
-static enum blk_eh_timer_return
-fc_timed_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return
+fc_eh_timed_out(struct scsi_cmnd *scmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
@@ -2086,6 +2086,7 @@ fc_timed_out(struct scsi_cmnd *scmd)
return BLK_EH_NOT_HANDLED;
}
+EXPORT_SYMBOL(fc_eh_timed_out);
/*
* Called by fc_user_scan to locate an rport on the shost that
@@ -2159,19 +2160,6 @@ fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
return 0;
}
-static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
- int result)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
-}
-
-static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
-{
- struct fc_internal *i = to_fc_internal(shost->transportt);
- return i->f->it_nexus_response(shost, nexus, result);
-}
-
struct scsi_transport_template *
fc_attach_transport(struct fc_function_template *ft)
{
@@ -2211,14 +2199,8 @@ fc_attach_transport(struct fc_function_template *ft)
/* Transport uses the shost workq for scsi scanning */
i->t.create_work_queue = 1;
- i->t.eh_timed_out = fc_timed_out;
-
i->t.user_scan = fc_user_scan;
- /* target-mode drivers' functions */
- i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
- i->t.it_nexus_response = fc_it_nexus_response;
-
/*
* Setup SCSI Target Attributes.
*/
@@ -3765,7 +3747,6 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
char bsg_name[20];
fc_host->rqst_q = NULL;
@@ -3776,23 +3757,14 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev,
- "fc_host%d: bsg interface failed to initialize - no request queue\n",
- shost->host_no);
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
- i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
shost->host_no);
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
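The calling convention above is easy to miss: bsg_setup_queue() now allocates the queue itself and reports failure via ERR_PTR(), so callers no longer pre-allocate with __scsi_alloc_queue() or clean up the queue on error. A minimal sketch of the resulting pattern; everything except bsg_setup_queue() and __scsi_init_queue() is hypothetical:

static int mytransport_bsg_dispatch(struct bsg_job *job);	/* hypothetical */

static int mytransport_bsg_add(struct Scsi_Host *shost, struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, "mytransport_bsg",
			    mytransport_bsg_dispatch, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);	/* queue never handed to us, nothing to clean up */

	__scsi_init_queue(shost, q);	/* apply the host's DMA and segment limits */
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
	return 0;
}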
@@ -3824,26 +3796,18 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
rport->rqst_q = NULL;
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev, "bsg interface failed to initialize - no request queue\n");
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
-
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, fc_bsg_rport_prep);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 42bca619f854..568c9f26a561 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1537,24 +1537,18 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
struct request_queue *q;
char bsg_name[20];
- int ret;
if (!i->iscsi_transport->bsg_request)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q)
- return -ENOMEM;
-
- ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
- if (ret) {
+ q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+ if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
- blk_cleanup_queue(q);
- return ret;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
ihost->bsg_q = q;
return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 60b651bfaa01..126a5ee00987 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
#include <linux/bsg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -177,6 +178,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
while ((req = blk_fetch_request(q)) != NULL) {
spin_unlock_irq(q->queue_lock);
+ scsi_req(req)->resid_len = blk_rq_bytes(req);
+ if (req->next_rq)
+ scsi_req(req->next_rq)->resid_len =
+ blk_rq_bytes(req->next_rq);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
req->errors = ret;
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index b87a78673f65..3c5d89852e9f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
* Note: This function is called from soft-IRQ context and with the request
* queue lock held.
*/
-static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
@@ -603,6 +603,7 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}
+EXPORT_SYMBOL(srp_timed_out);
static void srp_rport_release(struct device *dev)
{
@@ -793,19 +794,6 @@ void srp_stop_rport_timers(struct srp_rport *rport)
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
-static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
- int result)
-{
- struct srp_internal *i = to_srp_internal(shost->transportt);
- return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
-}
-
-static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
-{
- struct srp_internal *i = to_srp_internal(shost->transportt);
- return i->f->it_nexus_response(shost, nexus, result);
-}
-
/**
* srp_attach_transport - instantiate SRP transport template
* @ft: SRP transport class function template
@@ -820,11 +808,6 @@ srp_attach_transport(struct srp_function_template *ft)
if (!i)
return NULL;
- i->t.eh_timed_out = srp_timed_out;
-
- i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
- i->t.it_nexus_response = srp_it_nexus_response;
-
i->t.host_size = sizeof(struct srp_host_attrs);
i->t.host_attrs.ac.attrs = &i->host_attrs[0];
i->t.host_attrs.ac.class = &srp_host_class.class;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1fbb1ecf49f2..cb6e68dd6df0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -703,7 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
/**
* sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
- * @sdp: scsi device to operate one
+ * @sdp: scsi device to operate on
* @rq: Request to prepare
*
* Will issue either UNMAP or WRITE SAME(16) depending on preference
@@ -781,7 +781,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
rq->special_vec.bv_len = len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->resid_len = len;
+ scsi_req(rq)->resid_len = len;
ret = scsi_init_io(cmd);
out:
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
struct bio *bio = rq->bio;
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
int ret;
if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdp->sector_size;
cmd->allowed = SD_MAX_RETRIES;
- return scsi_init_io(cmd);
+
+ /*
+ * For WRITE SAME the data transferred via the DATA OUT buffer is
+ * different from the amount of data actually written to the target.
+ *
+ * We set up __data_len to the amount of data transferred via the
+ * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
+ * to transfer a single sector of data first, but then reset it to
+ * the amount of data to be written right after so that the I/O path
+ * knows how much to actually write.
+ */
+ rq->__data_len = sdp->sector_size;
+ ret = scsi_init_io(cmd);
+ rq->__data_len = nr_bytes;
+ return ret;
}
static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
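To make the comment in sd_setup_write_same_cmnd() concrete, a worked example with hypothetical numbers:

/*
 * A WRITE SAME spanning 8 sectors of 512 bytes arrives with
 * blk_rq_bytes(rq) == 4096, yet only a single sector of payload is
 * carried in the DATA OUT buffer:
 *
 *   rq->__data_len = sdp->sector_size;   -> 512: scsi_init_io() and
 *                                           blk_rq_map_sg() build a
 *                                           one-sector S/G list
 *   ret = scsi_init_io(cmd);
 *   rq->__data_len = nr_bytes;           -> 4096: completion accounting
 *                                           again covers the full range
 */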
@@ -1164,7 +1179,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
- if (SCpnt->cmnd != rq->cmd) {
+ if (SCpnt->cmnd != scsi_req(rq)->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
@@ -1735,9 +1750,6 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
unsigned int good_bytes;
- if (scmd->request->cmd_type != REQ_TYPE_FS)
- return 0;
-
info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
&bad_lba);
@@ -2585,7 +2597,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
@@ -2768,13 +2781,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
- sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
- q->limits.zoned = BLK_ZONED_HA;
- else if (sdkp->device->type == TYPE_ZBC)
+ if (sdkp->device->type == TYPE_ZBC) {
+ /* Host-managed */
q->limits.zoned = BLK_ZONED_HM;
- else
- q->limits.zoned = BLK_ZONED_NONE;
+ } else {
+ sdkp->zoned = (buffer[8] >> 4) & 3;
+ if (sdkp->zoned == 1)
+ /* Host-aware */
+ q->limits.zoned = BLK_ZONED_HA;
+ else
+ /*
+ * Treat drive-managed devices as
+ * regular block devices.
+ */
+ q->limits.zoned = BLK_ZONED_NONE;
+ }
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
@@ -3058,6 +3079,23 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
+struct sd_devt {
+ int idx;
+ struct disk_devt disk_devt;
+};
+
+void sd_devt_release(struct disk_devt *disk_devt)
+{
+ struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
+ disk_devt);
+
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sd_devt->idx);
+ spin_unlock(&sd_index_lock);
+
+ kfree(sd_devt);
+}
+
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3079,6 +3117,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+ struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3104,9 +3143,13 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
+ sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
+ if (!sd_devt)
+ goto out_free;
+
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free;
+ goto out_free_devt;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3122,6 +3165,11 @@ static int sd_probe(struct device *dev)
goto out_put;
}
+ atomic_set(&sd_devt->disk_devt.count, 1);
+ sd_devt->disk_devt.release = sd_devt_release;
+ sd_devt->idx = index;
+ gd->disk_devt = &sd_devt->disk_devt;
+
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3161,13 +3209,14 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, index);
- spin_unlock(&sd_index_lock);
+ put_disk_devt(&sd_devt->disk_devt);
+ sd_devt = NULL;
out_put:
put_disk(gd);
out_free:
kfree(sdkp);
+ out_free_devt:
+ kfree(sd_devt);
out:
scsi_autopm_put_device(sdp);
return error;
@@ -3177,7 +3226,7 @@ static int sd_probe(struct device *dev)
* sd_remove - called whenever a scsi disk (previously recognized by
* sd_probe) is detached from the system. It is called (potentially
* multiple times) during sd module unload.
- * @sdp: pointer to mid level scsi device object
+ * @dev: pointer to device object
*
* Note: this function is invoked from the scsi mid-level.
* This function potentially frees up a device name (e.g. /dev/sdc)
@@ -3226,10 +3275,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sdkp->index);
- spin_unlock(&sd_index_lock);
-
+ put_disk_devt(disk->disk_devt);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c91705..50adabbb5808 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+ if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
if (efd.addr) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dbe5b4b95df0..e831e01f9fa6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -781,9 +781,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
-
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, -EIO);
srp->rq = NULL;
}
@@ -1279,6 +1277,7 @@ static void
sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
+ struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
@@ -1297,9 +1296,9 @@ sg_rq_end_io(struct request *rq, int uptodate)
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = rq->sense;
+ sense = req->sense;
result = rq->errors;
- resid = rq->resid_len;
+ resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1333,6 +1332,10 @@ sg_rq_end_io(struct request *rq, int uptodate)
sdp->device->changed = 1;
}
}
+
+ if (req->sense_len)
+ memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+
/* Rely on write phase to clean out srp status values, so no "else" */
/*
@@ -1342,8 +1345,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(scsi_req(rq));
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1658,6 +1660,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
+ struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
@@ -1695,22 +1698,23 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
+ req = scsi_req(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
if (hp->cmd_len > BLK_MAX_CDB)
- rq->cmd = long_cmdp;
- memcpy(rq->cmd, cmd, hp->cmd_len);
- rq->cmd_len = hp->cmd_len;
+ req->cmd = long_cmdp;
+ memcpy(req->cmd, cmd, hp->cmd_len);
+ req->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
@@ -1753,6 +1757,10 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
return res;
iov_iter_truncate(&i, hp->dxfer_len);
+ if (!iov_iter_count(&i)) {
+ kfree(iov);
+ return -EINVAL;
+ }
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
@@ -1786,8 +1794,7 @@ sg_finish_rem_req(Sg_request *srp)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_put_request(srp->rq);
}
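The sg conversion above shows the new split between struct request and struct scsi_request: the CDB, sense data, and residual count now live behind scsi_req(rq). A minimal sketch of issuing a passthrough CDB with those accessors; mydrv_send_cdb and its arguments are hypothetical:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi_request.h>

static int mydrv_send_cdb(struct request_queue *q, const unsigned char *cdb,
			  unsigned int cdb_len, void *buf, unsigned int len)
{
	struct request *rq;
	struct scsi_request *req;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);			/* sets up req->cmd, sense, lengths */

	req = scsi_req(rq);
	memcpy(req->cmd, cdb, cdb_len);		/* assumes cdb_len <= BLK_MAX_CDB */
	req->cmd_len = cdb_len;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret) {
		blk_execute_rq(q, NULL, rq, 0);
		ret = rq->errors ? -EIO : 0;	/* residual in scsi_req(rq)->resid_len */
	}
	blk_put_request(rq);
	return ret;
}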
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8702d9cf8040..11c0dfb3dfa3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4499,7 +4499,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->offload_enabled &&
- scmd->request->cmd_type == REQ_TYPE_FS) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 ||
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index 8ed778d4dbb9..de0ab5fc8474 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -299,7 +299,6 @@ struct snic {
/* pci related */
struct pci_dev *pdev;
- struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
/* io related info */
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index f552003128c6..d859501e4ccd 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -93,7 +93,7 @@ snic_free_intr(struct snic *snic)
/* ONLY interrupt mode MSIX is supported */
for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
if (snic->msix[i].requested) {
- free_irq(snic->msix_entry[i].vector,
+ free_irq(pci_irq_vector(snic->pdev, i),
snic->msix[i].devid);
}
}
@@ -134,7 +134,7 @@ snic_request_intr(struct snic *snic)
snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
- ret = request_irq(snic->msix_entry[i].vector,
+ ret = request_irq(pci_irq_vector(snic->pdev, i),
snic->msix[i].isr,
0,
snic->msix[i].devname,
@@ -158,47 +158,37 @@ snic_set_intr_mode(struct snic *snic)
{
unsigned int n = ARRAY_SIZE(snic->wq);
unsigned int m = SNIC_CQ_IO_CMPL_MAX;
- unsigned int i;
+ unsigned int vecs = n + m + 1;
/*
* We need n WQs, m CQs, and n+m+1 INTRs
* (last INTR is used for WQ/CQ errors and notification area)
*/
-
BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
ARRAY_SIZE(snic->intr));
- SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
-
- for (i = 0; i < (n + m + 1); i++)
- snic->msix_entry[i].entry = i;
-
- if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
- if (!pci_enable_msix(snic->pdev,
- snic->msix_entry,
- (n + m + 1))) {
- snic->wq_count = n;
- snic->cq_count = n + m;
- snic->intr_count = n + m + 1;
- snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
-
- SNIC_ISR_DBG(snic->shost,
- "Using MSI-X Interrupts\n");
- svnic_dev_set_intr_mode(snic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
- return 0;
- }
- }
- svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+ if (snic->wq_count < n || snic->cq_count < n + m)
+ goto fail;
+ if (pci_alloc_irq_vectors(snic->pdev, vecs, vecs, PCI_IRQ_MSIX) < 0)
+ goto fail;
+
+ snic->wq_count = n;
+ snic->cq_count = n + m;
+ snic->intr_count = vecs;
+ snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
+
+ SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n");
+ svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+ return 0;
+fail:
+ svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
return -EINVAL;
} /* end of snic_set_intr_mode */
void
snic_clear_intr_mode(struct snic *snic)
{
- pci_disable_msix(snic->pdev);
-
+ pci_free_irq_vectors(snic->pdev);
svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
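The snic change above is the generic MSI-X migration: pci_alloc_irq_vectors() replaces the driver-managed msix_entry array, vectors are looked up by index with pci_irq_vector(), and pci_free_irq_vectors() undoes the allocation. A compact sketch with hypothetical mydev_* naming:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int mydev_setup_irqs(struct pci_dev *pdev, int nvec,
			    irq_handler_t handler, void *data)
{
	int i, ret;

	/* insist on exactly nvec MSI-X vectors (min == max) */
	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "mydev", data);
		if (ret)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return ret;
}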
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 94352e4df831..0b29b9329b1c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -117,7 +117,7 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
-static struct cdrom_device_ops sr_dops = {
+static const struct cdrom_device_ops sr_dops = {
.open = sr_open,
.release = sr_release,
.drive_status = sr_drive_status,
@@ -437,14 +437,17 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
goto out;
}
- if (rq_data_dir(rq) == WRITE) {
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
if (!cd->writeable)
goto out;
SCpnt->cmnd[0] = WRITE_10;
cd->cdi.media_written = 1;
- } else if (rq_data_dir(rq) == READ) {
+ break;
+ case REQ_OP_READ:
SCpnt->cmnd[0] = READ_10;
- } else {
+ break;
+ default:
blk_dump_rq_flags(rq, "Unknown sr command");
goto out;
}
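The sr change above reflects the broader move away from rq_data_dir() and cmd_type: req_op() classifies the operation, and blk_rq_is_passthrough() replaces the old cmd_type == REQ_TYPE_FS test (compare the smartpqi and sun3 hunks). A small sketch with hypothetical MYDRV_* result codes:

#include <linux/blkdev.h>

enum { MYDRV_READ, MYDRV_WRITE, MYDRV_FLUSH, MYDRV_PASSTHROUGH, MYDRV_UNSUPPORTED };

static int mydrv_classify(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))	/* was: rq->cmd_type != REQ_TYPE_FS */
		return MYDRV_PASSTHROUGH;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		return MYDRV_READ;
	case REQ_OP_WRITE:
		return MYDRV_WRITE;
	case REQ_OP_FLUSH:
		return MYDRV_FLUSH;
	default:
		return MYDRV_UNSUPPORTED;
	}
}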
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5f35b863e1a7..81212d4bd9bf 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -475,7 +475,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
ktime_t now;
now = ktime_get();
- if (req->cmd[0] == WRITE_6) {
+ if (scsi_req(req)->cmd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -489,7 +489,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (req->cmd[0] == READ_6) {
+ } else if (scsi_req(req)->cmd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -514,15 +514,18 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
static void st_scsi_execute_end(struct request *req, int uptodate)
{
struct st_request *SRpnt = req->end_io_data;
+ struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->resid_len;
+ STp->buffer->cmdstat.residual = rq->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
+ if (rq->sense_len)
+ memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -535,17 +538,18 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int timeout, int retries)
{
struct request *req;
+ struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
- int write = (data_direction == DMA_TO_DEVICE);
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue, write,
- GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
-
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
@@ -571,11 +575,9 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
SRpnt->bio = req->bio;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(req->cmd, 0, BLK_MAX_CDB);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = SRpnt->sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memset(rq->cmd, 0, BLK_MAX_CDB);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
req->retries = retries;
req->end_io_data = SRpnt;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 05526b71541b..585e54f6512c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -136,6 +136,8 @@ struct hv_fc_wwn_packet {
#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
+#define SP_UNTAGGED ((unsigned char) ~0)
+#define SRB_SIMPLE_TAG_REQUEST 0x20
/*
* Platform neutral description of a scsi request -
@@ -375,6 +377,7 @@ enum storvsc_request_type {
#define SRB_STATUS_SUCCESS 0x01
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
+#define SRB_STATUS_DATA_OVERRUN 0x12
#define SRB_STATUS(status) \
(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -458,6 +461,15 @@ struct storvsc_device {
* Max I/O, the device can support.
*/
u32 max_transfer_bytes;
+ /*
+ * Number of sub-channels we will open.
+ */
+ u16 num_sc;
+ struct vmbus_channel **stor_chns;
+ /*
+ * Mask of CPUs bound to subchannels.
+ */
+ struct cpumask alloced_cpus;
/* Used for vsc/vsp channel reset process */
struct storvsc_cmd_request init_request;
struct storvsc_cmd_request reset_request;
@@ -635,6 +647,11 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, new_sc);
+
+ if (new_sc->state == CHANNEL_OPENED_STATE) {
+ stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+ cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+ }
}
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
@@ -651,6 +668,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
if (!stor_device)
return;
+ stor_device->num_sc = num_sc;
request = &stor_device->init_request;
vstor_packet = &request->vstor_packet;
@@ -838,6 +856,25 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
* support multi-channel.
*/
max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
+
+ /*
+ * Allocate state to manage the sub-channels.
+ * We allocate an array based on the number of possible CPUs
+ * (Hyper-V does not support cpu online/offline).
+ * This array will be sparsely populated with unique
+ * channels - primary + sub-channels.
+ * We will however populate all the slots to evenly distribute
+ * the load.
+ */
+ stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(),
+ GFP_KERNEL);
+ if (stor_device->stor_chns == NULL)
+ return -ENOMEM;
+
+ stor_device->stor_chns[device->channel->target_cpu] = device->channel;
+ cpumask_set_cpu(device->channel->target_cpu,
+ &stor_device->alloced_cpus);
+
if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
if (vstor_packet->storage_channel_properties.flags &
STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
@@ -889,6 +926,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
switch (SRB_STATUS(vm_srb->srb_status)) {
case SRB_STATUS_ERROR:
/*
+ * Let upper layer deal with error when
+ * sense message is present.
+ */
+
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
+ break;
+ /*
* If there is an error; offline the device since all
* error recovery strategies would have already been
* deployed on the host side. However, if the command
@@ -953,6 +997,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
struct scsi_cmnd *scmnd = cmd_request->cmd;
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
+ u32 data_transfer_length;
struct Scsi_Host *host;
u32 payload_sz = cmd_request->payload_sz;
void *payload = cmd_request->payload;
@@ -960,6 +1005,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
+ data_transfer_length = vm_srb->data_transfer_length;
scmnd->result = vm_srb->scsi_status;
@@ -973,13 +1019,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
&sense_hdr);
}
- if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
sense_hdr.ascq);
+ /*
+ * The Windows driver sets data_transfer_length on
+ * SRB_STATUS_DATA_OVERRUN. On other errors, this value
+ * is untouched. In these cases we set it to 0.
+ */
+ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
+ data_transfer_length = 0;
+ }
scsi_set_resid(scmnd,
- cmd_request->payload->range.len -
- vm_srb->data_transfer_length);
+ cmd_request->payload->range.len - data_transfer_length);
scmnd->scsi_done(scmnd);
@@ -1198,17 +1251,64 @@ static int storvsc_dev_remove(struct hv_device *device)
/* Close the channel */
vmbus_close(device->channel);
+ kfree(stor_device->stor_chns);
kfree(stor_device);
return 0;
}
+static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
+ u16 q_num)
+{
+ u16 slot = 0;
+ u16 hash_qnum;
+ struct cpumask alloced_mask;
+ int num_channels, tgt_cpu;
+
+ if (stor_device->num_sc == 0)
+ return stor_device->device->channel;
+
+ /*
+ * Our channel array is sparsely populated and we
+ * initiated I/O on a processor/hw-q that does not
+ * currently have a designated channel. Fix this.
+ * The strategy is simple:
+ * I. Ensure NUMA locality
+ * II. Distribute evenly (best effort)
+ * III. Mapping is persistent.
+ */
+
+ cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+ cpumask_of_node(cpu_to_node(q_num)));
+
+ num_channels = cpumask_weight(&alloced_mask);
+ if (num_channels == 0)
+ return stor_device->device->channel;
+
+ hash_qnum = q_num;
+ while (hash_qnum >= num_channels)
+ hash_qnum -= num_channels;
+
+ for_each_cpu(tgt_cpu, &alloced_mask) {
+ if (slot == hash_qnum)
+ break;
+ slot++;
+ }
+
+ stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];
+
+ return stor_device->stor_chns[q_num];
+}
+
+
static int storvsc_do_io(struct hv_device *device,
- struct storvsc_cmd_request *request)
+ struct storvsc_cmd_request *request, u16 q_num)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
struct vmbus_channel *outgoing_channel;
int ret = 0;
+ struct cpumask alloced_mask;
+ int tgt_cpu;
vstor_packet = &request->vstor_packet;
stor_device = get_out_stor_device(device);
@@ -1222,7 +1322,26 @@ static int storvsc_do_io(struct hv_device *device,
* Select an appropriate channel to send the request out.
*/
- outgoing_channel = vmbus_get_outgoing_channel(device->channel);
+ if (stor_device->stor_chns[q_num] != NULL) {
+ outgoing_channel = stor_device->stor_chns[q_num];
+ if (outgoing_channel->target_cpu == smp_processor_id()) {
+ /*
+ * Ideally, we want to pick a different channel if
+ * available on the same NUMA node.
+ */
+ cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+ cpumask_of_node(cpu_to_node(q_num)));
+ for_each_cpu(tgt_cpu, &alloced_mask) {
+ if (tgt_cpu != outgoing_channel->target_cpu) {
+ outgoing_channel =
+ stor_device->stor_chns[tgt_cpu];
+ break;
+ }
+ }
+ }
+ } else {
+ outgoing_channel = get_og_chn(stor_device, q_num);
+ }
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
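As a concrete illustration of the channel selection above (hypothetical values):

/*
 * Suppose alloced_cpus ANDed with the node mask yields { 1, 4, 9 },
 * so num_channels = 3, and the I/O arrives on hardware queue q_num = 7.
 * get_og_chn() reduces 7 modulo 3 to hash_qnum = 1, then walks the mask
 * with for_each_cpu(): CPU 1 is slot 0, CPU 4 is slot 1, so queue 7 is
 * persistently bound to the sub-channel whose target CPU is 4.
 */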
@@ -1267,8 +1386,6 @@ static int storvsc_do_io(struct hv_device *device,
static int storvsc_device_configure(struct scsi_device *sdevice)
{
- blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
-
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1451,6 +1568,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
vm_srb->win8_extension.srb_flags |=
SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+ if (scmnd->device->tagged_supported) {
+ vm_srb->win8_extension.srb_flags |=
+ (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
+ vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
+ vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
+ }
+
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
@@ -1511,20 +1635,14 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
page_to_pfn(sg_page((cur_sgl)));
cur_sgl = sg_next(cur_sgl);
}
-
- } else if (scsi_sglist(scmnd)) {
- payload->range.len = length;
- payload->range.offset =
- virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- payload->range.pfn_array[0] =
- virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
cmd_request->payload = payload;
cmd_request->payload_sz = payload_sz;
/* Invokes the vsc to start an IO */
- ret = storvsc_do_io(dev, cmd_request);
+ ret = storvsc_do_io(dev, cmd_request, get_cpu());
+ put_cpu();
if (ret == -EAGAIN) {
/* no more space */
@@ -1550,6 +1668,7 @@ static struct scsi_host_template scsi_driver = {
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
+ .track_queue_depth = 1,
};
enum {
@@ -1680,6 +1799,11 @@ static int storvsc_probe(struct hv_device *device,
* from the host.
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+ /*
+ * Set the number of HW queues we are supporting.
+ */
+ if (stor_device->num_sc != 0)
+ host->nr_hw_queues = stor_device->num_sc + 1;
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
@@ -1716,6 +1840,7 @@ err_out2:
goto err_out0;
err_out1:
+ kfree(stor_device->stor_chns);
kfree(stor_device);
err_out0:
@@ -1774,11 +1899,6 @@ static int __init storvsc_drv_init(void)
fc_transport_template = fc_attach_transport(&fc_transport_functions);
if (!fc_transport_template)
return -ENODEV;
-
- /*
- * Install Hyper-V specific timeout handler.
- */
- fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
#endif
ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 88db6992420e..e64b0c542f95 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -34,7 +34,6 @@
#include <asm/dvma.h>
#include <scsi/scsi_host.h>
-#include "sun3_scsi.h"
/* minimum number of bytes to do dma on */
#define DMA_MIN_SIZE 129
@@ -56,11 +55,87 @@
#define NCR5380_dma_send_setup sun3scsi_dma_count
#define NCR5380_dma_residual sun3scsi_dma_residual
-#define NCR5380_acquire_dma_irq(instance) (1)
-#define NCR5380_release_dma_irq(instance)
-
#include "NCR5380.h"
+/* dma regs start at regbase + 8, directly after the NCR regs */
+struct sun3_dma_regs {
+ unsigned short dma_addr_hi; /* vme only */
+ unsigned short dma_addr_lo; /* vme only */
+ unsigned short dma_count_hi; /* vme only */
+ unsigned short dma_count_lo; /* vme only */
+ unsigned short udc_data; /* udc dma data reg (obio only) */
+ unsigned short udc_addr; /* udc dma addr reg (obio only) */
+ unsigned short fifo_data; /* fifo data reg,
+ * holds extra byte on odd dma reads
+ */
+ unsigned short fifo_count;
+ unsigned short csr; /* control/status reg */
+ unsigned short bpack_hi; /* vme only */
+ unsigned short bpack_lo; /* vme only */
+ unsigned short ivect; /* vme only */
+ unsigned short fifo_count_hi; /* vme only */
+};
+
+/* ucd chip specific regs - live in dvma space */
+struct sun3_udc_regs {
+ unsigned short rsel; /* select regs to load */
+ unsigned short addr_hi; /* high word of addr */
+ unsigned short addr_lo; /* low word */
+ unsigned short count; /* words to be xfer'd */
+ unsigned short mode_hi; /* high word of channel mode */
+ unsigned short mode_lo; /* low word of channel mode */
+};
+
+/* addresses of the udc registers */
+#define UDC_MODE 0x38
+#define UDC_CSR 0x2e /* command/status */
+#define UDC_CHN_HI 0x26 /* chain high word */
+#define UDC_CHN_LO 0x22 /* chain lo word */
+#define UDC_CURA_HI 0x1a /* cur reg A high */
+#define UDC_CURA_LO 0x0a /* cur reg A low */
+#define UDC_CURB_HI 0x12 /* cur reg B high */
+#define UDC_CURB_LO 0x02 /* cur reg B low */
+#define UDC_MODE_HI 0x56 /* mode reg high */
+#define UDC_MODE_LO 0x52 /* mode reg low */
+#define UDC_COUNT 0x32 /* words to xfer */
+
+/* some udc commands */
+#define UDC_RESET 0
+#define UDC_CHN_START 0xa0 /* start chain */
+#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
+
+/* udc mode words */
+#define UDC_MODE_HIWORD 0x40
+#define UDC_MODE_LSEND 0xc2
+#define UDC_MODE_LRECV 0xd2
+
+/* udc reg selections */
+#define UDC_RSEL_SEND 0x282
+#define UDC_RSEL_RECV 0x182
+
+/* bits in csr reg */
+#define CSR_DMA_ACTIVE 0x8000
+#define CSR_DMA_CONFLICT 0x4000
+#define CSR_DMA_BUSERR 0x2000
+
+#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
+#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
+#define CSR_DMA_INT 0x100 /* dma interrupt pending */
+
+#define CSR_LEFT 0xc0
+#define CSR_LEFT_3 0xc0
+#define CSR_LEFT_2 0x80
+#define CSR_LEFT_1 0x40
+#define CSR_PACK_ENABLE 0x20
+
+#define CSR_DMA_ENABLE 0x10
+
+#define CSR_SEND 0x8 /* 1 = send 0 = recv */
+#define CSR_FIFO 0x2 /* reset fifo */
+#define CSR_INTR 0x4 /* interrupt enable */
+#define CSR_SCSI 0x1
+
+#define VME_DATA24 0x3d00
extern int sun3_map_test(unsigned long, char *);
@@ -260,7 +335,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
{
int wanted_len = cmd->SCp.this_residual;
- if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
return 0;
return wanted_len;
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
deleted file mode 100644
index d22745fae328..000000000000
--- a/drivers/scsi/sun3_scsi.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
- *
- * Sun3 DMA additions by Sam Creasey (sammy@sammy.net)
- *
- * Adapted from mac_scsinew.h:
- */
-/*
- * Cumana Generic NCR5380 driver defines
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- */
-
-#ifndef SUN3_SCSI_H
-#define SUN3_SCSI_H
-
-/* additional registers - mainly DMA control regs */
-/* these start at regbase + 8 -- directly after the NCR regs */
-struct sun3_dma_regs {
- unsigned short dma_addr_hi; /* vme only */
- unsigned short dma_addr_lo; /* vme only */
- unsigned short dma_count_hi; /* vme only */
- unsigned short dma_count_lo; /* vme only */
- unsigned short udc_data; /* udc dma data reg (obio only) */
- unsigned short udc_addr; /* uda dma addr reg (obio only) */
- unsigned short fifo_data; /* fifo data reg, holds extra byte on
- odd dma reads */
- unsigned short fifo_count;
- unsigned short csr; /* control/status reg */
- unsigned short bpack_hi; /* vme only */
- unsigned short bpack_lo; /* vme only */
- unsigned short ivect; /* vme only */
- unsigned short fifo_count_hi; /* vme only */
-};
-
-/* ucd chip specific regs - live in dvma space */
-struct sun3_udc_regs {
- unsigned short rsel; /* select regs to load */
- unsigned short addr_hi; /* high word of addr */
- unsigned short addr_lo; /* low word */
- unsigned short count; /* words to be xfer'd */
- unsigned short mode_hi; /* high word of channel mode */
- unsigned short mode_lo; /* low word of channel mode */
-};
-
-/* addresses of the udc registers */
-#define UDC_MODE 0x38
-#define UDC_CSR 0x2e /* command/status */
-#define UDC_CHN_HI 0x26 /* chain high word */
-#define UDC_CHN_LO 0x22 /* chain lo word */
-#define UDC_CURA_HI 0x1a /* cur reg A high */
-#define UDC_CURA_LO 0x0a /* cur reg A low */
-#define UDC_CURB_HI 0x12 /* cur reg B high */
-#define UDC_CURB_LO 0x02 /* cur reg B low */
-#define UDC_MODE_HI 0x56 /* mode reg high */
-#define UDC_MODE_LO 0x52 /* mode reg low */
-#define UDC_COUNT 0x32 /* words to xfer */
-
-/* some udc commands */
-#define UDC_RESET 0
-#define UDC_CHN_START 0xa0 /* start chain */
-#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
-
-/* udc mode words */
-#define UDC_MODE_HIWORD 0x40
-#define UDC_MODE_LSEND 0xc2
-#define UDC_MODE_LRECV 0xd2
-
-/* udc reg selections */
-#define UDC_RSEL_SEND 0x282
-#define UDC_RSEL_RECV 0x182
-
-/* bits in csr reg */
-#define CSR_DMA_ACTIVE 0x8000
-#define CSR_DMA_CONFLICT 0x4000
-#define CSR_DMA_BUSERR 0x2000
-
-#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
-#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
-#define CSR_DMA_INT 0x100 /* dma interrupt pending */
-
-#define CSR_LEFT 0xc0
-#define CSR_LEFT_3 0xc0
-#define CSR_LEFT_2 0x80
-#define CSR_LEFT_1 0x40
-#define CSR_PACK_ENABLE 0x20
-
-#define CSR_DMA_ENABLE 0x10
-
-#define CSR_SEND 0x8 /* 1 = send 0 = recv */
-#define CSR_FIFO 0x2 /* reset fifo */
-#define CSR_INTR 0x4 /* interrupt enable */
-#define CSR_SCSI 0x1
-
-#define VME_DATA24 0x3d00
-
-#endif /* SUN3_SCSI_H */
-
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index abe617372661..ce5d023c1c91 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1497,17 +1497,21 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- else
+ } else {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+ }
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UAWM;
- host->testbus.select_minor = 1;
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ host->testbus.select_minor = 37;
}
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -1524,7 +1528,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0x1F) {
+ if (host->testbus.select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
__func__, host->testbus.select_minor);
@@ -1593,7 +1597,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
break;
case TSTBUS_UNIPRO:
reg = UFS_UNIPRO_CFG;
- offset = 1;
+ offset = 20;
+ mask = 0xFFF;
break;
/*
* No need for a default case, since
@@ -1612,6 +1617,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
+ /*
+ * Make sure the test bus configuration is
+ * committed before returning.
+ */
+ mb();
ufshcd_release(host->hba);
pm_runtime_put_sync(host->hba->dev);
@@ -1623,13 +1633,39 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 *testbus = NULL;
+ int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+ testbus = kmalloc(testbus_len, GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ kfree(testbus);
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
+ /* sleep briefly between dumps as we are printing a lot of data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+ usleep_range(1000, 1100);
ufs_qcom_testbus_read(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_print_unipro_testbus(hba);
+ usleep_range(1000, 1100);
}
/**
@@ -1692,6 +1728,7 @@ static const struct of_device_id ufs_qcom_of_match[] = {
{ .compatible = "qcom,ufshc"},
{},
};
+MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
static const struct dev_pm_ops ufs_qcom_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index fe517cd7dac3..076f52813a4c 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -95,6 +95,7 @@ enum {
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN (1 << 0)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 8e6709a3fb6b..318e4a1f76c9 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -523,4 +523,16 @@ struct ufs_dev_info {
bool is_lu_power_on_wp;
};
+#define MAX_MODEL_LEN 16
+/**
+ * ufs_dev_desc - ufs device details from the device descriptor
+ *
+ * @wmanufacturerid: card details
+ * @model: card model
+ */
+struct ufs_dev_desc {
+ u16 wmanufacturerid;
+ char model[MAX_MODEL_LEN + 1];
+};
+
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 08b799d4efcc..71f73d1d1ad1 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -21,41 +21,28 @@
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
-#define MAX_MODEL_LEN 16
-
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
/**
- * ufs_device_info - ufs device details
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_device_info {
- u16 wmanufacturerid;
- char model[MAX_MODEL_LEN + 1];
-};
-
-/**
* ufs_dev_fix - ufs device quirk info
* @card: ufs card details
* @quirk: device quirk
*/
struct ufs_dev_fix {
- struct ufs_device_info card;
+ struct ufs_dev_desc card;
unsigned int quirk;
};
#define END_FIX { { 0 }, 0 }
/* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) \
- { \
- .card.wmanufacturerid = (_vendor),\
- .card.model = (_model), \
- .quirk = (_quirk), \
- }
+#define UFS_FIX(_vendor, _model, _quirk) { \
+ .card.wmanufacturerid = (_vendor),\
+ .card.model = (_model), \
+ .quirk = (_quirk), \
+}
/*
* If UFS device is having issue in processing LCC (Line Control
@@ -144,7 +131,4 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
-struct ufs_hba;
-void ufs_advertise_fixup_device(struct ufs_hba *hba);
-
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 20e5e5fb048c..8b721f431dd0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,9 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -94,6 +97,9 @@
_ret; \
})
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
static u32 ufs_query_desc_max_size[] = {
QUERY_DESC_DEVICE_MAX_SIZE,
QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -185,6 +191,22 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ enum uic_link_state link_state)
+{
+ enum ufs_pm_level lvl;
+
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+ if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+ (ufs_pm_lvl_states[lvl].link_state == link_state))
+ return lvl;
+ }
+
+ /* if no match found, return level 0 */
+ return UFS_PM_LVL_0;
+}
+
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
@@ -212,6 +234,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
@@ -223,6 +246,10 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
@@ -267,6 +294,214 @@ static inline void ufshcd_remove_non_printable(char *val)
*val = ' ';
}
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+ sector_t lba = -1;
+ u8 opcode = 0;
+ u32 intr, doorbell;
+ struct ufshcd_lrb *lrbp;
+ int transfer_len = -1;
+
+ if (!trace_ufshcd_command_enabled())
+ return;
+
+ lrbp = &hba->lrb[tag];
+
+ if (lrbp->cmd) { /* data phase exists */
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba =
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+
+ intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ doorbell, transfer_len, intr, lba, opcode);
+}
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ return;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+ clki->max_freq)
+ dev_err(hba->dev, "clk: %s, rate: %u\n",
+ clki->name, clki->curr_freq);
+ }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+ struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+ int i;
+
+ for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+ int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+ if (err_hist->reg[p] == 0)
+ continue;
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+ err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ }
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ /*
+ * hex_dump reads its data without the readl macro. This might
+ * cause inconsistency issues on some platforms, as the printed
+ * values may be from cache and not the most recent value.
+ * To know whether you are looking at an un-cached version, verify
+ * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
+ * invoked during the platform/PCI probe function.
+ */
+ ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
+ hba->ufs_version, hba->capabilities);
+ dev_err(hba->dev,
+ "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
+ (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+ ufshcd_print_clk_freqs(hba);
+
+ if (hba->vops && hba->vops->dbg_register_dump)
+ hba->vops->dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+ struct ufshcd_lrb *lrbp;
+ int prdt_length;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
+ tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev,
+ "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
+ tag, (u64)lrbp->utrd_dma_addr);
+
+ ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+ sizeof(struct utp_transfer_req_desc));
+ dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_req_dma_addr);
+ ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
+ (u64)lrbp->ucd_rsp_dma_addr);
+ ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+ sizeof(struct utp_upiu_rsp));
+
+ prdt_length = le16_to_cpu(
+ lrbp->utr_descriptor_ptr->prd_table_length);
+ dev_err(hba->dev,
+ "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
+ tag, prdt_length,
+ (u64)lrbp->ucd_prdt_dma_addr);
+
+ if (pr_prdt)
+ ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+ sizeof(struct ufshcd_sg_entry) * prdt_length);
+ }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct utp_task_req_desc *tmrdp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+ tmrdp = &hba->utmrdl_base_addr[tag];
+ dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
+ ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+ sizeof(struct request_desc_header));
+ dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
+ tag);
+ ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+ sizeof(struct utp_task_req_desc));
+ }
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+ dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+ dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+ dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ hba->saved_err, hba->saved_uic_err);
+ dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+ hba->pm_op_in_progress, hba->is_sys_suspended);
+ dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+ hba->auto_bkops_enabled, hba->host->host_self_blocked);
+ dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+ hba->eh_flags, hba->req_abort_count);
+ dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+ hba->capabilities, hba->caps);
+ dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+ hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ static const char * const names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ __func__,
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -605,6 +840,28 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
+static const char *ufschd_uic_link_state_to_string(
+ enum uic_link_state state)
+{
+ switch (state) {
+ case UIC_LINK_OFF_STATE: return "OFF";
+ case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
+ case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+ enum ufs_dev_pwr_mode state)
+{
+ switch (state) {
+ case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
+ case UFS_SLEEP_PWR_MODE: return "SLEEP";
+ case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
+ default: return "UNKNOWN";
+ }
+}
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
@@ -633,20 +890,523 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled up", clki->name,
+ clki->curr_freq,
+ clki->max_freq);
+
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+
+ clk_state_changed = true;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled down", clki->name,
+ clki->curr_freq,
+ clki->min_freq);
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+ return ret;
+}
+
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+ bool scale_up)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ return false;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ return true;
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
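+/*
+ * Wait, with a timeout, for both the task management and transfer request
+ * doorbell registers to clear. The host lock is dropped around schedule()
+ * so other contexts can complete requests while this function yields.
+ */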
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+ u64 wait_timeout_us)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 tm_doorbell;
+ u32 tr_doorbell;
+ bool timeout = false, do_last_check = false;
+ ktime_t start;
+
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Wait for all the outstanding tasks/transfer requests.
+ * Verify by checking the doorbell registers are clear.
+ */
+ start = ktime_get();
+ do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!tm_doorbell && !tr_doorbell) {
+ timeout = false;
+ break;
+ } else if (do_last_check) {
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+ wait_timeout_us) {
+ timeout = true;
+ /*
+ * We might have been scheduled out for a long time, so
+ * check whether the doorbells have been cleared by now.
+ */
+ do_last_check = true;
+ }
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (tm_doorbell || tr_doorbell);
+
+ if (timeout) {
+ dev_err(hba->dev,
+ "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+ __func__, tm_doorbell, tr_doorbell);
+ ret = -EBUSY;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+ #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
+ int ret = 0;
+ struct ufs_pa_layer_attr new_pwr_info;
+
+ if (scale_up) {
+ memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+ sizeof(struct ufs_pa_layer_attr));
+ } else {
+ memcpy(&new_pwr_info, &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
+ || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ /* save the current power mode */
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ /* scale down gear */
+ new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ }
+ }
+
+ /* check if the power mode needs to be changed */
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+ if (ret)
+ dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+ __func__, ret,
+ hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+ new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+ return ret;
+}
+
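+/*
+ * ufshcd_clock_scaling_prepare()/unprepare() bracket a scaling operation.
+ * Taking clk_scaling_lock for write excludes the SCSI queuecommand and
+ * device-command paths, which take the same lock for read, so no new
+ * requests can be issued while clocks or gear are being changed.
+ */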
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+ #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+ int ret = 0;
+ /*
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+ scsi_block_requests(hba->host);
+ down_write(&hba->clk_scaling_lock);
+ if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ scsi_unblock_requests(hba->host);
+ }
+
+ return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+ scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+ return ret;
+
+ /* scale down the gear before scaling down clocks */
+ if (!scale_up) {
+ ret = ufshcd_scale_gear(hba, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_scale_clks(hba, scale_up);
+ if (ret) {
+ if (!scale_up)
+ ufshcd_scale_gear(hba, true);
+ goto out;
+ }
+
+ /* scale up the gear after scaling up clocks */
+ if (scale_up) {
+ ret = ufshcd_scale_gear(hba, true);
+ if (ret) {
+ ufshcd_scale_clks(hba, false);
+ goto out;
+ }
+ }
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+ ufshcd_clock_scaling_unprepare(hba);
+ ufshcd_release(hba);
+ return ret;
+}
+
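+/*
+ * The two work items below park and restart devfreq monitoring based on
+ * host activity: suspend_work is queued from the devfreq target() path when
+ * no requests are active, and resume_work is queued from
+ * ufshcd_clk_scaling_start_busy() when a new request arrives.
+ */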
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.suspend_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = true;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.resume_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (!hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ devfreq_resume_device(hba->devfreq);
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ ktime_t start;
+ bool scale_up, sched_clk_scaling_suspend_work = false;
+ unsigned long irq_flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ if ((*freq > 0) && (*freq < UINT_MAX)) {
+ dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
+
+ scale_up = (*freq == UINT_MAX) ? true : false;
+ if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ ret = 0;
+ goto out; /* no state change required */
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ start = ktime_get();
+ ret = ufshcd_devfreq_scale(hba, scale_up);
+
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+ if (sched_clk_scaling_suspend_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.suspend_work);
+
+ return ret;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = 0;
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
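+/*
+ * With this profile, the simple_ondemand governor polls
+ * ufshcd_devfreq_get_dev_status() every 100 ms; busy time accumulates while
+ * requests are outstanding and the measurement window restarts on each poll.
+ * The requested frequency is mapped to a binary decision in
+ * ufshcd_devfreq_target(): UINT_MAX scales up, 0 scales down, and any other
+ * value is rejected as invalid.
+ */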
+
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ devfreq_suspend_device(hba->devfreq);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.window_start_t = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ unsigned long flags;
+ bool suspend = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (suspend)
+ __ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- if (ufshcd_is_clkscaling_enabled(hba))
+ unsigned long flags;
+ bool resume = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (resume)
devfreq_resume_device(hba->devfreq);
}
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_scaling.is_allowed)
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+
+ hba->clk_scaling.is_allowed = value;
+
+ if (value) {
+ ufshcd_resume_clkscaling(hba);
+ } else {
+ ufshcd_suspend_clkscaling(hba);
+ err = ufshcd_devfreq_scale(hba, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ }
+
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return count;
+}
+
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+ hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+ hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+ sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+ hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+ hba->clk_scaling.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
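+
+/*
+ * Example usage from user space (the exact sysfs path depends on how the
+ * host controller device is named on a given platform):
+ *   echo 0 > /sys/devices/.../clkscale_enable   - disable scaling; clocks are
+ *                                                 scaled up and stay at max
+ *   echo 1 > /sys/devices/.../clkscale_enable   - re-enable devfreq scaling
+ */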
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -680,7 +1440,6 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -727,6 +1486,8 @@ start:
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
break;
}
/*
@@ -737,6 +1498,8 @@ start:
case CLKS_OFF:
scsi_block_requests(hba->host);
hba->clk_gating.state = REQ_CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
schedule_work(&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
@@ -781,6 +1544,8 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.is_suspended ||
(hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
goto rel_lock;
}
@@ -796,13 +1561,13 @@ static void ufshcd_gate_work(struct work_struct *work)
if (ufshcd_can_hibern8_during_gating(hba)) {
if (ufshcd_uic_hibern8_enter(hba)) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
goto out;
}
ufshcd_set_link_hibern8(hba);
}
- ufshcd_suspend_clkscaling(hba);
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -819,9 +1584,11 @@ static void ufshcd_gate_work(struct work_struct *work)
* new requests arriving before the current cancel work is done.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == REQ_CLKS_OFF)
+ if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
-
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
rel_lock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
@@ -844,6 +1611,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
schedule_delayed_work(&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
}
@@ -881,6 +1649,41 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
return count;
}
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_gating.is_enabled)
+ goto out;
+
+ if (value) {
+ ufshcd_release(hba);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->clk_gating.is_enabled = value;
+out:
+ return count;
+}
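+
+/*
+ * Writing 0 to clkgate_enable takes an extra clk_gating.active_reqs
+ * reference so the gate work never sees the count reach zero; writing 1
+ * drops that reference again via ufshcd_release().
+ */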
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
@@ -890,13 +1693,23 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ hba->clk_gating.is_enabled = true;
+
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ hba->clk_gating.delay_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
@@ -904,6 +1717,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba))
return;
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
@@ -911,9 +1725,27 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkscaling_enabled(hba))
+ bool queue_resume_work = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.active_reqs++)
+ queue_resume_work = true;
+
+ if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
return;
+ if (queue_resume_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.resume_work);
+
+ if (!hba->clk_scaling.window_start_t) {
+ hba->clk_scaling.window_start_t = jiffies;
+ hba->clk_scaling.tot_busy_t = 0;
+ hba->clk_scaling.is_busy_started = false;
+ }
+
if (!hba->clk_scaling.is_busy_started) {
hba->clk_scaling.busy_start_t = ktime_get();
hba->clk_scaling.is_busy_started = true;
@@ -924,7 +1756,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- if (!ufshcd_is_clkscaling_enabled(hba))
+ if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -942,11 +1774,13 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
+ ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
@@ -1484,6 +2318,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
BUG();
}
+ if (!down_read_trylock(&hba->clk_scaling_lock))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
@@ -1512,6 +2349,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ hba->req_abort_count = 0;
+
/* acquire the tag to make sure device cmds don't use it */
if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
/*
@@ -1541,6 +2380,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+ lrbp->req_abort_skip = false;
ufshcd_comp_scsi_upiu(hba, lrbp);
@@ -1560,6 +2400,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
+ up_read(&hba->clk_scaling_lock);
return err;
}
@@ -1622,6 +2463,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
int resp;
int err = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
switch (resp) {
@@ -1748,6 +2590,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
struct completion wait;
unsigned long flags;
+ down_read(&hba->clk_scaling_lock);
+
/*
* Get free slot, sleep if slots are unavailable.
* Even though we use wait_event() which sleeps indefinitely,
@@ -1776,6 +2620,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
return err;
}
@@ -2073,9 +2918,11 @@ out:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*/
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum desc_idn idn, u8 index,
- u8 selector, u8 *desc_buf, int *buf_len)
+static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len)
{
int err;
int retries;
@@ -2089,7 +2936,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
return err;
}
-EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
/**
* ufshcd_read_desc_param - read the specified descriptor parameter
@@ -2207,11 +3053,10 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
return err;
}
-int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
-EXPORT_SYMBOL(ufshcd_read_device_desc);
/**
* ufshcd_read_string_desc - read string descriptor
@@ -2223,8 +3068,9 @@ EXPORT_SYMBOL(ufshcd_read_device_desc);
*
* Return 0 in case of success, non-zero otherwise
*/
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
- u32 size, bool ascii)
+#define ASCII_STD true
+static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
{
int err = 0;
@@ -2280,7 +3126,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
out:
return err;
}
-EXPORT_SYMBOL(ufshcd_read_string_desc);
/**
* ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
@@ -2453,12 +3298,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
}
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+ (i * sizeof(struct utp_transfer_req_desc));
hba->lrb[i].ucd_req_ptr =
(struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
hba->lrb[i].ucd_rsp_ptr =
(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+ response_offset;
hba->lrb[i].ucd_prdt_ptr =
(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+ prdt_offset;
}
}
@@ -2482,7 +3334,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
- dev_err(hba->dev,
+ dev_dbg(hba->dev,
"dme-link-startup: error code %d\n", ret);
return ret;
}
@@ -2702,6 +3554,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ret = (status != PWR_OK) ? status : -1;
}
out:
+ if (ret) {
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
@@ -2776,11 +3634,14 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
@@ -2816,18 +3677,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
+ ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- } else
+ } else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.hibern8_exit_cnt++;
+ }
return ret;
}
@@ -2994,6 +3862,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_print_pwr_info(hba);
return ret;
}
@@ -3265,6 +4135,10 @@ link_startup:
goto link_startup;
}
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+ ufshcd_print_pwr_info(hba);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3278,8 +4152,12 @@ link_startup:
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret)
+ if (ret) {
dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
return ret;
}
@@ -3591,7 +4469,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
switch (ocs) {
case OCS_SUCCESS:
result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
switch (result) {
case UPIU_TRANSACTION_RESPONSE:
/*
@@ -3652,10 +4530,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
default:
result |= DID_ERROR << 16;
dev_err(hba->dev,
- "OCS error from controller = %x\n", ocs);
+ "OCS error from controller = %x for tag %d\n",
+ ocs, lrbp->task_tag);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
break;
} /* end of switch */
+ if (host_byte(result) != DID_OK)
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -3695,6 +4578,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp = &hba->lrb[index];
cmd = lrbp->cmd;
if (cmd) {
+ ufshcd_add_command_trace(hba, index, "complete");
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
@@ -3706,9 +4590,16 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
__ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete)
+ if (hba->dev_cmd.complete) {
+ ufshcd_add_command_trace(hba, index,
+ "dev_complete");
complete(hba->dev_cmd.complete);
+ }
}
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
}
/* clear corresponding bits of completed commands */
@@ -3828,6 +4719,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3878,23 +4770,31 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
out:
return err;
}
/**
- * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
* @hba: per adapter instance
*
* After a device reset the device may toggle the BKOPS_EN flag
* to default value. The s/w tracking variables should be updated
- * as well. Do this by forcing enable of auto bkops.
+ * as well. This function changes the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
*/
-static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
- hba->auto_bkops_enabled = false;
- hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
- ufshcd_enable_auto_bkops(hba);
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+ } else {
+ hba->auto_bkops_enabled = true;
+ hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+ ufshcd_disable_auto_bkops(hba);
+ }
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -4246,6 +5146,14 @@ out:
pm_runtime_put_sync(hba->dev);
}
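+
+/*
+ * Record a raw UIC error register value in a small per-error-class ring
+ * buffer (UIC_ERR_REG_HIST_LENGTH entries) together with a capture
+ * timestamp, so the recent error sequence can be reconstructed from a
+ * later debug dump.
+ */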
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+ u32 reg)
+{
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
@@ -4258,15 +5166,20 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+ }
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg)
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
else if (hba->dev_quirks &
@@ -4280,16 +5193,22 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg)
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ }
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg)
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ }
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg)
+ if (reg) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ }
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
@@ -4327,6 +5246,22 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
scsi_block_requests(hba->host);
hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+
+ /* dump controller state before resetting */
+ if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ bool pr_prdt = !!(hba->saved_err &
+ SYSTEM_BUS_FATAL_ERROR);
+
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ __func__, hba->saved_err,
+ hba->saved_uic_err);
+
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs,
+ pr_prdt);
+ }
schedule_work(&hba->eh_work);
}
}
@@ -4557,7 +5492,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(host->host_lock, flags);
ufshcd_transfer_req_compl(hba);
spin_unlock_irqrestore(host->host_lock, flags);
+
out:
+ hba->req_abort_count = 0;
if (!err) {
err = SUCCESS;
} else {
@@ -4567,6 +5504,17 @@ out:
return err;
}
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+ lrbp->req_abort_skip = true;
+ }
+}
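+
+/*
+ * ufshcd_abort() calls this helper after a failed abort so that subsequent
+ * aborts of the remaining outstanding requests fail fast with -EIO, letting
+ * SCSI error handling escalate to a full reset instead of issuing more task
+ * management commands that are unlikely to succeed.
+ */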
+
/**
* ufshcd_abort - abort a specific command
* @cmd: SCSI command pointer
@@ -4594,6 +5542,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
+ lrbp = &hba->lrb[tag];
if (!ufshcd_valid_tag(hba, tag)) {
dev_err(hba->dev,
"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
@@ -4601,6 +5550,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
BUG();
}
+ /*
+ * Task aborts to the device W-LUN are illegal. Such an abort would
+ * fail as a spec violation, and the SCSI error handler's next step
+ * would be an LU reset, which is again a spec violation. To avoid
+ * these unnecessary/illegal steps, skip straight to the last error
+ * handling stage: reset and restore.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ return ufshcd_eh_host_reset_handler(cmd);
+
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
@@ -4617,18 +5576,48 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
__func__, tag);
}
- lrbp = &hba->lrb[tag];
+ /* Print Transfer Request of aborted task */
+ dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+
+ /*
+ * Print detailed info about aborted request.
+ * As more than one request might get aborted at the same time,
+ * print full information only for the first aborted request in order
+ * to reduce repeated printouts. For other aborted requests only print
+ * basic details.
+ */
+ scsi_print_command(hba->lrb[tag].cmd);
+ if (!hba->req_abort_count) {
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_trs(hba, 1 << tag, true);
+ } else {
+ ufshcd_print_trs(hba, 1 << tag, false);
+ }
+ hba->req_abort_count++;
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ err = -EIO;
+ goto out;
+ }
+
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_QUERY_TASK, &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
break;
} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
/*
* cmd not pending in the device, check if it is
* in transition.
*/
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (reg & (1 << tag)) {
/* sleep for max. 200us to stabilize */
@@ -4636,8 +5625,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
continue;
}
/* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
goto out;
} else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
if (!err)
err = resp; /* service response error */
goto out;
@@ -4652,14 +5646,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
+ if (!err) {
err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
goto out;
}
err = ufshcd_clear_cmd(hba, tag);
- if (err)
+ if (err) {
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
goto out;
+ }
scsi_dma_unmap(cmd);
@@ -4676,6 +5676,7 @@ out:
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
}
@@ -4707,6 +5708,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* scale up clocks to max frequency before full reinitialization */
+ ufshcd_scale_clks(hba, true);
+
err = ufshcd_hba_enable(hba);
if (err)
goto out;
@@ -4822,7 +5826,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
u16 unit;
for (i = start_scan; i >= 0; i--) {
- data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+ data = be16_to_cpup((__be16 *)&buff[2 * i]);
unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
ATTR_ICC_LVL_UNIT_OFFSET;
curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
@@ -5008,8 +6012,8 @@ out:
return ret;
}
-static int ufs_get_device_info(struct ufs_hba *hba,
- struct ufs_device_info *card_data)
+static int ufs_get_device_desc(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
{
int err;
u8 model_index;
@@ -5028,7 +6032,7 @@ static int ufs_get_device_info(struct ufs_hba *hba,
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
- card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -5042,36 +6046,26 @@ static int ufs_get_device_info(struct ufs_hba *hba,
}
str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
- strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
/* Null terminate the model string */
- card_data->model[MAX_MODEL_LEN] = '\0';
+ dev_desc->model[MAX_MODEL_LEN] = '\0';
out:
return err;
}
-void ufs_advertise_fixup_device(struct ufs_hba *hba)
+static void ufs_fixup_device_setup(struct ufs_hba *hba,
+ struct ufs_dev_desc *dev_desc)
{
- int err;
struct ufs_dev_fix *f;
- struct ufs_device_info card_data;
-
- card_data.wmanufacturerid = 0;
-
- err = ufs_get_device_info(hba, &card_data);
- if (err) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, err);
- return;
- }
for (f = ufs_fixups; f->quirk; f++) {
- if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
- (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
- (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
+ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
+ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
!strcmp(f->card.model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
@@ -5241,6 +6235,22 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
ufshcd_vops_apply_dev_quirks(hba);
}
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+ int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+ hba->ufs_stats.hibern8_exit_cnt = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+ memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+ hba->req_abort_count = 0;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -5249,18 +6259,21 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
*/
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
+ struct ufs_dev_desc card = {0};
int ret;
+ ktime_t start = ktime_get();
ret = ufshcd_link_startup(hba);
if (ret)
goto out;
- ufshcd_init_pwr_info(hba);
-
/* set the default level for urgent bkops */
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba->is_urgent_bkops_lvl_checked = false;
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
/* UniPro link is active now */
ufshcd_set_link_active(hba);
@@ -5272,7 +6285,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
- ufs_advertise_fixup_device(hba);
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba, &card);
ufshcd_tune_unipro_params(hba);
ret = ufshcd_set_vccq_rail_unused(hba,
@@ -5320,6 +6340,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ufshcd_scsi_add_wlus(hba))
goto out;
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ hba->devfreq = devm_devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ "simple_ondemand",
+ NULL);
+ if (IS_ERR(hba->devfreq)) {
+ ret = PTR_ERR(hba->devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n",
+ ret);
+ goto out;
+ }
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
@@ -5327,9 +6368,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (!hba->is_init_prefetch)
hba->is_init_prefetch = true;
- /* Resume devfreq after UFS device is detected */
- ufshcd_resume_clkscaling(hba);
-
out:
/*
* If we failed to initialize the device or the device is not
@@ -5340,6 +6378,9 @@ out:
ufshcd_hba_exit(hba);
}
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
@@ -5650,6 +6691,8 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
unsigned long flags;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
if (!head || list_empty(head))
goto out;
@@ -5663,6 +6706,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
continue;
+ clk_state_changed = on ^ clki->enabled;
if (on && !clki->enabled) {
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -5689,11 +6733,18 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (on) {
+ } else if (!ret && on) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ (on ? "on" : "off"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
@@ -5835,6 +6886,11 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
ufshcd_suspend_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ }
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -6110,7 +7166,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
- ufshcd_suspend_clkscaling(hba);
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -6176,6 +7236,7 @@ disable_clks:
__ufshcd_setup_clocks(hba, false, true);
hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
/*
* Disable the host irq as host controller as there won't be any
* host controller transaction expected till resume.
@@ -6186,7 +7247,8 @@ disable_clks:
goto out;
set_link_active:
- ufshcd_resume_clkscaling(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -6196,7 +7258,8 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
- ufshcd_resume_clkscaling(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
@@ -6268,14 +7331,19 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto set_old_link_state;
}
- /*
- * If BKOPs operations are urgently needed at this moment then
- * keep auto-bkops enabled or else disable it.
- */
- ufshcd_urgent_bkops(hba);
+ if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+ ufshcd_enable_auto_bkops(hba);
+ else
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+
hba->clk_gating.is_suspended = false;
- ufshcd_resume_clkscaling(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6289,7 +7357,8 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
- ufshcd_suspend_clkscaling(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6308,6 +7377,7 @@ out:
int ufshcd_system_suspend(struct ufs_hba *hba)
{
int ret = 0;
+ ktime_t start = ktime_get();
if (!hba || !hba->is_powered)
return 0;
@@ -6334,6 +7404,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
+ trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = true;
return ret;
@@ -6349,6 +7422,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
@@ -6357,9 +7433,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
* Let the runtime resume take care of resuming
* if runtime suspended.
*/
- return 0;
-
- return ufshcd_resume(hba, UFS_SYSTEM_PM);
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -6373,13 +7454,21 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
if (!hba->is_powered)
- return 0;
-
- return ufshcd_suspend(hba, UFS_RUNTIME_PM);
+ goto out;
+ else
+ ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
@@ -6406,13 +7495,21 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
if (!hba->is_powered)
- return 0;
-
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6422,6 +7519,127 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ bool rpm)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (rpm)
+ hba->rpm_lvl = value;
+ else
+ hba->spm_lvl = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int curr_len;
+ u8 lvl;
+
+ curr_len = snprintf(buf, PAGE_SIZE,
+ "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+ hba->rpm_lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\nAll available Runtime PM levels info:\n");
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+ lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[lvl].link_state));
+
+ return curr_len;
+}
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+ hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+ hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+ sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+ hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+ hba->rpm_lvl_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+ dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int curr_len;
+ u8 lvl;
+
+ curr_len = snprintf(buf, PAGE_SIZE,
+ "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+ hba->spm_lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\nAll available System PM levels info:\n");
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+ lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[lvl].link_state));
+
+ return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+ hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+ hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+ sysfs_attr_init(&hba->spm_lvl_attr.attr);
+ hba->spm_lvl_attr.attr.name = "spm_lvl";
+ hba->spm_lvl_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+ dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+ ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+ ufshcd_add_spm_lvl_sysfs_nodes(hba);
+}
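+
+/*
+ * Example usage from user space (the exact sysfs path depends on the
+ * platform device name): reading rpm_lvl or spm_lvl lists the current and
+ * all supported PM levels with their device and link states, and writing a
+ * valid level index selects it, e.g.:
+ *   cat /sys/devices/.../rpm_lvl
+ *   echo 1 > /sys/devices/.../spm_lvl
+ */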
+
/**
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
@@ -6465,6 +7683,8 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_hba_stop(hba, true);
ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6531,149 +7751,6 @@ out_error:
}
EXPORT_SYMBOL(ufshcd_alloc_host);
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (!head || list_empty(head))
- goto out;
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- return ret;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (scale_up && clki->max_freq) {
- if (clki->curr_freq == clki->max_freq)
- continue;
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- break;
- }
- clki->curr_freq = clki->max_freq;
-
- } else if (!scale_up && clki->min_freq) {
- if (clki->curr_freq == clki->min_freq)
- continue;
- ret = clk_set_rate(clki->clk, clki->min_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->min_freq, ret);
- break;
- }
- clki->curr_freq = clki->min_freq;
- }
- }
- dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
- }
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
- return ret;
-}
-
-static int ufshcd_devfreq_target(struct device *dev,
- unsigned long *freq, u32 flags)
-{
- int err = 0;
- struct ufs_hba *hba = dev_get_drvdata(dev);
- bool release_clk_hold = false;
- unsigned long irq_flags;
-
- if (!ufshcd_is_clkscaling_enabled(hba))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
-
- if (ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON)) {
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
- /* hold the vote until the scaling work is completed */
- hba->clk_gating.active_reqs++;
- release_clk_hold = true;
- hba->clk_gating.state = CLKS_ON;
- } else {
- /*
- * Clock gating work seems to be running in parallel
- * hence skip scaling work to avoid deadlock between
- * current scaling work and gating work.
- */
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
- }
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- if (*freq == UINT_MAX)
- err = ufshcd_scale_clks(hba, true);
- else if (*freq == 0)
- err = ufshcd_scale_clks(hba, false);
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (release_clk_hold)
- __ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- return err;
-}
-
-static int ufshcd_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
-
- if (!ufshcd_is_clkscaling_enabled(hba))
- return -EINVAL;
-
- memset(stat, 0, sizeof(*stat));
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!scaling->window_start_t)
- goto start_window;
-
- if (scaling->is_busy_started)
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
- scaling->busy_start_t));
-
- stat->total_time = jiffies_to_usecs((long)jiffies -
- (long)scaling->window_start_t);
- stat->busy_time = scaling->tot_busy_t;
-start_window:
- scaling->window_start_t = jiffies;
- scaling->tot_busy_t = 0;
-
- if (hba->outstanding_reqs) {
- scaling->busy_start_t = ktime_get();
- scaling->is_busy_started = true;
- } else {
- scaling->busy_start_t = 0;
- scaling->is_busy_started = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
-}
-
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
@@ -6757,6 +7834,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
+ init_rwsem(&hba->clk_scaling_lock);
+
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
@@ -6795,22 +7874,38 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
goto out_remove_scsi_host;
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(hba->devfreq)) {
- dev_err(hba->dev, "Unable to register with devfreq %ld\n",
- PTR_ERR(hba->devfreq));
- err = PTR_ERR(hba->devfreq);
- goto out_remove_scsi_host;
- }
- /* Suspend devfreq until the UFS device is detected */
- ufshcd_suspend_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+ host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
}
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
@@ -6823,6 +7918,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
+ ufshcd_add_sysfs_nodes(hba);
return 0;
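
The default rpm_lvl/spm_lvl values set above are derived from a (device power mode, UIC link state) pair. A plausible sketch of such a lookup, assuming a table of supported combinations like the ufs_pm_lvl_states array referenced in the ufshcd.h hunk below; illustrative only, not the exact helper from the patch:

    static enum ufs_pm_level
    example_get_pm_lvl_for_state(enum ufs_dev_pwr_mode dev_state,
    			     enum uic_link_state link_state)
    {
    	enum ufs_pm_level lvl;

    	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
    		if (ufs_pm_lvl_states[lvl].dev_state == dev_state &&
    		    ufs_pm_lvl_states[lvl].link_state == link_state)
    			return lvl;

    	/* fall back to the lowest (most active) level if nothing matches */
    	return UFS_PM_LVL_0;
    }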
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 08cd26ed2382..7630600217a2 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -45,6 +45,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -152,6 +153,10 @@ struct ufs_pm_lvl_states {
* @ucd_req_ptr: UCD address of the command
* @ucd_rsp_ptr: Response UPIU address for this command
* @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
* @cmd: pointer to SCSI command
* @sense_buffer: pointer to sense buffer address of the SCSI command
* @sense_bufflen: Length of the sense buffer
@@ -160,6 +165,8 @@ struct ufs_pm_lvl_states {
* @task_tag: Task tag of the command
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes
+ * @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -167,6 +174,11 @@ struct ufshcd_lrb {
struct utp_upiu_rsp *ucd_rsp_ptr;
struct ufshcd_sg_entry *ucd_prdt_ptr;
+ dma_addr_t utrd_dma_addr;
+ dma_addr_t ucd_req_dma_addr;
+ dma_addr_t ucd_rsp_dma_addr;
+ dma_addr_t ucd_prdt_dma_addr;
+
struct scsi_cmnd *cmd;
u8 *sense_buffer;
unsigned int sense_bufflen;
@@ -176,6 +188,9 @@ struct ufshcd_lrb {
int task_tag;
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
+ ktime_t issue_time_stamp;
+
+ bool req_abort_skip;
};
/**
@@ -320,6 +335,8 @@ enum clk_gating_state {
* @is_suspended: clk gating is suspended when set to 1 which can be used
* during suspend/resume
* @delay_attr: sysfs attribute to control delay_attr
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
*/
@@ -330,14 +347,47 @@ struct ufs_clk_gating {
unsigned long delay_ms;
bool is_suspended;
struct device_attribute delay_attr;
+ struct device_attribute enable_attr;
+ bool is_enabled;
int active_reqs;
};
+struct ufs_saved_pwr_info {
+ struct ufs_pa_layer_attr info;
+ bool is_valid;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when
+ * devfreq ->target() function is called then schedule "suspend_work" to
+ * suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: UFS power mode may also be changed during scaling and this
+ * field keeps track of the previous power mode.
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ */
struct ufs_clk_scaling {
- ktime_t busy_start_t;
- bool is_busy_started;
- unsigned long tot_busy_t;
+ int active_reqs;
+ unsigned long tot_busy_t;
unsigned long window_start_t;
+ ktime_t busy_start_t;
+ struct device_attribute enable_attr;
+ struct ufs_saved_pwr_info saved_pwr_info;
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+ bool is_allowed;
+ bool is_busy_started;
+ bool is_suspended;
};
/**
@@ -349,6 +399,41 @@ struct ufs_init_prefetch {
u32 icc_level;
};
+#define UIC_ERR_REG_HIST_LENGTH 8
+/**
+ * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * @pos: index to indicate cyclic buffer position
+ * @reg: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ */
+struct ufs_uic_err_reg_hist {
+ int pos;
+ u32 reg[UIC_ERR_REG_HIST_LENGTH];
+ ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+};
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @hibern8_exit_cnt: Counter to keep track of number of exits,
+ * reset this after link-startup.
+ * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
+ * Clear after the first successful command completion.
+ * @pa_err: tracks pa-uic errors
+ * @dl_err: tracks dl-uic errors
+ * @nl_err: tracks nl-uic errors
+ * @tl_err: tracks tl-uic errors
+ * @dme_err: tracks dme errors
+ */
+struct ufs_stats {
+ u32 hibern8_exit_cnt;
+ ktime_t last_hibern8_exit_tstamp;
+ struct ufs_uic_err_reg_hist pa_err;
+ struct ufs_uic_err_reg_hist dl_err;
+ struct ufs_uic_err_reg_hist nl_err;
+ struct ufs_uic_err_reg_hist tl_err;
+ struct ufs_uic_err_reg_hist dme_err;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -429,6 +514,8 @@ struct ufs_hba {
enum ufs_pm_level rpm_lvl;
/* Desired UFS power management level during system PM */
enum ufs_pm_level spm_lvl;
+ struct device_attribute rpm_lvl_attr;
+ struct device_attribute spm_lvl_attr;
int pm_op_in_progress;
struct ufshcd_lrb *lrb;
@@ -523,6 +610,7 @@ struct ufs_hba {
u32 uic_error;
u32 saved_err;
u32 saved_uic_err;
+ struct ufs_stats ufs_stats;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -536,6 +624,9 @@ struct ufs_hba {
bool wlun_dev_clr_ua;
+ /* Number of request aborts */
+ int req_abort_count;
+
/* Number of lanes available (1 or 2) for Rx/Tx */
u32 lanes_per_direction;
struct ufs_pa_layer_attr pwr_info;
@@ -558,6 +649,14 @@ struct ufs_hba {
* CAUTION: Enabling this might reduce overall UFS throughput.
*/
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
+ /*
+ * This capability allows the device auto-bkops to be always enabled
+ * except during suspend (both runtime and system suspend).
+ * Enabling this capability means that the device will always be allowed
+ * to do background operations when it's active, but it might degrade
+ * the performance of ongoing read/write operations.
+ */
+#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -565,6 +664,8 @@ struct ufs_hba {
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
+
+ struct rw_semaphore clk_scaling_lock;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -576,7 +677,7 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
-static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
@@ -655,6 +756,11 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
BUG_ON(!hba);
return hba->priv;
}
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+ struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
@@ -713,8 +819,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
-int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
-
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
return (pwr_info->pwr_rx == FAST_MODE ||
@@ -723,11 +827,6 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
pwr_info->pwr_tx == FASTAUTO_MODE);
}
-#define ASCII_STD true
-
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
- u32 size, bool ascii);
-
/* Expose Query-Request API */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
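
The ufs_uic_err_reg_hist structure added above is a simple cyclic buffer; a minimal sketch of how an error register value would be recorded into it (the helper name is illustrative):

    static void example_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
    					u32 reg)
    {
    	reg_hist->reg[reg_hist->pos] = reg;
    	reg_hist->tstamp[reg_hist->pos] = ktime_get();
    	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
    }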
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 8c5190e2e1c9..d14e9b965d1e 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,9 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+
+ UFSHCI_REG_SPACE_SIZE = 0xA0,
+
REG_UFS_CCAP = 0x100,
REG_UFS_CRYPTOCAP = 0x104,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd07f00a..c680d7641311 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 15ca09cd16f3..ef474a748744 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -68,10 +68,7 @@ struct pvscsi_ctx {
struct pvscsi_adapter {
char *mmioBase;
- unsigned int irq;
u8 rev;
- bool use_msi;
- bool use_msix;
bool use_msg;
bool use_req_threshold;
@@ -1161,30 +1158,26 @@ static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
struct pvscsi_adapter *adapter = devp;
- int handled;
-
- if (adapter->use_msi || adapter->use_msix)
- handled = true;
- else {
- u32 val = pvscsi_read_intr_status(adapter);
- handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
- if (handled)
- pvscsi_write_intr_status(devp, val);
- }
-
- if (handled) {
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&adapter->hw_lock, flags);
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+ pvscsi_process_completion_ring(adapter);
+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
+ queue_work(adapter->workqueue, &adapter->work);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
- pvscsi_process_completion_ring(adapter);
- if (adapter->use_msg && pvscsi_msg_pending(adapter))
- queue_work(adapter->workqueue, &adapter->work);
+ return IRQ_HANDLED;
+}
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- }
+static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
+{
+ struct pvscsi_adapter *adapter = devp;
+ u32 val = pvscsi_read_intr_status(adapter);
- return IRQ_RETVAL(handled);
+ if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
+ return IRQ_NONE;
+ pvscsi_write_intr_status(devp, val);
+ return pvscsi_isr(irq, devp);
}
static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
@@ -1196,34 +1189,10 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}
-static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
- unsigned int *irq)
-{
- struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
- int ret;
-
- ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
- if (ret)
- return ret;
-
- *irq = entry.vector;
-
- return 0;
-}
-
static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
- if (adapter->irq) {
- free_irq(adapter->irq, adapter);
- adapter->irq = 0;
- }
- if (adapter->use_msi) {
- pci_disable_msi(adapter->dev);
- adapter->use_msi = 0;
- } else if (adapter->use_msix) {
- pci_disable_msix(adapter->dev);
- adapter->use_msix = 0;
- }
+ free_irq(pci_irq_vector(adapter->dev, 0), adapter);
+ pci_free_irq_vectors(adapter->dev);
}
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
@@ -1359,11 +1328,11 @@ exit:
static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
struct pvscsi_adapter *adapter;
struct pvscsi_adapter adapter_temp;
struct Scsi_Host *host = NULL;
unsigned int i;
- unsigned long flags = 0;
int error;
u32 max_id;
@@ -1512,30 +1481,33 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_reset_adapter;
}
- if (!pvscsi_disable_msix &&
- pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
- printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
- adapter->use_msix = 1;
- } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
- printk(KERN_INFO "vmw_pvscsi: using MSI\n");
- adapter->use_msi = 1;
- adapter->irq = pdev->irq;
- } else {
- printk(KERN_INFO "vmw_pvscsi: using INTx\n");
- adapter->irq = pdev->irq;
- flags = IRQF_SHARED;
- }
+ if (pvscsi_disable_msix)
+ irq_flag &= ~PCI_IRQ_MSIX;
+ if (pvscsi_disable_msi)
+ irq_flag &= ~PCI_IRQ_MSI;
+
+ error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
+ if (error < 0)
+ goto out_reset_adapter;
adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
adapter->use_req_threshold ? "en" : "dis");
- error = request_irq(adapter->irq, pvscsi_isr, flags,
- "vmw_pvscsi", adapter);
+ if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
+ printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
+ adapter->dev->msix_enabled ? "-X" : "");
+ error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
+ 0, "vmw_pvscsi", adapter);
+ } else {
+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
+ error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
+ IRQF_SHARED, "vmw_pvscsi", adapter);
+ }
+
if (error) {
printk(KERN_ERR
"vmw_pvscsi: unable to request IRQ: %d\n", error);
- adapter->irq = 0;
goto out_reset_adapter;
}
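
The vmw_pvscsi changes follow the generic conversion from hand-rolled MSI-X/MSI/INTx setup to the pci_alloc_irq_vectors() API. A minimal sketch of that pattern, assuming a single vector and an illustrative handler/name:

    static int example_setup_single_irq(struct pci_dev *pdev,
    				    irq_handler_t handler, void *data)
    {
    	int nvec;

    	nvec = pci_alloc_irq_vectors(pdev, 1, 1,
    				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    	if (nvec < 0)
    		return nvec;

    	/* legacy INTx may be shared, MSI/MSI-X vectors are exclusive */
    	return request_irq(pci_irq_vector(pdev, 0), handler,
    			   (pdev->msix_enabled || pdev->msi_enabled) ?
    					0 : IRQF_SHARED,
    			   "example_dev", data);
    }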
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index d41292ef85f2..75966d3f326e 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -423,11 +423,6 @@ struct PVSCSIConfigPageController {
#define PVSCSI_MAX_INTRS 24
/*
- * Enumeration of supported MSI-X vectors
- */
-#define PVSCSI_VECTOR_COMPLETION 0
-
-/*
* Misc constants for the rings.
*/
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 0acdfd82e751..813df6e7292d 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -11,6 +11,8 @@
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -92,9 +94,18 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
{ /*sentinel*/ },
};
+struct regmap *exynos_get_pmu_regmap(void)
+{
+ struct device_node *np = of_find_matching_node(NULL,
+ exynos_pmu_of_device_ids);
+ if (np)
+ return syscon_node_to_regmap(np);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct resource *res;
@@ -106,15 +117,10 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context = devm_kzalloc(&pdev->dev,
sizeof(struct exynos_pmu_context),
GFP_KERNEL);
- if (!pmu_context) {
- dev_err(dev, "Cannot allocate memory.\n");
+ if (!pmu_context)
return -ENOMEM;
- }
pmu_context->dev = dev;
-
- match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
-
- pmu_context->pmu_data = match->data;
+ pmu_context->pmu_data = of_device_get_match_data(dev);
if (pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
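
exynos_get_pmu_regmap() gives other drivers regmap-based access to the PMU without a platform-device dependency. A minimal usage sketch, with EXAMPLE_PMU_REG standing in for a real register offset:

    static int example_read_pmu_reg(unsigned int *val)
    {
    	struct regmap *pmu = exynos_get_pmu_regmap();

    	if (IS_ERR(pmu))
    		return PTR_ERR(pmu);

    	return regmap_read(pmu, EXAMPLE_PMU_REG, val);
    }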
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc81ae45..5bb376009d98 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
if (IS_ERR(task)) {
dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
goto err_put_rproc;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa252d6e8..25ae7f2e44b5 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -162,7 +162,8 @@ config SPI_BCM63XX_HSSPI
config SPI_BCM_QSPI
tristate "Broadcom BSPI and MSPI controller support"
- depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || \
+ BMIPS_GENERIC || COMPILE_TEST
default ARCH_BCM_IPROC
help
Enables support for the Broadcom SPI flash and MSPI controller.
@@ -263,7 +264,7 @@ config SPI_EP93XX
mode.
config SPI_FALCON
- tristate "Falcon SPI controller support"
+ bool "Falcon SPI controller support"
depends on SOC_FALCON
help
The external bus unit (EBU) found on the FALC-ON SoC has SPI
@@ -378,6 +379,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
+ depends on HAS_DMA
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
@@ -415,6 +417,14 @@ config SPI_NUC900
help
SPI driver for Nuvoton NUC900 series ARM SoCs
+config SPI_LANTIQ_SSC
+ tristate "Lantiq SSC SPI controller"
+ depends on LANTIQ || COMPILE_TEST
+ help
+ This driver supports the Lantiq SSC SPI controller in master
+ mode. This controller is found on Intel (formerly Lantiq) SoCs like
+ the Danube, Falcon, xRX200, xRX300.
+
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 7a6b64662c82..b375a7a89216 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0af45d2..6c7d7a460689 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -170,12 +170,12 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
switch (pin_mode) {
- case 1:
+ case SPI_NBITS_SINGLE:
break;
- case 2:
+ case SPI_NBITS_DUAL:
val |= A3700_SPI_DATA_PIN0;
break;
- case 4:
+ case SPI_NBITS_QUAD:
val |= A3700_SPI_DATA_PIN1;
break;
default:
@@ -340,8 +340,7 @@ static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
/* Wake up the transfer */
- if (a3700_spi->wait_mask & cause)
- complete(&a3700_spi->done);
+ complete(&a3700_spi->done);
return IRQ_HANDLED;
}
@@ -421,7 +420,7 @@ static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
}
static void a3700_spi_transfer_setup(struct spi_device *spi,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
unsigned int byte_len;
@@ -562,6 +561,7 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
u32 data = le32_to_cpu(val);
+
memcpy(a3700_spi->rx_buf, &data, 4);
a3700_spi->buf_len -= 4;
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct a3700_spi *spi;
u32 num_cs = 0;
- int ret = 0;
+ int irq, ret = 0;
master = spi_alloc_master(dev, sizeof(*spi));
if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+ master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- spi->irq = platform_get_irq(pdev, 0);
- if (spi->irq < 0) {
- dev_err(dev, "could not get irq: %d\n", spi->irq);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "could not get irq: %d\n", irq);
ret = -ENXIO;
goto error;
}
+ spi->irq = irq;
init_completion(&spi->done);
@@ -900,7 +901,6 @@ static int a3700_spi_remove(struct platform_device *pdev)
struct a3700_spi *spi = spi_master_get_devdata(master);
clk_unprepare(spi->clk);
- spi_master_put(master);
return 0;
}
@@ -908,7 +908,6 @@ static int a3700_spi_remove(struct platform_device *pdev)
static struct platform_driver a3700_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(a3700_spi_dt_ids),
},
.probe = a3700_spi_probe,
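
The SPI_NBITS_* values handled in a3700_spi_pin_mode_set() come from the client's spi_transfer; a minimal sketch of a transfer requesting a quad-wide read (buffer and length are caller-supplied):

    static void example_fill_quad_read(struct spi_transfer *t, void *buf,
    				   unsigned int len)
    {
    	memset(t, 0, sizeof(*t));
    	t->rx_buf = buf;
    	t->len = len;
    	t->rx_nbits = SPI_NBITS_QUAD;	/* matched by the switch above */
    }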
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index f369174fbd88..b89cee11f418 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -78,14 +78,16 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
- if (spi->chip_select) {
+ if (gpio_is_valid(spi->cs_gpio)) {
/* SPI is normally active-low */
- gpio_set_value(spi->cs_gpio, cs_high);
+ gpio_set_value_cansleep(spi->cs_gpio, cs_high);
} else {
+ u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
+
if (cs_high)
- sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ sp->ioc_base |= cs_bit;
else
- sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+ sp->ioc_base &= ~cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
@@ -118,11 +120,8 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int status;
- if (spi->chip_select && !gpio_is_valid(spi->cs_gpio))
- return -EINVAL;
-
status = 0;
- if (spi->chip_select) {
+ if (gpio_is_valid(spi->cs_gpio)) {
unsigned long flags;
flags = GPIOF_DIR_OUT;
@@ -134,10 +133,12 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
status = gpio_request_one(spi->cs_gpio, flags,
dev_name(&spi->dev));
} else {
+ u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
+
if (spi->mode & SPI_CS_HIGH)
- sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+ sp->ioc_base &= ~cs_bit;
else
- sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ sp->ioc_base |= cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
@@ -147,7 +148,7 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
- if (spi->chip_select) {
+ if (gpio_is_valid(spi->cs_gpio)) {
gpio_free(spi->cs_gpio);
}
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d7e761..6ab4c7700228 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_master;
}
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14f9dea3173f..b19722ba908c 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -89,7 +89,7 @@
#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
-#define BSPI_READ_LENGTH 256
+#define BSPI_READ_LENGTH 512
/* MSPI register offsets */
#define MSPI_SPCR0_LSB 0x000
@@ -192,9 +192,11 @@ struct bcm_qspi_dev_id {
void *dev;
};
+
struct qspi_trans {
struct spi_transfer *trans;
int byte;
+ bool mspi_last_trans;
};
struct bcm_qspi {
@@ -371,7 +373,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
/* default mode, does not need flex_cmd */
flex_mode = 0;
else
- command = SPINOR_OP_READ4_FAST;
+ command = SPINOR_OP_READ_FAST_4B;
break;
case SPI_NBITS_DUAL:
bpc = 0x00000001;
@@ -384,7 +386,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
} else {
command = SPINOR_OP_READ_1_1_2;
if (spans_4byte)
- command = SPINOR_OP_READ4_1_1_2;
+ command = SPINOR_OP_READ_1_1_2_4B;
}
break;
case SPI_NBITS_QUAD:
@@ -399,7 +401,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
} else {
command = SPINOR_OP_READ_1_1_4;
if (spans_4byte)
- command = SPINOR_OP_READ4_1_1_4;
+ command = SPINOR_OP_READ_1_1_4_4B;
}
break;
default:
@@ -616,6 +618,16 @@ static int bcm_qspi_setup(struct spi_device *spi)
return 0;
}
+static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
+ struct qspi_trans *qt)
+{
+ if (qt->mspi_last_trans &&
+ spi_transfer_is_last(qspi->master, qt->trans))
+ return true;
+ else
+ return false;
+}
+
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
struct qspi_trans *qt, int flags)
{
@@ -629,7 +641,6 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
if (qt->byte >= qt->trans->len) {
/* we're at the end of the spi_transfer */
-
/* in TX mode, need to pause for a delay or CS change */
if (qt->trans->delay_usecs &&
(flags & TRANS_STATUS_BREAK_DELAY))
@@ -641,7 +652,7 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
goto done;
dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
- if (spi_transfer_is_last(qspi->master, qt->trans))
+ if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
ret = TRANS_STATUS_BREAK_EOM;
else
ret = TRANS_STATUS_BREAK_NO_BYTES;
@@ -813,7 +824,7 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
- u32 addr = 0, len, len_words;
+ u32 addr = 0, len, rdlen, len_words;
int ret = 0;
unsigned long timeo = msecs_to_jiffies(100);
struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
@@ -826,7 +837,7 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
/*
- * when using flex mode mode we need to send
+ * when using flex mode we need to send
* the upper address byte to bspi
*/
if (bcm_qspi_bspi_ver_three(qspi) == false) {
@@ -840,48 +851,127 @@ static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
else
addr = msg->from & 0x00ffffff;
- /* set BSPI RAF buffer max read length */
- len = msg->len;
- if (len > BSPI_READ_LENGTH)
- len = BSPI_READ_LENGTH;
-
if (bcm_qspi_bspi_ver_three(qspi) == true)
addr = (addr + 0xc00000) & 0xffffff;
- reinit_completion(&qspi->bspi_done);
- bcm_qspi_enable_bspi(qspi);
- len_words = (len + 3) >> 2;
- qspi->bspi_rf_msg = msg;
- qspi->bspi_rf_msg_status = 0;
+ /*
+ * read into the entire buffer by breaking the reads
+ * into RAF buffer read lengths
+ */
+ len = msg->len;
qspi->bspi_rf_msg_idx = 0;
- qspi->bspi_rf_msg_len = len;
- dev_dbg(&qspi->pdev->dev, "bspi xfr addr 0x%x len 0x%x", addr, len);
- bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
- bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
- bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+ do {
+ if (len > BSPI_READ_LENGTH)
+ rdlen = BSPI_READ_LENGTH;
+ else
+ rdlen = len;
+
+ reinit_completion(&qspi->bspi_done);
+ bcm_qspi_enable_bspi(qspi);
+ len_words = (rdlen + 3) >> 2;
+ qspi->bspi_rf_msg = msg;
+ qspi->bspi_rf_msg_status = 0;
+ qspi->bspi_rf_msg_len = rdlen;
+ dev_dbg(&qspi->pdev->dev,
+ "bspi xfr addr 0x%x len 0x%x", addr, rdlen);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+ if (qspi->soc_intc) {
+ /*
+ * clear soc MSPI and BSPI interrupts and enable
+ * BSPI interrupts.
+ */
+ soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+ soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+ }
- if (qspi->soc_intc) {
- /*
- * clear soc MSPI and BSPI interrupts and enable
- * BSPI interrupts.
- */
- soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
- soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+ /* Must flush previous writes before starting BSPI operation */
+ mb();
+ bcm_qspi_bspi_lr_start(qspi);
+ if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ /* set msg return length */
+ msg->retlen += rdlen;
+ addr += rdlen;
+ len -= rdlen;
+ } while (len);
+
+ return ret;
+}
+
+static int bcm_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ int slots;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ bcm_qspi_chip_select(qspi, spi->chip_select);
+ qspi->trans_pos.trans = trans;
+ qspi->trans_pos.byte = 0;
+
+ while (qspi->trans_pos.byte < trans->len) {
+ reinit_completion(&qspi->mspi_done);
+
+ slots = write_to_hw(qspi, spi);
+ if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
+ return -ETIMEDOUT;
+ }
+
+ read_from_hw(qspi, slots);
}
- /* Must flush previous writes before starting BSPI operation */
- mb();
+ return 0;
+}
- bcm_qspi_bspi_lr_start(qspi);
- if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
- dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
- ret = -ETIMEDOUT;
- } else {
- /* set the return length for the caller */
- msg->retlen = len;
+static int bcm_qspi_mspi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct spi_transfer t[2];
+ u8 cmd[6];
+ int ret;
+
+ memset(cmd, 0, sizeof(cmd));
+ memset(t, 0, sizeof(t));
+
+ /* tx */
+ /* opcode is in cmd[0] */
+ cmd[0] = msg->read_opcode;
+ cmd[1] = msg->from >> (msg->addr_width * 8 - 8);
+ cmd[2] = msg->from >> (msg->addr_width * 8 - 16);
+ cmd[3] = msg->from >> (msg->addr_width * 8 - 24);
+ cmd[4] = msg->from >> (msg->addr_width * 8 - 32);
+ t[0].tx_buf = cmd;
+ t[0].len = msg->addr_width + msg->dummy_bytes + 1;
+ t[0].bits_per_word = spi->bits_per_word;
+ t[0].tx_nbits = msg->opcode_nbits;
+ /* let MSPI know that this is not the last transfer */
+ qspi->trans_pos.mspi_last_trans = false;
+ ret = bcm_qspi_transfer_one(spi->master, spi, &t[0]);
+
+ /* rx */
+ qspi->trans_pos.mspi_last_trans = true;
+ if (!ret) {
+ /* rx */
+ t[1].rx_buf = msg->buf;
+ t[1].len = msg->len;
+ t[1].rx_nbits = msg->data_nbits;
+ t[1].bits_per_word = spi->bits_per_word;
+ ret = bcm_qspi_transfer_one(spi->master, spi, &t[1]);
}
+ if (!ret)
+ msg->retlen = msg->len;
+
return ret;
}
@@ -918,8 +1008,7 @@ static int bcm_qspi_flash_read(struct spi_device *spi,
mspi_read = true;
if (mspi_read)
- /* this will make the m25p80 read to fallback to mspi read */
- return -EAGAIN;
+ return bcm_qspi_mspi_flash_read(spi, msg);
io_width = msg->data_nbits ? msg->data_nbits : SPI_NBITS_SINGLE;
addrlen = msg->addr_width;
@@ -931,33 +1020,6 @@ static int bcm_qspi_flash_read(struct spi_device *spi,
return ret;
}
-static int bcm_qspi_transfer_one(struct spi_master *master,
- struct spi_device *spi,
- struct spi_transfer *trans)
-{
- struct bcm_qspi *qspi = spi_master_get_devdata(master);
- int slots;
- unsigned long timeo = msecs_to_jiffies(100);
-
- bcm_qspi_chip_select(qspi, spi->chip_select);
- qspi->trans_pos.trans = trans;
- qspi->trans_pos.byte = 0;
-
- while (qspi->trans_pos.byte < trans->len) {
- reinit_completion(&qspi->mspi_done);
-
- slots = write_to_hw(qspi, spi);
- if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
- dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
- return -ETIMEDOUT;
- }
-
- read_from_hw(qspi, slots);
- }
-
- return 0;
-}
-
static void bcm_qspi_cleanup(struct spi_device *spi)
{
struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
@@ -1187,6 +1249,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->pdev = pdev;
qspi->trans_pos.trans = NULL;
qspi->trans_pos.byte = 0;
+ qspi->trans_pos.mspi_last_trans = true;
qspi->master = master;
master->bus_num = -1;
@@ -1345,7 +1408,6 @@ int bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index afb51699dbb5..6e409eabe1c9 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -1,3 +1,11 @@
+/*
+ * Copyright (C) 2014-2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -275,10 +283,6 @@ static int bcm53xxspi_flash_read(struct spi_device *spi,
* BCMA
**************************************************/
-static struct spi_board_info bcm53xx_info = {
- .modalias = "bcm53xxspiflash",
-};
-
static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
@@ -311,6 +315,7 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
b53spi->bspi = true;
bcm53xxspi_disable_bspi(b53spi);
+ master->dev.of_node = dev->of_node;
master->transfer_one = bcm53xxspi_transfer_one;
if (b53spi->mmio_base)
master->spi_flash_read = bcm53xxspi_flash_read;
@@ -324,9 +329,6 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
return err;
}
- /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
- spi_new_device(master, &bcm53xx_info);
-
return 0;
}
@@ -361,4 +363,4 @@ module_exit(bcm53xxspi_module_exit);
MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver");
MODULE_AUTHOR("Rafał Miłecki <zajec5@gmail.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b73a35..02fb96797ac8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
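
The spi-davinci hunks switch to the canonical streaming-DMA error check: the handle returned by dma_map_single() must be validated with dma_mapping_error() rather than compared against zero. A minimal sketch of that pattern:

    static int example_map_rx_buffer(struct device *dev, void *buf, size_t len,
    				 dma_addr_t *handle)
    {
    	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, *handle))
    		return -EFAULT;

    	return 0;
    }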
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f91475..837cb8d0bac6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
static void mid_spi_dma_stop(struct dw_spi *dws)
{
if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->txchan);
+ dmaengine_terminate_sync(dws->txchan);
clear_bit(TX_BUSY, &dws->dma_chan_busy);
}
if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->rxchan);
+ dmaengine_terminate_sync(dws->rxchan);
clear_bit(RX_BUSY, &dws->dma_chan_busy);
}
}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26a9148..b217c22ff72f 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ char name[32];
+
+ snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
+ dws->debugfs = debugfs_create_dir(name, NULL);
if (!dws->debugfs)
return -ENOMEM;
@@ -483,9 +486,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
- snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
- ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
+ master);
if (ret < 0) {
dev_err(dev, "can not get IRQ\n");
goto err_free_master;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index c21ca02f8ec5..da5eab62df34 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -101,7 +101,6 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_master *master;
enum dw_ssi_type type;
- char name[16];
void __iomem *regs;
unsigned long paddr;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 17a6387e20b5..b5d766064b7b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -28,6 +28,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
+#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/dma-ep93xx.h>
@@ -107,16 +108,6 @@ struct ep93xx_spi {
void *zeropage;
};
-/**
- * struct ep93xx_spi_chip - SPI device hardware settings
- * @spi: back pointer to the SPI device
- * @ops: private chip operations
- */
-struct ep93xx_spi_chip {
- const struct spi_device *spi;
- struct ep93xx_spi_chip_ops *ops;
-};
-
/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw) ((bpw) - 1)
@@ -229,104 +220,36 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
return -EINVAL;
}
-static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
-{
- struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
- int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
-
- if (chip->ops && chip->ops->cs_control)
- chip->ops->cs_control(spi, value);
-}
-
-/**
- * ep93xx_spi_setup() - setup an SPI device
- * @spi: SPI device to setup
- *
- * This function sets up SPI device mode, speed etc. Can be called multiple
- * times for a single device. Returns %0 in case of success, negative error in
- * case of failure. When this function returns success, the device is
- * deselected.
- */
-static int ep93xx_spi_setup(struct spi_device *spi)
+static void ep93xx_spi_cs_control(struct spi_device *spi, bool enable)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
- struct ep93xx_spi_chip *chip;
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
- chip = spi_get_ctldata(spi);
- if (!chip) {
- dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
- spi->modalias);
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->spi = spi;
- chip->ops = spi->controller_data;
-
- if (chip->ops && chip->ops->setup) {
- int ret = chip->ops->setup(spi);
-
- if (ret) {
- kfree(chip);
- return ret;
- }
- }
-
- spi_set_ctldata(spi, chip);
- }
-
- ep93xx_spi_cs_control(spi, false);
- return 0;
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_set_value(spi->cs_gpio, !enable);
}
-/**
- * ep93xx_spi_cleanup() - cleans up master controller specific state
- * @spi: SPI device to cleanup
- *
- * This function releases master controller specific state for given @spi
- * device.
- */
-static void ep93xx_spi_cleanup(struct spi_device *spi)
-{
- struct ep93xx_spi_chip *chip;
-
- chip = spi_get_ctldata(spi);
- if (chip) {
- if (chip->ops && chip->ops->cleanup)
- chip->ops->cleanup(spi);
- spi_set_ctldata(spi, NULL);
- kfree(chip);
- }
-}
-
-/**
- * ep93xx_spi_chip_setup() - configures hardware according to given @chip
- * @espi: ep93xx SPI controller struct
- * @chip: chip specific settings
- * @speed_hz: transfer speed
- * @bits_per_word: transfer bits_per_word
- */
static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
- const struct ep93xx_spi_chip *chip,
- u32 speed_hz, u8 bits_per_word)
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- u8 dss = bits_per_word_to_dss(bits_per_word);
+ u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
u8 div_cpsr = 0;
u8 div_scr = 0;
u16 cr0;
int err;
- err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
+ err = ep93xx_spi_calc_divisors(espi, xfer->speed_hz,
+ &div_cpsr, &div_scr);
if (err)
return err;
cr0 = div_scr << SSPCR0_SCR_SHIFT;
- cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
+ cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
cr0 |= dss;
dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
- chip->spi->mode, div_cpsr, div_scr, dss);
+ spi->mode, div_cpsr, div_scr, dss);
dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
@@ -603,12 +526,11 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
struct spi_message *msg,
struct spi_transfer *t)
{
- struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
int err;
msg->state = t;
- err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
+ err = ep93xx_spi_chip_setup(espi, msg->spi, t);
if (err) {
dev_err(&espi->pdev->dev,
"failed to setup chip for transfer\n");
@@ -863,8 +785,13 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int error;
+ int i;
info = dev_get_platdata(&pdev->dev);
+ if (!info) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -882,14 +809,36 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->setup = ep93xx_spi_setup;
master->transfer_one_message = ep93xx_spi_transfer_one_message;
- master->cleanup = ep93xx_spi_cleanup;
master->bus_num = pdev->id;
- master->num_chipselect = info->num_chipselect;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->num_chipselect = info->num_chipselect;
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect,
+ GFP_KERNEL);
+ if (!master->cs_gpios) {
+ error = -ENOMEM;
+ goto fail_release_master;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ master->cs_gpios[i] = info->chipselect[i];
+
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
+ GPIOF_OUT_INIT_HIGH,
+ "ep93xx-spi");
+ if (error) {
+ dev_err(&pdev->dev, "could not request cs gpio %d\n",
+ master->cs_gpios[i]);
+ goto fail_release_master;
+ }
+ }
+
platform_set_drvdata(pdev, master);
espi = spi_master_get_devdata(master);
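
With the ep93xx conversion, chip selects are plain GPIOs handed in through platform data and copied into master->cs_gpios. A sketch of what the board-side data could look like; the field names follow the probe code above (num_chipselect, chipselect[]), while the GPIO numbers are placeholders:

    static int example_spi_chipselects[] = { 100, 101 };	/* placeholder GPIOs */

    static struct ep93xx_spi_info example_spi_info = {
    	.chipselect	= example_spi_chipselects,
    	.num_chipselect	= ARRAY_SIZE(example_spi_chipselects),
    };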
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 52551f6d0c7d..cb3c73007ca1 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -366,7 +366,7 @@ static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
struct spi_transfer *xfer;
bool is_first_xfer = true;
u32 temp;
- int ret;
+ int ret = 0;
msg->status = 0;
msg->actual_length = 0;
@@ -512,9 +512,9 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
static struct platform_driver fsl_lpspi_driver = {
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = fsl_lpspi_dt_ids,
- },
+ .name = DRIVER_NAME,
+ .of_match_table = fsl_lpspi_dt_ids,
+ },
.probe = fsl_lpspi_probe,
.remove = fsl_lpspi_remove,
};
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8b290d9d7935..0fc3452652ae 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -267,10 +267,9 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= SPMODE_DIV16;
pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
-
- WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
- "Will use %d Hz instead.\n", dev_name(&spi->dev),
- hz, mpc8xxx_spi->spibrg / 1024);
+ WARN_ONCE(pm > 16,
+ "%s: Requested speed is too low: %d Hz. Will use %d Hz instead.\n",
+ dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / 1024);
if (pm > 16)
pm = 16;
} else {
@@ -727,12 +726,13 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
return 0;
}
- pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
+ pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
+ GFP_KERNEL);
if (!pinfo->gpios)
return -ENOMEM;
memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
- pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
+ pinfo->alow_flags = kcalloc(ngpios, sizeof(*pinfo->alow_flags),
GFP_KERNEL);
if (!pinfo->alow_flags) {
ret = -ENOMEM;
@@ -762,8 +762,9 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
ret = gpio_direction_output(pinfo->gpios[i],
pinfo->alow_flags[i]);
if (ret) {
- dev_err(dev, "can't set output direction for gpio "
- "#%d: %d\n", i, ret);
+ dev_err(dev,
+ "can't set output direction for gpio #%d: %d\n",
+ i, ret);
goto err_loop;
}
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 32ced64a5bb9..9a7c62f471dc 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,7 +211,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- unsigned int bpw;
+ unsigned int bpw, i;
if (!master->dma_rx)
return false;
@@ -228,12 +228,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (bpw != 1 && bpw != 2 && bpw != 4)
return false;
- if (transfer->len < spi_imx->wml * bpw)
- return false;
+ for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) {
+ if (!(transfer->len % (i * bpw)))
+ break;
+ }
- if (transfer->len % (spi_imx->wml * bpw))
+ if (i == 0)
return false;
+ spi_imx->wml = i;
+
return true;
}
@@ -837,10 +841,6 @@ static int spi_imx_dma_configure(struct spi_master *master,
struct dma_slave_config rx = {}, tx = {};
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- if (bytes_per_word == spi_imx->bytes_per_word)
- /* Same as last time */
- return 0;
-
switch (bytes_per_word) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
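
The new spi-imx DMA check picks the largest watermark (in words) that divides the transfer length evenly, starting from half the FIFO size. A standalone sketch of that selection, with a worked example: for a 64-entry FIFO (half = 32), bpw = 1 and len = 48 it returns 24.

    static unsigned int example_pick_wml(unsigned int fifo_half,
    				     unsigned int bpw, unsigned int len)
    {
    	unsigned int i;

    	for (i = fifo_half; i > 0; i--)
    		if (!(len % (i * bpw)))
    			break;

    	return i;	/* 0 means the length cannot be burst-aligned */
    }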
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
new file mode 100644
index 000000000000..8a626f7fccea
--- /dev/null
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -0,0 +1,983 @@
+/*
+ * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
+ * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#endif
+
+#define SPI_RX_IRQ_NAME "spi_rx"
+#define SPI_TX_IRQ_NAME "spi_tx"
+#define SPI_ERR_IRQ_NAME "spi_err"
+#define SPI_FRM_IRQ_NAME "spi_frm"
+
+#define SPI_CLC 0x00
+#define SPI_PISEL 0x04
+#define SPI_ID 0x08
+#define SPI_CON 0x10
+#define SPI_STAT 0x14
+#define SPI_WHBSTATE 0x18
+#define SPI_TB 0x20
+#define SPI_RB 0x24
+#define SPI_RXFCON 0x30
+#define SPI_TXFCON 0x34
+#define SPI_FSTAT 0x38
+#define SPI_BRT 0x40
+#define SPI_BRSTAT 0x44
+#define SPI_SFCON 0x60
+#define SPI_SFSTAT 0x64
+#define SPI_GPOCON 0x70
+#define SPI_GPOSTAT 0x74
+#define SPI_FPGO 0x78
+#define SPI_RXREQ 0x80
+#define SPI_RXCNT 0x84
+#define SPI_DMACON 0xec
+#define SPI_IRNEN 0xf4
+#define SPI_IRNICR 0xf8
+#define SPI_IRNCR 0xfc
+
+#define SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
+#define SPI_CLC_SMC_M (0xFF << SPI_CLC_SMC_S)
+#define SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
+#define SPI_CLC_RMC_M (0xFF << SPI_CLC_RMC_S)
+#define SPI_CLC_DISS BIT(1) /* Disable status bit */
+#define SPI_CLC_DISR BIT(0) /* Disable request bit */
+
+#define SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
+#define SPI_ID_TXFS_M (0x3F << SPI_ID_TXFS_S)
+#define SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
+#define SPI_ID_RXFS_M (0x3F << SPI_ID_RXFS_S)
+#define SPI_ID_MOD_S 8 /* Module ID */
+#define SPI_ID_MOD_M (0xff << SPI_ID_MOD_S)
+#define SPI_ID_CFG_S 5 /* DMA interface support */
+#define SPI_ID_CFG_M (1 << SPI_ID_CFG_S)
+#define SPI_ID_REV_M 0x1F /* Hardware revision number */
+
+#define SPI_CON_BM_S 16 /* Data width selection */
+#define SPI_CON_BM_M (0x1F << SPI_CON_BM_S)
+#define SPI_CON_EM BIT(24) /* Echo mode */
+#define SPI_CON_IDLE BIT(23) /* Idle bit value */
+#define SPI_CON_ENBV BIT(22) /* Enable byte valid control */
+#define SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
+#define SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
+#define SPI_CON_AEN BIT(10) /* Abort error enable */
+#define SPI_CON_REN BIT(9) /* Receive overflow error enable */
+#define SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
+#define SPI_CON_LB BIT(7) /* Loopback control */
+#define SPI_CON_PO BIT(6) /* Clock polarity control */
+#define SPI_CON_PH BIT(5) /* Clock phase control */
+#define SPI_CON_HB BIT(4) /* Heading control */
+#define SPI_CON_RXOFF BIT(1) /* Switch receiver off */
+#define SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
+
+#define SPI_STAT_RXBV_S 28
+#define SPI_STAT_RXBV_M (0x7 << SPI_STAT_RXBV_S)
+#define SPI_STAT_BSY BIT(13) /* Busy flag */
+#define SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
+#define SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
+#define SPI_STAT_AE BIT(10) /* Abort error flag */
+#define SPI_STAT_RE BIT(9) /* Receive error flag */
+#define SPI_STAT_TE BIT(8) /* Transmit error flag */
+#define SPI_STAT_ME BIT(7) /* Mode error flag */
+#define SPI_STAT_MS BIT(1) /* Master/slave select bit */
+#define SPI_STAT_EN BIT(0) /* Enable bit */
+#define SPI_STAT_ERRORS (SPI_STAT_ME | SPI_STAT_TE | SPI_STAT_RE | \
+ SPI_STAT_AE | SPI_STAT_TUE | SPI_STAT_RUE)
+
+#define SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
+#define SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
+#define SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
+#define SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
+#define SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
+#define SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
+#define SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
+#define SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
+#define SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
+#define SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
+#define SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
+#define SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
+#define SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
+#define SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
+#define SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
+#define SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
+#define SPI_WHBSTATE_CLR_ERRORS (SPI_WHBSTATE_CLRRUE | SPI_WHBSTATE_CLRME | \
+ SPI_WHBSTATE_CLRTE | SPI_WHBSTATE_CLRRE | \
+ SPI_WHBSTATE_CLRAE | SPI_WHBSTATE_CLRTUE)
+
+#define SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
+#define SPI_RXFCON_RXFITL_M (0x3F << SPI_RXFCON_RXFITL_S)
+#define SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
+#define SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
+
+#define SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
+#define SPI_TXFCON_TXFITL_M (0x3F << SPI_TXFCON_TXFITL_S)
+#define SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
+#define SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
+
+#define SPI_FSTAT_RXFFL_S 0
+#define SPI_FSTAT_RXFFL_M (0x3f << SPI_FSTAT_RXFFL_S)
+#define SPI_FSTAT_TXFFL_S 8
+#define SPI_FSTAT_TXFFL_M (0x3f << SPI_FSTAT_TXFFL_S)
+
+#define SPI_GPOCON_ISCSBN_S 8
+#define SPI_GPOCON_INVOUTN_S 0
+
+#define SPI_FGPO_SETOUTN_S 8
+#define SPI_FGPO_CLROUTN_S 0
+
+#define SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
+#define SPI_RXCNT_TODO_M 0xFFFF /* Receive to-do value */
+
+#define SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
+#define SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
+#define SPI_IRNEN_E BIT(2) /* Error end interrupt request */
+#define SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
+#define SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
+#define SPI_IRNEN_R_XRX BIT(1) /* Receive end interrupt request */
+#define SPI_IRNEN_T_XRX BIT(0) /* Transmit end interrupt request */
+#define SPI_IRNEN_ALL 0x1F
+
+struct lantiq_ssc_hwcfg {
+ unsigned int irnen_r;
+ unsigned int irnen_t;
+};
+
+struct lantiq_ssc_spi {
+ struct spi_master *master;
+ struct device *dev;
+ void __iomem *regbase;
+ struct clk *spi_clk;
+ struct clk *fpi_clk;
+ const struct lantiq_ssc_hwcfg *hwcfg;
+
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ const u8 *tx;
+ u8 *rx;
+ unsigned int tx_todo;
+ unsigned int rx_todo;
+ unsigned int bits_per_word;
+ unsigned int speed_hz;
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ unsigned int base_cs;
+};
+
+static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
+{
+ return __raw_readl(spi->regbase + reg);
+}
+
+static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
+ u32 reg)
+{
+ __raw_writel(val, spi->regbase + reg);
+}
+
+static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
+ u32 set, u32 reg)
+{
+ u32 val = __raw_readl(spi->regbase + reg);
+
+ val &= ~clr;
+ val |= set;
+ __raw_writel(val, spi->regbase + reg);
+}
+
+static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+ u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
+
+ return (fstat & SPI_FSTAT_TXFFL_M) >> SPI_FSTAT_TXFFL_S;
+}
+
+static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
+{
+ u32 fstat = lantiq_ssc_readl(spi, SPI_FSTAT);
+
+ return fstat & SPI_FSTAT_RXFFL_M;
+}
+
+static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
+{
+ return spi->tx_fifo_size - tx_fifo_level(spi);
+}
+
+static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+ u32 val = spi->rx_fifo_size << SPI_RXFCON_RXFITL_S;
+
+ val |= SPI_RXFCON_RXFEN | SPI_RXFCON_RXFLU;
+ lantiq_ssc_writel(spi, val, SPI_RXFCON);
+}
+
+static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
+{
+ u32 val = 1 << SPI_TXFCON_TXFITL_S;
+
+ val |= SPI_TXFCON_TXFEN | SPI_TXFCON_TXFLU;
+ lantiq_ssc_writel(spi, val, SPI_TXFCON);
+}
+
+static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_maskl(spi, 0, SPI_RXFCON_RXFLU, SPI_RXFCON);
+}
+
+static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_maskl(spi, 0, SPI_TXFCON_TXFLU, SPI_TXFCON);
+}
+
+static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_writel(spi, SPI_WHBSTATE_CLREN, SPI_WHBSTATE);
+}
+
+static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
+{
+ lantiq_ssc_writel(spi, SPI_WHBSTATE_SETEN, SPI_WHBSTATE);
+}
+
+static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
+ unsigned int max_speed_hz)
+{
+ u32 spi_clk, brt;
+
+ /*
+ * SPI module clock is derived from FPI bus clock dependent on
+ * divider value in CLC.RMS which is always set to 1.
+ *
+ * f_SPI
+ * baudrate = --------------
+ * 2 * (BR + 1)
+ */
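+ /*
+ * Worked example (illustrative figures only): with a 100 MHz FPI clock
+ * spi_clk below is 50 MHz; a 2 MHz max_speed_hz then gives
+ * brt = 50000000 / 2000000 - 1 = 24, i.e. 100 MHz / (2 * 25) = 2 MHz
+ * on the wire.
+ */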
+ spi_clk = clk_get_rate(spi->fpi_clk) / 2;
+
+ if (max_speed_hz > spi_clk)
+ brt = 0;
+ else
+ brt = spi_clk / max_speed_hz - 1;
+
+ if (brt > 0xFFFF)
+ brt = 0xFFFF;
+
+ dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
+ spi_clk, max_speed_hz, brt);
+
+ lantiq_ssc_writel(spi, brt, SPI_BRT);
+}
+
+static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
+ unsigned int bits_per_word)
+{
+ u32 bm;
+
+ /* CON.BM value = bits_per_word - 1 */
+ bm = (bits_per_word - 1) << SPI_CON_BM_S;
+
+ lantiq_ssc_maskl(spi, SPI_CON_BM_M, bm, SPI_CON);
+}
+
+static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
+ unsigned int mode)
+{
+ u32 con_set = 0, con_clr = 0;
+
+ /*
+ * SPI mode mapping in CON register:
+ * Mode CPOL CPHA CON.PO CON.PH
+ * 0 0 0 0 1
+ * 1 0 1 0 0
+ * 2 1 0 1 1
+ * 3 1 1 1 0
+ */
+ if (mode & SPI_CPHA)
+ con_clr |= SPI_CON_PH;
+ else
+ con_set |= SPI_CON_PH;
+
+ if (mode & SPI_CPOL)
+ con_set |= SPI_CON_PO | SPI_CON_IDLE;
+ else
+ con_clr |= SPI_CON_PO | SPI_CON_IDLE;
+
+ /* Set heading control */
+ if (mode & SPI_LSB_FIRST)
+ con_clr |= SPI_CON_HB;
+ else
+ con_set |= SPI_CON_HB;
+
+ /* Set loopback mode */
+ if (mode & SPI_LOOP)
+ con_set |= SPI_CON_LB;
+ else
+ con_clr |= SPI_CON_LB;
+
+ lantiq_ssc_maskl(spi, con_clr, con_set, SPI_CON);
+}
+
+static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
+{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+
+ /*
+ * Set clock divider for run mode to 1 to
+ * run at same frequency as FPI bus
+ */
+ lantiq_ssc_writel(spi, 1 << SPI_CLC_RMC_S, SPI_CLC);
+
+ /* Put controller into config mode */
+ hw_enter_config_mode(spi);
+
+ /* Clear error flags */
+ lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
+
+ /* Enable error checking, disable TX/RX */
+ lantiq_ssc_writel(spi, SPI_CON_RUEN | SPI_CON_AEN | SPI_CON_TEN |
+ SPI_CON_REN | SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
+
+ /* Setup default SPI mode */
+ hw_setup_bits_per_word(spi, spi->bits_per_word);
+ hw_setup_clock_mode(spi, SPI_MODE_0);
+
+ /* Enable master mode and clear error flags */
+ lantiq_ssc_writel(spi, SPI_WHBSTATE_SETMS | SPI_WHBSTATE_CLR_ERRORS,
+ SPI_WHBSTATE);
+
+ /* Reset GPIO/CS registers */
+ lantiq_ssc_writel(spi, 0, SPI_GPOCON);
+ lantiq_ssc_writel(spi, 0xFF00, SPI_FPGO);
+
+ /* Enable and flush FIFOs */
+ rx_fifo_reset(spi);
+ tx_fifo_reset(spi);
+
+ /* Enable interrupts */
+ lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r | SPI_IRNEN_E,
+ SPI_IRNEN);
+}
+
+static int lantiq_ssc_setup(struct spi_device *spidev)
+{
+ struct spi_master *master = spidev->master;
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ unsigned int cs = spidev->chip_select;
+ u32 gpocon;
+
+ /* GPIOs are used for CS */
+ if (gpio_is_valid(spidev->cs_gpio))
+ return 0;
+
+ dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
+
+ if (cs < spi->base_cs) {
+ dev_err(spi->dev,
+ "chipselect %i too small (min %i)\n", cs, spi->base_cs);
+ return -EINVAL;
+ }
+
+ /* set GPO pin to CS mode */
+ gpocon = 1 << ((cs - spi->base_cs) + SPI_GPOCON_ISCSBN_S);
+
+ /* invert GPO pin */
+ if (spidev->mode & SPI_CS_HIGH)
+ gpocon |= 1 << (cs - spi->base_cs);
+
+ lantiq_ssc_maskl(spi, 0, gpocon, SPI_GPOCON);
+
+ return 0;
+}
+
+static int lantiq_ssc_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ hw_enter_config_mode(spi);
+ hw_setup_clock_mode(spi, message->spi->mode);
+ hw_enter_active_mode(spi);
+
+ return 0;
+}
+
+static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
+ struct spi_device *spidev, struct spi_transfer *t)
+{
+ unsigned int speed_hz = t->speed_hz;
+ unsigned int bits_per_word = t->bits_per_word;
+ u32 con;
+
+ if (bits_per_word != spi->bits_per_word ||
+ speed_hz != spi->speed_hz) {
+ hw_enter_config_mode(spi);
+ hw_setup_speed_hz(spi, speed_hz);
+ hw_setup_bits_per_word(spi, bits_per_word);
+ hw_enter_active_mode(spi);
+
+ spi->speed_hz = speed_hz;
+ spi->bits_per_word = bits_per_word;
+ }
+
+ /* Configure transmitter and receiver */
+ con = lantiq_ssc_readl(spi, SPI_CON);
+ if (t->tx_buf)
+ con &= ~SPI_CON_TXOFF;
+ else
+ con |= SPI_CON_TXOFF;
+
+ if (t->rx_buf)
+ con &= ~SPI_CON_RXOFF;
+ else
+ con |= SPI_CON_RXOFF;
+
+ lantiq_ssc_writel(spi, con, SPI_CON);
+}
+
+static int lantiq_ssc_unprepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ flush_workqueue(spi->wq);
+
+ /* Disable transmitter and receiver while idle */
+ lantiq_ssc_maskl(spi, 0, SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
+
+ return 0;
+}
+
+static void tx_fifo_write(struct lantiq_ssc_spi *spi)
+{
+ const u8 *tx8;
+ const u16 *tx16;
+ const u32 *tx32;
+ u32 data;
+ unsigned int tx_free = tx_fifo_free(spi);
+
+ while (spi->tx_todo && tx_free) {
+ switch (spi->bits_per_word) {
+ case 2 ... 8:
+ tx8 = spi->tx;
+ data = *tx8;
+ spi->tx_todo--;
+ spi->tx++;
+ break;
+ case 16:
+ tx16 = (u16 *) spi->tx;
+ data = *tx16;
+ spi->tx_todo -= 2;
+ spi->tx += 2;
+ break;
+ case 32:
+ tx32 = (u32 *) spi->tx;
+ data = *tx32;
+ spi->tx_todo -= 4;
+ spi->tx += 4;
+ break;
+ default:
+ WARN_ON(1);
+ data = 0;
+ break;
+ }
+
+ lantiq_ssc_writel(spi, data, SPI_TB);
+ tx_free--;
+ }
+}
+
+static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
+{
+ u8 *rx8;
+ u16 *rx16;
+ u32 *rx32;
+ u32 data;
+ unsigned int rx_fill = rx_fifo_level(spi);
+
+ while (rx_fill) {
+ data = lantiq_ssc_readl(spi, SPI_RB);
+
+ switch (spi->bits_per_word) {
+ case 2 ... 8:
+ rx8 = spi->rx;
+ *rx8 = data;
+ spi->rx_todo--;
+ spi->rx++;
+ break;
+ case 16:
+ rx16 = (u16 *) spi->rx;
+ *rx16 = data;
+ spi->rx_todo -= 2;
+ spi->rx += 2;
+ break;
+ case 32:
+ rx32 = (u32 *) spi->rx;
+ *rx32 = data;
+ spi->rx_todo -= 4;
+ spi->rx += 4;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ rx_fill--;
+ }
+}
+
+static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
+{
+ u32 data, *rx32;
+ u8 *rx8;
+ unsigned int rxbv, shift;
+ unsigned int rx_fill = rx_fifo_level(spi);
+
+ /*
+ * In RX-only mode the bits per word value is ignored by HW. A value
+ * of 32 is used instead. Thus all 4 bytes per FIFO entry must be read.
+ * If remaining RX bytes are less than 4, the FIFO must be read
+ * differently. The amount of received and valid bytes is indicated
+ * by STAT.RXBV register value.
+ */
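+ /*
+ * Illustration (hypothetical final read): with rx_todo = 3 the hardware
+ * reports STAT.RXBV = 3; the loop below assumes the three valid bytes
+ * occupy bits 23..0 of the RB word and extracts them MSB-first with
+ * shifts of 16, 8 and 0.
+ */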
+ while (rx_fill) {
+ if (spi->rx_todo < 4) {
+ rxbv = (lantiq_ssc_readl(spi, SPI_STAT) &
+ SPI_STAT_RXBV_M) >> SPI_STAT_RXBV_S;
+ data = lantiq_ssc_readl(spi, SPI_RB);
+
+ shift = (rxbv - 1) * 8;
+ rx8 = spi->rx;
+
+ while (rxbv) {
+ *rx8++ = (data >> shift) & 0xFF;
+ rxbv--;
+ shift -= 8;
+ spi->rx_todo--;
+ spi->rx++;
+ }
+ } else {
+ data = lantiq_ssc_readl(spi, SPI_RB);
+ rx32 = (u32 *) spi->rx;
+
+ *rx32++ = data;
+ spi->rx_todo -= 4;
+ spi->rx += 4;
+ }
+ rx_fill--;
+ }
+}
+
+static void rx_request(struct lantiq_ssc_spi *spi)
+{
+ unsigned int rxreq, rxreq_max;
+
+ /*
+ * To avoid receive overflows at high clocks it is better to request
+ * only the amount of bytes that fits into all FIFOs. This value
+ * depends on the FIFO size implemented in hardware.
+ */
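+ /*
+ * Each FIFO entry holds one 32-bit word, i.e. 4 bytes. With an 8-word
+ * RX FIFO (a size used here purely as an example) at most 32 bytes are
+ * requested per round.
+ */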
+ rxreq = spi->rx_todo;
+ rxreq_max = spi->rx_fifo_size * 4;
+ if (rxreq > rxreq_max)
+ rxreq = rxreq_max;
+
+ lantiq_ssc_writel(spi, rxreq, SPI_RXREQ);
+}
+
+static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+
+ if (spi->tx) {
+ if (spi->rx && spi->rx_todo)
+ rx_fifo_read_full_duplex(spi);
+
+ if (spi->tx_todo)
+ tx_fifo_write(spi);
+ else if (!tx_fifo_level(spi))
+ goto completed;
+ } else if (spi->rx) {
+ if (spi->rx_todo) {
+ rx_fifo_read_half_duplex(spi);
+
+ if (spi->rx_todo)
+ rx_request(spi);
+ else
+ goto completed;
+ } else {
+ goto completed;
+ }
+ }
+
+ return IRQ_HANDLED;
+
+completed:
+ queue_work(spi->wq, &spi->work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+ u32 stat = lantiq_ssc_readl(spi, SPI_STAT);
+
+ if (!(stat & SPI_STAT_ERRORS))
+ return IRQ_NONE;
+
+ if (stat & SPI_STAT_RUE)
+ dev_err(spi->dev, "receive underflow error\n");
+ if (stat & SPI_STAT_TUE)
+ dev_err(spi->dev, "transmit underflow error\n");
+ if (stat & SPI_STAT_AE)
+ dev_err(spi->dev, "abort error\n");
+ if (stat & SPI_STAT_RE)
+ dev_err(spi->dev, "receive overflow error\n");
+ if (stat & SPI_STAT_TE)
+ dev_err(spi->dev, "transmit overflow error\n");
+ if (stat & SPI_STAT_ME)
+ dev_err(spi->dev, "mode error\n");
+
+ /* Clear error flags */
+ lantiq_ssc_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
+
+ /* set bad status so it can be retried */
+ if (spi->master->cur_msg)
+ spi->master->cur_msg->status = -EIO;
+ queue_work(spi->wq, &spi->work);
+
+ return IRQ_HANDLED;
+}
+
+static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
+ struct spi_transfer *t)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ spi->tx = t->tx_buf;
+ spi->rx = t->rx_buf;
+
+ if (t->tx_buf) {
+ spi->tx_todo = t->len;
+
+ /* initially fill TX FIFO */
+ tx_fifo_write(spi);
+ }
+
+ if (spi->rx) {
+ spi->rx_todo = t->len;
+
+ /* start shift clock in RX-only mode */
+ if (!spi->tx)
+ rx_request(spi);
+ }
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return t->len;
+}
+
+/*
+ * The driver only gets an interrupt when the FIFO is empty, but there
+ * is an additional shift register from which the data is written to
+ * the wire. We get the last interrupt when the controller starts to
+ * write the last word to the wire, not when it is finished. Do busy
+ * waiting till it finishes.
+ */
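+ /*
+ * Illustrative arithmetic: the 8000 / speed_hz millisecond word time
+ * computed below rounds down to at most 1 ms for any realistic clock,
+ * so the 100 ms tolerance dominates the resulting timeout.
+ */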
+static void lantiq_ssc_bussy_work(struct work_struct *work)
+{
+ struct lantiq_ssc_spi *spi;
+ unsigned long long timeout = 8LL * 1000LL;
+ unsigned long end;
+
+ spi = container_of(work, typeof(*spi), work);
+
+ do_div(timeout, spi->speed_hz);
+ timeout += timeout + 100; /* some tolerance */
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ u32 stat = lantiq_ssc_readl(spi, SPI_STAT);
+
+ if (!(stat & SPI_STAT_BSY)) {
+ spi_finalize_current_transfer(spi->master);
+ return;
+ }
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, end));
+
+ if (spi->master->cur_msg)
+ spi->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(spi->master);
+}
+
+static void lantiq_ssc_handle_err(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ /* flush FIFOs on timeout */
+ rx_fifo_flush(spi);
+ tx_fifo_flush(spi);
+}
+
+static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
+ unsigned int cs = spidev->chip_select;
+ u32 fgpo;
+
+ if (!!(spidev->mode & SPI_CS_HIGH) == enable)
+ fgpo = (1 << (cs - spi->base_cs));
+ else
+ fgpo = (1 << (cs - spi->base_cs + SPI_FGPO_SETOUTN_S));
+
+ lantiq_ssc_writel(spi, fgpo, SPI_FPGO);
+}
+
+static int lantiq_ssc_transfer_one(struct spi_master *master,
+ struct spi_device *spidev,
+ struct spi_transfer *t)
+{
+ struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+
+ hw_setup_transfer(spi, spidev, t);
+
+ return transfer_start(spi, spidev, t);
+}
+
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
+ .irnen_r = SPI_IRNEN_R_XWAY,
+ .irnen_t = SPI_IRNEN_T_XWAY,
+};
+
+static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
+ .irnen_r = SPI_IRNEN_R_XRX,
+ .irnen_t = SPI_IRNEN_T_XRX,
+};
+
+static const struct of_device_id lantiq_ssc_match[] = {
+ { .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
+ { .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
+ { .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
+
+static int lantiq_ssc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct resource *res;
+ struct lantiq_ssc_spi *spi;
+ const struct lantiq_ssc_hwcfg *hwcfg;
+ const struct of_device_id *match;
+ int err, rx_irq, tx_irq, err_irq;
+ u32 id, supports_dma, revision;
+ unsigned int num_cs;
+
+ match = of_match_device(lantiq_ssc_match, dev);
+ if (!match) {
+ dev_err(dev, "no device match\n");
+ return -EINVAL;
+ }
+ hwcfg = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get resources\n");
+ return -ENXIO;
+ }
+
+ rx_irq = platform_get_irq_byname(pdev, SPI_RX_IRQ_NAME);
+ if (rx_irq < 0) {
+ dev_err(dev, "failed to get %s\n", SPI_RX_IRQ_NAME);
+ return -ENXIO;
+ }
+
+ tx_irq = platform_get_irq_byname(pdev, SPI_TX_IRQ_NAME);
+ if (tx_irq < 0) {
+ dev_err(dev, "failed to get %s\n", SPI_TX_IRQ_NAME);
+ return -ENXIO;
+ }
+
+ err_irq = platform_get_irq_byname(pdev, SPI_ERR_IRQ_NAME);
+ if (err_irq < 0) {
+ dev_err(dev, "failed to get %s\n", SPI_ERR_IRQ_NAME);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
+ if (!master)
+ return -ENOMEM;
+
+ spi = spi_master_get_devdata(master);
+ spi->master = master;
+ spi->dev = dev;
+ spi->hwcfg = hwcfg;
+ platform_set_drvdata(pdev, spi);
+
+ spi->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(spi->regbase)) {
+ err = PTR_ERR(spi->regbase);
+ goto err_master_put;
+ }
+
+ err = devm_request_irq(dev, rx_irq, lantiq_ssc_xmit_interrupt,
+ 0, SPI_RX_IRQ_NAME, spi);
+ if (err)
+ goto err_master_put;
+
+ err = devm_request_irq(dev, tx_irq, lantiq_ssc_xmit_interrupt,
+ 0, SPI_TX_IRQ_NAME, spi);
+ if (err)
+ goto err_master_put;
+
+ err = devm_request_irq(dev, err_irq, lantiq_ssc_err_interrupt,
+ 0, SPI_ERR_IRQ_NAME, spi);
+ if (err)
+ goto err_master_put;
+
+ spi->spi_clk = devm_clk_get(dev, "gate");
+ if (IS_ERR(spi->spi_clk)) {
+ err = PTR_ERR(spi->spi_clk);
+ goto err_master_put;
+ }
+ err = clk_prepare_enable(spi->spi_clk);
+ if (err)
+ goto err_master_put;
+
+ /*
+ * Use the old clk_get_fpi() function on Lantiq platform, till it
+ * supports common clk.
+ */
+#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
+ spi->fpi_clk = clk_get_fpi();
+#else
+ spi->fpi_clk = clk_get(dev, "freq");
+#endif
+ if (IS_ERR(spi->fpi_clk)) {
+ err = PTR_ERR(spi->fpi_clk);
+ goto err_clk_disable;
+ }
+
+ num_cs = 8;
+ of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+
+ spi->base_cs = 1;
+ of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
+
+ spin_lock_init(&spi->lock);
+ spi->bits_per_word = 8;
+ spi->speed_hz = 0;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->num_chipselect = num_cs;
+ master->setup = lantiq_ssc_setup;
+ master->set_cs = lantiq_ssc_set_cs;
+ master->handle_err = lantiq_ssc_handle_err;
+ master->prepare_message = lantiq_ssc_prepare_message;
+ master->unprepare_message = lantiq_ssc_unprepare_message;
+ master->transfer_one = lantiq_ssc_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
+ SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+
+ spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+ if (!spi->wq) {
+ err = -ENOMEM;
+ goto err_clk_put;
+ }
+ INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
+
+ id = lantiq_ssc_readl(spi, SPI_ID);
+ spi->tx_fifo_size = (id & SPI_ID_TXFS_M) >> SPI_ID_TXFS_S;
+ spi->rx_fifo_size = (id & SPI_ID_RXFS_M) >> SPI_ID_RXFS_S;
+ supports_dma = (id & SPI_ID_CFG_M) >> SPI_ID_CFG_S;
+ revision = id & SPI_ID_REV_M;
+
+ lantiq_ssc_hw_init(spi);
+
+ dev_info(dev,
+ "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
+ revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
+
+ err = devm_spi_register_master(dev, master);
+ if (err) {
+ dev_err(dev, "failed to register spi_master\n");
+ goto err_wq_destroy;
+ }
+
+ return 0;
+
+err_wq_destroy:
+ destroy_workqueue(spi->wq);
+err_clk_put:
+ clk_put(spi->fpi_clk);
+err_clk_disable:
+ clk_disable_unprepare(spi->spi_clk);
+err_master_put:
+ spi_master_put(master);
+
+ return err;
+}
+
+static int lantiq_ssc_remove(struct platform_device *pdev)
+{
+ struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
+
+ lantiq_ssc_writel(spi, 0, SPI_IRNEN);
+ lantiq_ssc_writel(spi, 0, SPI_CLC);
+ rx_fifo_flush(spi);
+ tx_fifo_flush(spi);
+ hw_enter_config_mode(spi);
+
+ destroy_workqueue(spi->wq);
+ clk_disable_unprepare(spi->spi_clk);
+ clk_put(spi->fpi_clk);
+
+ return 0;
+}
+
+static struct platform_driver lantiq_ssc_driver = {
+ .probe = lantiq_ssc_probe,
+ .remove = lantiq_ssc_remove,
+ .driver = {
+ .name = "spi-lantiq-ssc",
+ .owner = THIS_MODULE,
+ .of_match_table = lantiq_ssc_match,
+ },
+};
+module_platform_driver(lantiq_ssc_driver);
+
+MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
+MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spi-lantiq-ssc");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index c36002110c30..e8b59ce4dc3a 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -437,8 +437,9 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
- ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
- GFP_KERNEL);
+ ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
+ sizeof(*ms->gpio_cs),
+ GFP_KERNEL);
if (!ms->gpio_cs) {
rc = -ENOMEM;
goto err_alloc_gpio;
@@ -448,8 +449,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
gpio_cs = of_get_gpio(op->dev.of_node, i);
if (gpio_cs < 0) {
dev_err(&op->dev,
- "could not parse the gpio field "
- "in oftree\n");
+ "could not parse the gpio field in oftree\n");
rc = -ENODEV;
goto err_gpio;
}
@@ -457,8 +457,8 @@ static int mpc52xx_spi_probe(struct platform_device *op)
rc = gpio_request(gpio_cs, dev_name(&op->dev));
if (rc) {
dev_err(&op->dev,
- "can't request spi cs gpio #%d "
- "on gpio line %d\n", i, gpio_cs);
+ "can't request spi cs gpio #%d on gpio line %d\n",
+ i, gpio_cs);
goto err_gpio;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 899d7a8f0889..278867a31950 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -73,7 +73,7 @@
#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1
-#define MTK_SPI_MAX_FIFO_SIZE 32
+#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
struct mtk_spi_compatible {
@@ -333,7 +333,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
struct mtk_spi *mdata = spi_master_get_devdata(master);
mdata->cur_transfer = xfer;
- mdata->xfer_len = xfer->len;
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mtk_spi_prepare_transfer(master, xfer);
mtk_spi_setup_packet(master);
@@ -410,7 +410,10 @@ static bool mtk_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
+ /* Buffers for DMA transactions must be 4-byte aligned */
+ return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
+ (unsigned long)xfer->tx_buf % 4 == 0 &&
+ (unsigned long)xfer->rx_buf % 4 == 0);
}
static int mtk_spi_setup(struct spi_device *spi)
@@ -451,7 +454,33 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
&reg_val, remainder);
}
}
- spi_finalize_current_transfer(master);
+
+ trans->len -= mdata->xfer_len;
+ if (!trans->len) {
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+ }
+
+ if (trans->tx_buf)
+ trans->tx_buf += mdata->xfer_len;
+ if (trans->rx_buf)
+ trans->rx_buf += mdata->xfer_len;
+
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len);
+ mtk_spi_setup_packet(master);
+
+ cnt = trans->len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG, trans->tx_buf, cnt);
+
+ remainder = trans->len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
+
+ mtk_spi_enable_transfer(master);
+
return IRQ_HANDLED;
}
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index dd3d0a218d8b..967d94844b30 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -411,7 +411,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
if (num_gpios > 0) {
int i;
- hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
+ hw->gpios = kcalloc(num_gpios, sizeof(*hw->gpios), GFP_KERNEL);
if (!hw->gpios) {
ret = -ENOMEM;
goto free_master;
@@ -428,8 +428,9 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
/* Real CS - set the initial state. */
ret = gpio_request(gpio, np->name);
if (ret < 0) {
- dev_err(dev, "can't request gpio "
- "#%d: %d\n", i, ret);
+ dev_err(dev,
+ "can't request gpio #%d: %d\n",
+ i, ret);
goto free_gpios;
}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 58d2d48e16a5..869f188b02eb 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -41,6 +41,13 @@ struct pxa_spi_info {
static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
static struct dw_dma_slave byt_rx_param = { .src_id = 1 };
+static struct dw_dma_slave mrfld3_tx_param = { .dst_id = 15 };
+static struct dw_dma_slave mrfld3_rx_param = { .src_id = 14 };
+static struct dw_dma_slave mrfld5_tx_param = { .dst_id = 13 };
+static struct dw_dma_slave mrfld5_rx_param = { .src_id = 12 };
+static struct dw_dma_slave mrfld6_tx_param = { .dst_id = 11 };
+static struct dw_dma_slave mrfld6_rx_param = { .src_id = 10 };
+
static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 };
static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 };
static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 };
@@ -93,22 +100,39 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
{
+ struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ struct dw_dma_slave *tx, *rx;
+
switch (PCI_FUNC(dev->devfn)) {
case 0:
c->port_id = 3;
c->num_chipselect = 1;
+ c->tx_param = &mrfld3_tx_param;
+ c->rx_param = &mrfld3_rx_param;
break;
case 1:
c->port_id = 5;
c->num_chipselect = 4;
+ c->tx_param = &mrfld5_tx_param;
+ c->rx_param = &mrfld5_rx_param;
break;
case 2:
c->port_id = 6;
c->num_chipselect = 1;
+ c->tx_param = &mrfld6_tx_param;
+ c->rx_param = &mrfld6_rx_param;
break;
default:
return -ENODEV;
}
+
+ tx = c->tx_param;
+ tx->dma_dev = &dma_dev->dev;
+
+ rx = c->rx_param;
+ rx->dma_dev = &dma_dev->dev;
+
+ c->dma_filter = lpss_dma_filter;
return 0;
}
@@ -203,10 +227,16 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
- ssp->irq = dev->irq;
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
ssp->type = c->type;
+ pci_set_master(dev);
+
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+ ssp->irq = pci_irq_vector(dev, 0);
+
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, 0,
c->max_clk_rate);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b47291d..47b65d7c4072 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -732,6 +732,20 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
return IRQ_HANDLED;
}
+static void handle_bad_msg(struct driver_data *drv_data)
+{
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+
+ dev_err(&drv_data->pdev->dev,
+ "bad message state in interrupt handler\n");
+}
+
static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
@@ -771,21 +785,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(status & mask))
return IRQ_NONE;
- if (!drv_data->master->cur_msg) {
-
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0)
- & ~SSCR0_SSE);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~drv_data->int_cr1);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
- write_SSSR_CS(drv_data, drv_data->clear_sr);
-
- dev_err(&drv_data->pdev->dev,
- "bad message state in interrupt handler\n");
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+ if (!drv_data->master->cur_msg) {
+ handle_bad_msg(drv_data);
/* Never fail */
return IRQ_HANDLED;
}
@@ -1458,6 +1462,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
+ /* GLK */
+ { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1690,6 +1698,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
default:
tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 0f89c2169c24..acf31f36b898 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -17,6 +17,7 @@
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
@@ -843,6 +844,8 @@ static int rockchip_spi_suspend(struct device *dev)
clk_disable_unprepare(rs->apb_pclk);
}
+ pinctrl_pm_select_sleep_state(dev);
+
return ret;
}
@@ -852,6 +855,8 @@ static int rockchip_spi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct rockchip_spi *rs = spi_master_get_devdata(master);
+ pinctrl_pm_select_default_state(dev);
+
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 9daf50031737..2a10b3f94ff7 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -808,7 +808,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
for (i = 0; i < len; i++)
rspi_write_data(rspi, *tx++);
} else {
- ret = rspi_pio_transfer(rspi, tx, NULL, n);
+ ret = rspi_pio_transfer(rspi, tx, NULL, len);
if (ret < 0)
return ret;
}
@@ -845,10 +845,9 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
for (i = 0; i < len; i++)
*rx++ = rspi_read_data(rspi);
} else {
- ret = rspi_pio_transfer(rspi, NULL, rx, n);
+ ret = rspi_pio_transfer(rspi, NULL, rx, len);
if (ret < 0)
return ret;
- *rx++ = ret;
}
n -= len;
}
@@ -1227,10 +1226,8 @@ static int rspi_probe(struct platform_device *pdev)
const struct spi_ops *ops;
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
- if (master == NULL) {
- dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ if (master == NULL)
return -ENOMEM;
- }
of_id = of_match_device(rspi_of_match, &pdev->dev);
if (of_id) {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 28dfdce4beae..b392cca8fa4f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -341,43 +341,16 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
- struct device *dev = &sdd->pdev->dev;
if (is_polling(sdd))
return 0;
- /* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
- if (!sdd->rx_dma.ch) {
- dev_err(dev, "Failed to get RX DMA channel\n");
- return -EBUSY;
- }
spi->dma_rx = sdd->rx_dma.ch;
-
- sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
- if (!sdd->tx_dma.ch) {
- dev_err(dev, "Failed to get TX DMA channel\n");
- dma_release_channel(sdd->rx_dma.ch);
- return -EBUSY;
- }
spi->dma_tx = sdd->tx_dma.ch;
return 0;
}
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- if (!is_polling(sdd)) {
- dma_release_channel(sdd->rx_dma.ch);
- dma_release_channel(sdd->tx_dma.ch);
- }
-
- return 0;
-}
-
static bool s3c64xx_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -996,7 +969,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
sci->num_cs = temp;
}
- sci->no_cs = of_property_read_bool(dev->of_node, "broken-cs");
+ sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
return sci;
}
@@ -1094,7 +1067,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
- master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
@@ -1161,6 +1133,24 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
}
}
+ if (!is_polling(sdd)) {
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
+ "rx");
+ if (IS_ERR(sdd->rx_dma.ch)) {
+ dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
+ ret = PTR_ERR(sdd->rx_dma.ch);
+ goto err_disable_io_clk;
+ }
+ sdd->tx_dma.ch = dma_request_slave_channel_reason(&pdev->dev,
+ "tx");
+ if (IS_ERR(sdd->tx_dma.ch)) {
+ dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
+ ret = PTR_ERR(sdd->tx_dma.ch);
+ goto err_release_rx_dma;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -1206,6 +1196,12 @@ err_pm_put:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ if (!is_polling(sdd))
+ dma_release_channel(sdd->tx_dma.ch);
+err_release_rx_dma:
+ if (!is_polling(sdd))
+ dma_release_channel(sdd->rx_dma.ch);
+err_disable_io_clk:
clk_disable_unprepare(sdd->ioclk);
err_disable_src_clk:
clk_disable_unprepare(sdd->src_clk);
@@ -1226,6 +1222,11 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
+ if (!is_polling(sdd)) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ }
+
clk_disable_unprepare(sdd->ioclk);
clk_disable_unprepare(sdd->src_clk);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad02e569..2ce15ca97782 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
};
static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", .data = &sh_data },
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+ { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
@@ -1162,10 +1164,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
- if (master == NULL) {
- dev_err(&pdev->dev, "failed to allocate spi master\n");
+ if (master == NULL)
return -ENOMEM;
- }
p = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index ec6fb09e2e17..ad76a44fee6f 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -652,7 +652,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto free_master;
}
}
@@ -669,7 +670,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- return irq;
+ ret = irq;
+ goto free_master;
}
mutex_init(&qspi->list_lock);
@@ -685,15 +687,17 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->ctrl_base =
syscon_regmap_lookup_by_phandle(np,
"syscon-chipselects");
- if (IS_ERR(qspi->ctrl_base))
- return PTR_ERR(qspi->ctrl_base);
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
ret = of_property_read_u32_index(np,
"syscon-chipselects",
1, &qspi->ctrl_reg);
if (ret) {
dev_err(&pdev->dev,
"couldn't get ctrl_mod reg index\n");
- return ret;
+ goto free_master;
}
}
@@ -714,9 +718,10 @@ static int ti_qspi_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, mask);
qspi->rx_chan = dma_request_chan_by_mask(&mask);
- if (!qspi->rx_chan) {
+ if (IS_ERR(qspi->rx_chan)) {
dev_err(qspi->dev,
"No Rx DMA available, trying mmap mode\n");
+ qspi->rx_chan = NULL;
ret = 0;
goto no_dma;
}
@@ -742,6 +747,7 @@ no_dma:
if (!ret)
return 0;
+ pm_runtime_disable(&pdev->dev);
free_master:
spi_master_put(master);
return ret;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index fcb991034c3d..97d137591b18 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -591,7 +591,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
- dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -ENOMEM;
@@ -622,8 +621,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
- dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
- "0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->master->dev,
+ "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
+ __func__);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
@@ -915,7 +915,6 @@ static void pch_spi_release_dma(struct pch_spi_data *data)
dma_release_channel(dma->chan_rx);
dma->chan_rx = NULL;
}
- return;
}
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
@@ -1008,7 +1007,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
spin_unlock_irqrestore(&data->lock, flags);
/* RX */
- dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_rx_p;
@@ -1068,7 +1067,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
head = 0;
}
- dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_tx_p;
@@ -1181,14 +1180,16 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->cur_trans =
list_entry(data->current_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev, "%s "
- ":Getting 1st transfer message\n", __func__);
+ dev_dbg(&data->master->dev,
+ "%s :Getting 1st transfer message\n",
+ __func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev, "%s "
- ":Getting next transfer message\n", __func__);
+ dev_dbg(&data->master->dev,
+ "%s :Getting next transfer message\n",
+ __func__);
}
spin_unlock(&data->lock);
@@ -1233,9 +1234,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
/* check for delay */
if (data->cur_trans->delay_usecs) {
- dev_dbg(&data->master->dev, "%s:"
- "delay in usec=%d\n", __func__,
- data->cur_trans->delay_usecs);
+ dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
+ __func__, data->cur_trans->delay_usecs);
udelay(data->cur_trans->delay_usecs);
}
@@ -1292,7 +1292,6 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
if (dma->rx_buf_dma)
dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
dma->rx_buf_virt, dma->rx_buf_dma);
- return;
}
static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
@@ -1541,11 +1540,11 @@ static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int i;
struct pch_pd_dev_save *pd_dev_save;
- pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+ pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
if (!pd_dev_save)
return -ENOMEM;
- board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+ board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
if (!board_dat) {
retval = -ENOMEM;
goto err_no_mem;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 656dd3e3220c..44222ef9471e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -621,8 +621,10 @@ void spi_unregister_device(struct spi_device *spi)
if (!spi)
return;
- if (spi->dev.of_node)
+ if (spi->dev.of_node) {
of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+ of_node_put(spi->dev.of_node);
+ }
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_unregister(&spi->dev);
@@ -672,7 +674,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
if (!n)
return -EINVAL;
- bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+ bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
if (!bi)
return -ENOMEM;
@@ -805,12 +807,12 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
if (master->dma_tx)
tx_dev = master->dma_tx->device->dev;
else
- tx_dev = &master->dev;
+ tx_dev = master->dev.parent;
if (master->dma_rx)
rx_dev = master->dma_rx->device->dev;
else
- rx_dev = &master->dev;
+ rx_dev = master->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -852,12 +854,12 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
if (master->dma_tx)
tx_dev = master->dma_tx->device->dev;
else
- tx_dev = &master->dev;
+ tx_dev = master->dev.parent;
if (master->dma_rx)
rx_dev = master->dma_rx->device->dev;
else
- rx_dev = &master->dev;
+ rx_dev = master->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -1502,37 +1504,18 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
-static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+ struct device_node *nc)
{
- struct spi_device *spi;
- int rc;
u32 value;
-
- /* Alloc an spi_device */
- spi = spi_alloc_device(master);
- if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
- nc->full_name);
- rc = -ENOMEM;
- goto err_out;
- }
-
- /* Select device driver */
- rc = of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias));
- if (rc < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
- nc->full_name);
- goto err_out;
- }
+ int rc;
/* Device address */
rc = of_property_read_u32(nc, "reg", &value);
if (rc) {
dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
nc->full_name, rc);
- goto err_out;
+ return rc;
}
spi->chip_select = value;
@@ -1590,10 +1573,41 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
if (rc) {
dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
nc->full_name, rc);
- goto err_out;
+ return rc;
}
spi->max_speed_hz = value;
+ return 0;
+}
+
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+ struct spi_device *spi;
+ int rc;
+
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Select device driver */
+ rc = of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias));
+ if (rc < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ rc = of_spi_parse_dt(master, spi, nc);
+ if (rc)
+ goto err_out;
+
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
@@ -1603,11 +1617,13 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
nc->full_name);
- goto err_out;
+ goto err_of_node_put;
}
return spi;
+err_of_node_put:
+ of_node_put(nc);
err_out:
spi_dev_put(spi);
return ERR_PTR(rc);
@@ -1722,13 +1738,15 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
return AE_OK;
}
+ acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
+ sizeof(spi->modalias));
+
if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
acpi_device_set_enumerated(adev);
adev->power.flags.ignore_parent = true;
- strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
adev->power.flags.ignore_parent = false;
dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index b653451843c8..937c2d5d7ec3 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1300,7 +1300,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16u %16zu %d %d\n",
buffer->task_comm, buffer->pid,
buffer->size, buffer->kmap_cnt,
- atomic_read(&buffer->ref.refcount));
+ kref_read(&buffer->ref));
total_orphaned_size += buffer->size;
}
}
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index c7d7682b1412..1e1df89b5018 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -188,7 +188,7 @@ bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
struct comedi_buf_map *bm = s->async->buf_map;
- return bm && (atomic_read(&bm->refcount.refcount) > 1);
+ return bm && (kref_read(&bm->refcount) > 1);
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 250caa00de5e..51384bdde450 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -474,17 +474,20 @@ static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}
-static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
+static int gb_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
- u16 usec;
+ u32 debounce;
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
if (debounce > U16_MAX)
return -EINVAL;
- usec = (u16)debounce;
- return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
+ return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
@@ -689,7 +692,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
gpio->set = gb_gpio_set;
- gpio->set_debounce = gb_gpio_set_debounce;
+ gpio->set_config = gb_gpio_set_config;
gpio->to_irq = gb_gpio_to_irq;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
index 113f3d6c4b3a..27f75b17679b 100644
--- a/drivers/staging/greybus/timesync_platform.c
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
{
+ if (!arche_platform_change_state_cb)
+ return 0;
+
return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
pdata);
}
void gb_timesync_platform_unlock_bus(void)
{
+ if (!arche_platform_change_state_cb)
+ return;
+
arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
}
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 1fbd495e5e63..c7652c35be19 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->protocol = eth_type_trans(skb, skb->dev);
priv->nstats.rx_packets++;
priv->nstats.rx_bytes += rx_ind_size;
- skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
priv->nstats.rx_dropped++;
@@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->protocol = eth_type_trans(skb, skb->dev);
priv->nstats.rx_packets++;
priv->nstats.rx_bytes += rx_ind_size;
- skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
priv->nstats.rx_dropped++;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 39a72e3f0c18..7035356e56b3 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -107,7 +107,7 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
libcfs_debug_dumplog();
if (libcfs_panic_on_lbug)
panic("LBUG");
- set_task_state(current, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
while (1)
schedule();
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ee01f20d8b11..9afa6bec3e6f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = VM_FAULT_LOCKED;
break;
case -ENODATA:
+ case -EAGAIN:
case -EFAULT:
result = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
result = VM_FAULT_OOM;
break;
- case -EAGAIN:
- result = VM_FAULT_RETRY;
- break;
default:
result = VM_FAULT_SIGBUS;
break;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index c27d7e9a1bdb..8b2117ee0f60 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -129,7 +129,7 @@ __vpfe_video_get_format(struct vpfe_video_device *video,
/* make a note of pipeline details */
static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
{
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &video->video_dev.entity;
struct media_device *mdev = entity->graph_obj.mdev;
struct vpfe_pipeline *pipe = &video->pipe;
@@ -145,13 +145,13 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
pipe->outputs[pipe->output_num++] = video;
mutex_lock(&mdev->graph_mutex);
- ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&graph, mdev);
if (ret) {
mutex_unlock(&mdev->graph_mutex);
return -ENOMEM;
}
- media_entity_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
+ media_graph_walk_start(&graph, entity);
+ while ((entity = media_graph_walk_next(&graph))) {
if (entity == &video->video_dev.entity)
continue;
if (!is_media_entity_v4l2_video_device(entity))
@@ -162,7 +162,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
else
pipe->outputs[pipe->output_num++] = far_end;
}
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
mutex_unlock(&mdev->graph_mutex);
return 0;
@@ -300,12 +300,11 @@ static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
mdev = entity->graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
- ret = media_entity_graph_walk_init(&pipe->graph,
- entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&pipe->graph, mdev);
if (ret)
goto out;
- media_entity_graph_walk_start(&pipe->graph, entity);
- while ((entity = media_entity_graph_walk_next(&pipe->graph))) {
+ media_graph_walk_start(&pipe->graph, entity);
+ while ((entity = media_graph_walk_next(&pipe->graph))) {
if (!is_media_entity_v4l2_subdev(entity))
continue;
@@ -316,7 +315,7 @@ static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
}
out:
if (ret)
- media_entity_graph_walk_cleanup(&pipe->graph);
+ media_graph_walk_cleanup(&pipe->graph);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -346,9 +345,9 @@ static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
mdev = entity->graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
- media_entity_graph_walk_start(&pipe->graph, entity);
+ media_graph_walk_start(&pipe->graph, entity);
- while ((entity = media_entity_graph_walk_next(&pipe->graph))) {
+ while ((entity = media_graph_walk_next(&pipe->graph))) {
if (!is_media_entity_v4l2_subdev(entity))
continue;
@@ -359,7 +358,7 @@ static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
}
mutex_unlock(&mdev->graph_mutex);
- media_entity_graph_walk_cleanup(&pipe->graph);
+ media_graph_walk_cleanup(&pipe->graph);
return ret ? -ETIMEDOUT : 0;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index aaec4403df3b..22136d3dadcb 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -52,7 +52,7 @@ enum vpfe_video_state {
struct vpfe_pipeline {
/* media pipeline */
struct media_pipeline *pipe;
- struct media_entity_graph graph;
+ struct media_graph graph;
/* state of the pipeline, continuous,
* single-shot or stopped
*/
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index 25b7e7ccf554..bc67da254262 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -12,26 +12,6 @@ menuconfig LIRC_STAGING
if LIRC_STAGING
-config LIRC_BT829
- tristate "BT829 based hardware"
- depends on LIRC && PCI
- help
- Driver for the IR interface on BT829-based hardware
-
-config LIRC_IMON
- tristate "Legacy SoundGraph iMON Receiver and Display"
- depends on LIRC && USB
- help
- Driver for the original SoundGraph iMON IR Receiver and Display
-
- Current generation iMON devices use the input layer imon driver.
-
-config LIRC_PARALLEL
- tristate "Homebrew Parallel Port Receiver"
- depends on LIRC && PARPORT
- help
- Driver for Homebrew Parallel Port Receivers
-
config LIRC_SASEM
tristate "Sasem USB IR Remote"
depends on LIRC && USB
@@ -40,7 +20,7 @@ config LIRC_SASEM
config LIRC_SIR
tristate "Built-in SIR IrDA port"
- depends on LIRC
+ depends on RC_CORE
help
Driver for the SIR IrDA port
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index 7f919eab1989..28740c94349c 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -3,9 +3,6 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
-obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
-obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
deleted file mode 100644
index 04d881b391c7..000000000000
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Remote control driver for the TV-card based on bt829
- *
- * by Leonid Froenchenko <lfroen@galileo.co.il>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include <media/lirc_dev.h>
-
-static int poll_main(void);
-static int atir_init_start(void);
-
-static void write_index(unsigned char index, unsigned int value);
-static unsigned int read_index(unsigned char index);
-
-static void do_i2c_start(void);
-static void do_i2c_stop(void);
-
-static void seems_wr_byte(unsigned char al);
-static unsigned char seems_rd_byte(void);
-
-static unsigned int read_index(unsigned char al);
-static void write_index(unsigned char ah, unsigned int edx);
-
-static void cycle_delay(int cycle);
-
-static void do_set_bits(unsigned char bl);
-static unsigned char do_get_bits(void);
-
-#define DATA_PCI_OFF 0x7FFC00
-#define WAIT_CYCLE 20
-
-#define DRIVER_NAME "lirc_bt829"
-
-static bool debug;
-
-static int atir_minor;
-static phys_addr_t pci_addr_phys;
-static unsigned char __iomem *pci_addr_lin;
-
-static struct lirc_driver atir_driver;
-
-static struct pci_dev *do_pci_probe(void)
-{
- struct pci_dev *my_dev;
-
- my_dev = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_264VT, NULL);
- if (my_dev) {
- pr_err("Using device: %s\n", pci_name(my_dev));
- pci_addr_phys = 0;
- if (my_dev->resource[0].flags & IORESOURCE_MEM) {
- pci_addr_phys = my_dev->resource[0].start;
- pr_info("memory at %pa\n", &pci_addr_phys);
- }
- if (pci_addr_phys == 0) {
- pr_err("no memory resource ?\n");
- pci_dev_put(my_dev);
- return NULL;
- }
- } else {
- pr_err("pci_probe failed\n");
- return NULL;
- }
- return my_dev;
-}
-
-static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
-{
- unsigned char key;
- int status;
-
- status = poll_main();
- key = (status >> 8) & 0xFF;
- if (status & 0xFF) {
- dev_dbg(atir_driver.dev, "reading key %02X\n", key);
- lirc_buffer_write(buf, &key);
- return 0;
- }
- return -ENODATA;
-}
-
-static int atir_set_use_inc(void *data)
-{
- dev_dbg(atir_driver.dev, "driver is opened\n");
- return 0;
-}
-
-static void atir_set_use_dec(void *data)
-{
- dev_dbg(atir_driver.dev, "driver is closed\n");
-}
-
-int init_module(void)
-{
- struct pci_dev *pdev;
- int rc;
-
- pdev = do_pci_probe();
- if (!pdev)
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_put_dev;
-
- if (!atir_init_start()) {
- rc = -ENODEV;
- goto err_disable;
- }
-
- strcpy(atir_driver.name, "ATIR");
- atir_driver.minor = -1;
- atir_driver.code_length = 8;
- atir_driver.sample_rate = 10;
- atir_driver.data = NULL;
- atir_driver.add_to_buf = atir_add_to_buf;
- atir_driver.set_use_inc = atir_set_use_inc;
- atir_driver.set_use_dec = atir_set_use_dec;
- atir_driver.dev = &pdev->dev;
- atir_driver.owner = THIS_MODULE;
-
- atir_minor = lirc_register_driver(&atir_driver);
- if (atir_minor < 0) {
- pr_err("failed to register driver!\n");
- rc = atir_minor;
- goto err_unmap;
- }
- dev_dbg(atir_driver.dev, "driver is registered on minor %d\n",
- atir_minor);
-
- return 0;
-
-err_unmap:
- iounmap(pci_addr_lin);
-err_disable:
- pci_disable_device(pdev);
-err_put_dev:
- pci_dev_put(pdev);
- return rc;
-}
-
-void cleanup_module(void)
-{
- struct pci_dev *pdev = to_pci_dev(atir_driver.dev);
-
- lirc_unregister_driver(atir_minor);
- iounmap(pci_addr_lin);
- pci_disable_device(pdev);
- pci_dev_put(pdev);
-}
-
-static int atir_init_start(void)
-{
- pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
- if (!pci_addr_lin) {
- pr_info("pci mem must be mapped\n");
- return 0;
- }
- return 1;
-}
-
-static void cycle_delay(int cycle)
-{
- udelay(WAIT_CYCLE * cycle);
-}
-
-static int poll_main(void)
-{
- unsigned char status_high, status_low;
-
- do_i2c_start();
-
- seems_wr_byte(0xAA);
- seems_wr_byte(0x01);
-
- do_i2c_start();
-
- seems_wr_byte(0xAB);
-
- status_low = seems_rd_byte();
- status_high = seems_rd_byte();
-
- do_i2c_stop();
-
- return (status_high << 8) | status_low;
-}
-
-static void do_i2c_start(void)
-{
- do_set_bits(3);
- cycle_delay(4);
-
- do_set_bits(1);
- cycle_delay(7);
-
- do_set_bits(0);
- cycle_delay(2);
-}
-
-static void do_i2c_stop(void)
-{
- unsigned char bits;
-
- bits = do_get_bits() & 0xFD;
- do_set_bits(bits);
- cycle_delay(1);
-
- bits |= 1;
- do_set_bits(bits);
- cycle_delay(2);
-
- bits |= 2;
- do_set_bits(bits);
- bits = 3;
- do_set_bits(bits);
- cycle_delay(2);
-}
-
-static void seems_wr_byte(unsigned char value)
-{
- int i;
- unsigned char reg;
-
- reg = do_get_bits();
- for (i = 0; i < 8; i++) {
- if (value & 0x80)
- reg |= 0x02;
- else
- reg &= 0xFD;
-
- do_set_bits(reg);
- cycle_delay(1);
-
- reg |= 1;
- do_set_bits(reg);
- cycle_delay(1);
-
- reg &= 0xFE;
- do_set_bits(reg);
- cycle_delay(1);
- value <<= 1;
- }
- cycle_delay(2);
-
- reg |= 2;
- do_set_bits(reg);
-
- reg |= 1;
- do_set_bits(reg);
-
- cycle_delay(1);
- do_get_bits();
-
- reg &= 0xFE;
- do_set_bits(reg);
- cycle_delay(3);
-}
-
-static unsigned char seems_rd_byte(void)
-{
- int i;
- int rd_byte;
- unsigned char bits_2, bits_1;
-
- bits_1 = do_get_bits() | 2;
- do_set_bits(bits_1);
-
- rd_byte = 0;
- for (i = 0; i < 8; i++) {
- bits_1 &= 0xFE;
- do_set_bits(bits_1);
- cycle_delay(2);
-
- bits_1 |= 1;
- do_set_bits(bits_1);
- cycle_delay(1);
-
- bits_2 = do_get_bits();
- if (bits_2 & 2)
- rd_byte |= 1;
-
- rd_byte <<= 1;
- }
-
- bits_1 = 0;
- if (bits_2 == 0)
- bits_1 |= 2;
-
- do_set_bits(bits_1);
- cycle_delay(2);
-
- bits_1 |= 1;
- do_set_bits(bits_1);
- cycle_delay(3);
-
- bits_1 &= 0xFE;
- do_set_bits(bits_1);
- cycle_delay(2);
-
- rd_byte >>= 1;
- rd_byte &= 0xFF;
- return rd_byte;
-}
-
-static void do_set_bits(unsigned char new_bits)
-{
- int reg_val;
-
- reg_val = read_index(0x34);
- if (new_bits & 2) {
- reg_val &= 0xFFFFFFDF;
- reg_val |= 1;
- } else {
- reg_val &= 0xFFFFFFFE;
- reg_val |= 0x20;
- }
- reg_val |= 0x10;
- write_index(0x34, reg_val);
-
- reg_val = read_index(0x31);
- if (new_bits & 1)
- reg_val |= 0x1000000;
- else
- reg_val &= 0xFEFFFFFF;
-
- reg_val |= 0x8000000;
- write_index(0x31, reg_val);
-}
-
-static unsigned char do_get_bits(void)
-{
- unsigned char bits;
- int reg_val;
-
- reg_val = read_index(0x34);
- reg_val |= 0x10;
- reg_val &= 0xFFFFFFDF;
- write_index(0x34, reg_val);
-
- reg_val = read_index(0x34);
- bits = 0;
- if (reg_val & 8)
- bits |= 2;
- else
- bits &= 0xFD;
-
- reg_val = read_index(0x31);
- if (reg_val & 0x1000000)
- bits |= 1;
- else
- bits &= 0xFE;
-
- return bits;
-}
-
-static unsigned int read_index(unsigned char index)
-{
- unsigned char __iomem *addr;
- /* addr = pci_addr_lin + DATA_PCI_OFF + ((index & 0xFF) << 2); */
- addr = pci_addr_lin + ((index & 0xFF) << 2);
- return readl(addr);
-}
-
-static void write_index(unsigned char index, unsigned int reg_val)
-{
- unsigned char __iomem *addr;
-
- addr = pci_addr_lin + ((index & 0xFF) << 2);
- writel(reg_val, addr);
-}
-
-MODULE_AUTHOR("Froenchenko Leonid");
-MODULE_DESCRIPTION("IR remote driver for bt829 based TV cards");
-MODULE_LICENSE("GPL");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
deleted file mode 100644
index 1e650fba4a92..000000000000
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ /dev/null
@@ -1,979 +0,0 @@
-/*
- * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD
- * including the iMON PAD model
- *
- * Copyright(C) 2004 Venky Raju(dev@venky.ws)
- * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
- *
- * lirc_imon is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define MOD_AUTHOR "Venky Raju <dev@venky.ws>"
-#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
-#define MOD_NAME "lirc_imon"
-#define MOD_VERSION "0.8"
-
-#define DISPLAY_MINOR_BASE 144
-#define DEVICE_NAME "lcd%d"
-
-#define BUF_CHUNK_SIZE 4
-#define BUF_SIZE 128
-
-#define BIT_DURATION 250 /* each bit received is 250us */
-
-/*** P R O T O T Y P E S ***/
-
-/* USB Callback prototypes */
-static int imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void imon_disconnect(struct usb_interface *interface);
-static void usb_rx_callback(struct urb *urb);
-static void usb_tx_callback(struct urb *urb);
-
-/* suspend/resume support */
-static int imon_resume(struct usb_interface *intf);
-static int imon_suspend(struct usb_interface *intf, pm_message_t message);
-
-/* Display file_operations function prototypes */
-static int display_open(struct inode *inode, struct file *file);
-static int display_close(struct inode *inode, struct file *file);
-
-/* VFD write operation */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos);
-
-/* LIRC driver function prototypes */
-static int ir_open(void *data);
-static void ir_close(void *data);
-
-/*** G L O B A L S ***/
-#define IMON_DATA_BUF_SZ 35
-
-struct imon_context {
- struct usb_device *usbdev;
- /* Newer devices have two interfaces */
- int display; /* not all controllers do */
- int display_isopen; /* display port has been opened */
- int ir_isopen; /* IR port open */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
- wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
-
- int vfd_proto_6p; /* some VFD require a 6th packet */
-
- struct lirc_driver *driver;
- struct usb_endpoint_descriptor *rx_endpoint;
- struct usb_endpoint_descriptor *tx_endpoint;
- struct urb *rx_urb;
- struct urb *tx_urb;
- unsigned char usb_rx_buf[8];
- unsigned char usb_tx_buf[8];
-
- struct rx_data {
- int count; /* length of 0 or 1 sequence */
- int prev_bit; /* logic level of sequence */
- int initial_space; /* initial space flag */
- } rx;
-
- struct tx_t {
- unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */
- struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
- } tx;
-};
-
-static const struct file_operations display_fops = {
- .owner = THIS_MODULE,
- .open = &display_open,
- .write = &vfd_write,
- .release = &display_close,
- .llseek = noop_llseek,
-};
-
-/*
- * USB Device ID for iMON USB Control Boards
- *
- * The Windows drivers contain 6 different inf files, more or less one for
- * each new device until the 0x0034-0x0046 devices, which all use the same
- * driver. Some of the devices in the 34-46 range haven't been definitively
- * identified yet. Early devices have either a TriGem Computer, Inc. or a
- * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
- * devices use the SoundGraph vendor ID (0x15c2).
- */
-static struct usb_device_id imon_usb_id_table[] = {
- /* TriGem iMON (IR only) -- TG_iMON.inf */
- { USB_DEVICE(0x0aa8, 0x8001) },
-
- /* SoundGraph iMON (IR only) -- sg_imon.inf */
- { USB_DEVICE(0x04e8, 0xff30) },
-
- /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */
- { USB_DEVICE(0x0aa8, 0xffda) },
-
- /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */
- { USB_DEVICE(0x15c2, 0xffda) },
-
- {}
-};
-
-/* Some iMON VFD models require a 6th packet for VFD writes */
-static struct usb_device_id vfd_proto_6p_list[] = {
- { USB_DEVICE(0x15c2, 0xffda) },
- {}
-};
-
-/* Some iMON devices have no lcd/vfd, don't set one up */
-static struct usb_device_id ir_only_list[] = {
- { USB_DEVICE(0x0aa8, 0x8001) },
- { USB_DEVICE(0x04e8, 0xff30) },
- {}
-};
-
-/* USB Device data */
-static struct usb_driver imon_driver = {
- .name = MOD_NAME,
- .probe = imon_probe,
- .disconnect = imon_disconnect,
- .suspend = imon_suspend,
- .resume = imon_resume,
- .id_table = imon_usb_id_table,
-};
-
-static struct usb_class_driver imon_class = {
- .name = DEVICE_NAME,
- .fops = &display_fops,
- .minor_base = DISPLAY_MINOR_BASE,
-};
-
-/* to prevent races between open() and disconnect(), probing, etc */
-static DEFINE_MUTEX(driver_lock);
-
-static int debug;
-
-/*** M O D U L E C O D E ***/
-
-MODULE_AUTHOR(MOD_AUTHOR);
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_VERSION(MOD_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
-
-static void free_imon_context(struct imon_context *context)
-{
- struct device *dev = context->driver->dev;
-
- usb_free_urb(context->tx_urb);
- usb_free_urb(context->rx_urb);
- lirc_buffer_free(context->driver->rbuf);
- kfree(context->driver->rbuf);
- kfree(context->driver);
- kfree(context);
-
- dev_dbg(dev, "%s: iMON context freed\n", __func__);
-}
-
-static void deregister_from_lirc(struct imon_context *context)
-{
- int retval;
- int minor = context->driver->minor;
-
- retval = lirc_unregister_driver(minor);
- if (retval)
- dev_err(&context->usbdev->dev,
- "unable to deregister from lirc(%d)", retval);
- else
- dev_info(&context->usbdev->dev,
- "Deregistered iMON driver (minor:%d)\n", minor);
-}
-
-/**
- * Called when the Display device (e.g. /dev/lcd0)
- * is opened by the application.
- */
-static int display_open(struct inode *inode, struct file *file)
-{
- struct usb_interface *interface;
- struct imon_context *context = NULL;
- int subminor;
- int retval = 0;
-
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
- subminor = iminor(inode);
- interface = usb_find_interface(&imon_driver, subminor);
- if (!interface) {
- pr_err("%s: could not find interface for minor %d\n",
- __func__, subminor);
- retval = -ENODEV;
- goto exit;
- }
- context = usb_get_intfdata(interface);
-
- if (!context) {
- dev_err(&interface->dev, "no context found for minor %d\n",
- subminor);
- retval = -ENODEV;
- goto exit;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->display) {
- dev_err(&interface->dev,
- "%s: display not supported by device\n", __func__);
- retval = -ENODEV;
- } else if (context->display_isopen) {
- dev_err(&interface->dev,
- "%s: display port is already open\n", __func__);
- retval = -EBUSY;
- } else {
- context->display_isopen = 1;
- file->private_data = context;
- dev_info(context->driver->dev, "display port opened\n");
- }
-
- mutex_unlock(&context->ctx_lock);
-
-exit:
- mutex_unlock(&driver_lock);
- return retval;
-}
-
-/**
- * Called when the display device (e.g. /dev/lcd0)
- * is closed by the application.
- */
-static int display_close(struct inode *inode, struct file *file)
-{
- struct imon_context *context = NULL;
- int retval = 0;
-
- context = file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->display) {
- dev_err(&context->usbdev->dev,
- "%s: display not supported by device\n", __func__);
- retval = -ENODEV;
- } else if (!context->display_isopen) {
- dev_err(&context->usbdev->dev,
- "%s: display is not open\n", __func__);
- retval = -EIO;
- } else {
- context->display_isopen = 0;
- dev_info(context->driver->dev, "display port closed\n");
- if (!context->dev_present && !context->ir_isopen) {
- /*
- * Device disconnected before close and IR port is not
- * open. If IR port is open, context will be deleted by
- * ir_close.
- */
- mutex_unlock(&context->ctx_lock);
- free_imon_context(context);
- return retval;
- }
- }
-
- mutex_unlock(&context->ctx_lock);
- return retval;
-}
-
-/**
- * Sends a packet to the device -- this function must be called
- * with context->ctx_lock held.
- */
-static int send_packet(struct imon_context *context)
-{
- unsigned int pipe;
- int interval = 0;
- int retval = 0;
-
- /* Check if we need to use control or interrupt urb */
- pipe = usb_sndintpipe(context->usbdev,
- context->tx_endpoint->bEndpointAddress);
- interval = context->tx_endpoint->bInterval;
-
- usb_fill_int_urb(context->tx_urb, context->usbdev, pipe,
- context->usb_tx_buf,
- sizeof(context->usb_tx_buf),
- usb_tx_callback, context, interval);
-
- context->tx_urb->actual_length = 0;
-
- reinit_completion(&context->tx.finished);
- atomic_set(&context->tx.busy, 1);
-
- retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
- if (retval) {
- atomic_set(&context->tx.busy, 0);
- dev_err(&context->usbdev->dev, "error submitting urb(%d)\n",
- retval);
- } else {
- /* Wait for transmission to complete (or abort) */
- mutex_unlock(&context->ctx_lock);
- retval = wait_for_completion_interruptible(
- &context->tx.finished);
- if (retval)
- dev_err(&context->usbdev->dev,
- "%s: task interrupted\n", __func__);
- mutex_lock(&context->ctx_lock);
-
- retval = context->tx.status;
- if (retval)
- dev_err(&context->usbdev->dev,
- "packet tx failed (%d)\n", retval);
- }
-
- return retval;
-}
-
-/**
- * Writes data to the VFD. The iMON VFD is 2x16 characters
- * and requires data in 5 consecutive USB interrupt packets,
- * each packet but the last carrying 7 bytes.
- *
- * I don't know if the VFD board supports features such as
- * scrolling, clearing rows, blanking, etc., so the
- * caller must provide a full screen of data. If fewer
- * than 32 bytes are provided spaces will be appended to
- * generate a full screen.
- */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos)
-{
- int i;
- int offset;
- int seq;
- int retval = 0;
- struct imon_context *context;
- const unsigned char vfd_packet6[] = {
- 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- int *data_buf = NULL;
-
- context = file->private_data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->dev_present) {
- dev_err(&context->usbdev->dev,
- "%s: no iMON device present\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) {
- dev_err(&context->usbdev->dev,
- "%s: invalid payload size\n", __func__);
- retval = -EINVAL;
- goto exit;
- }
-
- data_buf = memdup_user(buf, n_bytes);
- if (IS_ERR(data_buf)) {
- mutex_unlock(&context->ctx_lock);
- return PTR_ERR(data_buf);
- }
-
- memcpy(context->tx.data_buf, data_buf, n_bytes);
-
- /* Pad with spaces */
- for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i)
- context->tx.data_buf[i] = ' ';
-
- for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i)
- context->tx.data_buf[i] = 0xFF;
-
- offset = 0;
- seq = 0;
-
- do {
- memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7);
- context->usb_tx_buf[7] = (unsigned char)seq;
-
- retval = send_packet(context);
- if (retval) {
- dev_err(&context->usbdev->dev,
- "send packet failed for packet #%d\n",
- seq / 2);
- goto exit;
- } else {
- seq += 2;
- offset += 7;
- }
-
- } while (offset < IMON_DATA_BUF_SZ);
-
- if (context->vfd_proto_6p) {
- /* Send packet #6 */
- memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
- context->usb_tx_buf[7] = (unsigned char)seq;
- retval = send_packet(context);
- if (retval)
- dev_err(&context->usbdev->dev,
- "send packet failed for packet #%d\n",
- seq / 2);
- }
-
-exit:
- mutex_unlock(&context->ctx_lock);
- kfree(data_buf);
-
- return (!retval) ? n_bytes : retval;
-}
-
-/**
- * Callback function for USB core API: transmit data
- */
-static void usb_tx_callback(struct urb *urb)
-{
- struct imon_context *context;
-
- if (!urb)
- return;
- context = (struct imon_context *)urb->context;
- if (!context)
- return;
-
- context->tx.status = urb->status;
-
- /* notify waiters that write has finished */
- atomic_set(&context->tx.busy, 0);
- complete(&context->tx.finished);
-}
-
-/**
- * Called by lirc_dev when the application opens /dev/lirc
- */
-static int ir_open(void *data)
-{
- struct imon_context *context;
-
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
- context = data;
-
- /* initial IR protocol decode variables */
- context->rx.count = 0;
- context->rx.initial_space = 1;
- context->rx.prev_bit = 0;
-
- init_completion(&context->tx.finished);
-
- context->ir_isopen = 1;
- dev_info(context->driver->dev, "IR port opened\n");
-
- mutex_unlock(&driver_lock);
- return 0;
-}
-
-/**
- * Called by lirc_dev when the application closes /dev/lirc
- */
-static void ir_close(void *data)
-{
- struct imon_context *context;
-
- context = data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return;
- }
-
- mutex_lock(&context->ctx_lock);
-
- context->ir_isopen = 0;
- dev_info(context->driver->dev, "IR port closed\n");
-
- if (!context->dev_present) {
- /*
- * Device disconnected while IR port was still open. Driver
- * was not deregistered at disconnect time, so do it now.
- */
- deregister_from_lirc(context);
-
- if (!context->display_isopen) {
- mutex_unlock(&context->ctx_lock);
- free_imon_context(context);
- return;
- }
- /*
- * If display port is open, context will be deleted by
- * display_close
- */
- }
-
- mutex_unlock(&context->ctx_lock);
-}
-
-/**
- * Convert bit count to time duration (in us) and submit
- * the value to lirc_dev.
- */
-static void submit_data(struct imon_context *context)
-{
- unsigned char buf[4];
- int value = context->rx.count;
- int i;
-
- dev_dbg(context->driver->dev, "submitting data to LIRC\n");
-
- value *= BIT_DURATION;
- value &= PULSE_MASK;
- if (context->rx.prev_bit)
- value |= PULSE_BIT;
-
- for (i = 0; i < 4; ++i)
- buf[i] = value >> (i * 8);
-
- lirc_buffer_write(context->driver->rbuf, buf);
- wake_up(&context->driver->rbuf->wait_poll);
-}
-
-/**
- * Process the incoming packet
- */
-static void imon_incoming_packet(struct imon_context *context,
- struct urb *urb, int intf)
-{
- int len = urb->actual_length;
- unsigned char *buf = urb->transfer_buffer;
- struct device *dev = context->driver->dev;
- int octet, bit;
- unsigned char mask;
-
- /*
- * just bail out if no listening IR client
- */
- if (!context->ir_isopen)
- return;
-
- if (len != 8) {
- dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
- __func__, len, intf);
- return;
- }
-
- if (debug)
- dev_info(dev, "raw packet: %*ph\n", len, buf);
- /*
- * Translate received data to pulse and space lengths.
- * Received data is active low, i.e. pulses are 0 and
- * spaces are 1.
- *
- * My original algorithm was essentially similar to
- * Changwoo Ryu's with the exception that he switched
- * the incoming bits to active high and also fed an
- * initial space to LIRC at the start of a new sequence
- * if the previous bit was a pulse.
- *
- * I've decided to adopt his algorithm.
- */
-
- if (buf[7] == 1 && context->rx.initial_space) {
- /* LIRC requires a leading space */
- context->rx.prev_bit = 0;
- context->rx.count = 4;
- submit_data(context);
- context->rx.count = 0;
- }
-
- for (octet = 0; octet < 5; ++octet) {
- mask = 0x80;
- for (bit = 0; bit < 8; ++bit) {
- int curr_bit = !(buf[octet] & mask);
-
- if (curr_bit != context->rx.prev_bit) {
- if (context->rx.count) {
- submit_data(context);
- context->rx.count = 0;
- }
- context->rx.prev_bit = curr_bit;
- }
- ++context->rx.count;
- mask >>= 1;
- }
- }
-
- if (buf[7] == 10) {
- if (context->rx.count) {
- submit_data(context);
- context->rx.count = 0;
- }
- context->rx.initial_space = context->rx.prev_bit;
- }
-}
-
-/**
- * Callback function for USB core API: receive data
- */
-static void usb_rx_callback(struct urb *urb)
-{
- struct imon_context *context;
- int intfnum = 0;
-
- if (!urb)
- return;
-
- context = (struct imon_context *)urb->context;
- if (!context)
- return;
-
- switch (urb->status) {
- case -ENOENT: /* usbcore unlink successful! */
- return;
-
- case 0:
- imon_incoming_packet(context, urb, intfnum);
- break;
-
- default:
- dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n",
- __func__, urb->status);
- break;
- }
-
- usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-/**
- * Callback function for USB core API: Probe
- */
-static int imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *usbdev = NULL;
- struct usb_host_interface *iface_desc = NULL;
- struct usb_endpoint_descriptor *rx_endpoint = NULL;
- struct usb_endpoint_descriptor *tx_endpoint = NULL;
- struct urb *rx_urb = NULL;
- struct urb *tx_urb = NULL;
- struct lirc_driver *driver = NULL;
- struct lirc_buffer *rbuf = NULL;
- struct device *dev = &interface->dev;
- int ifnum;
- int lirc_minor = 0;
- int num_endpts;
- int retval = -ENOMEM;
- int display_ep_found = 0;
- int ir_ep_found = 0;
- int vfd_proto_6p = 0;
- struct imon_context *context = NULL;
- int i;
- u16 vendor, product;
-
- /* prevent races probing devices w/multiple interfaces */
- mutex_lock(&driver_lock);
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- goto driver_unlock;
-
- /*
- * Try to auto-detect the type of display if the user hasn't set
- * it by hand via the display_type modparam. Default is VFD.
- */
- if (usb_match_id(interface, ir_only_list))
- context->display = 0;
- else
- context->display = 1;
-
- usbdev = usb_get_dev(interface_to_usbdev(interface));
- iface_desc = interface->cur_altsetting;
- num_endpts = iface_desc->desc.bNumEndpoints;
- ifnum = iface_desc->desc.bInterfaceNumber;
- vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- product = le16_to_cpu(usbdev->descriptor.idProduct);
-
- dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
- __func__, vendor, product, ifnum);
-
- /*
- * Scan the endpoint list and set:
- * first input endpoint = IR endpoint
- * first output endpoint = display endpoint
- */
- for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
- struct usb_endpoint_descriptor *ep;
- int ep_dir;
- int ep_type;
-
- ep = &iface_desc->endpoint[i].desc;
- ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep_type = usb_endpoint_type(ep);
-
- if (!ir_ep_found &&
- ep_dir == USB_DIR_IN &&
- ep_type == USB_ENDPOINT_XFER_INT) {
-
- rx_endpoint = ep;
- ir_ep_found = 1;
- dev_dbg(dev, "%s: found IR endpoint\n", __func__);
-
- } else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
- ep_type == USB_ENDPOINT_XFER_INT) {
- tx_endpoint = ep;
- display_ep_found = 1;
- dev_dbg(dev, "%s: found display endpoint\n", __func__);
- }
- }
-
- /*
- * Some iMON receivers have no display. Unfortunately, it seems
- * that SoundGraph recycles device IDs between devices both with
- * and without... :\
- */
- if (context->display == 0) {
- display_ep_found = 0;
- dev_dbg(dev, "%s: device has no display\n", __func__);
- }
-
- /* Input endpoint is mandatory */
- if (!ir_ep_found) {
- dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
- __func__);
- retval = -ENODEV;
- goto free_context;
- }
-
- /* Determine if display requires 6 packets */
- if (display_ep_found) {
- if (usb_match_id(interface, vfd_proto_6p_list))
- vfd_proto_6p = 1;
-
- dev_dbg(dev, "%s: vfd_proto_6p: %d\n",
- __func__, vfd_proto_6p);
- }
-
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver)
- goto free_context;
-
- rbuf = kmalloc(sizeof(*rbuf), GFP_KERNEL);
- if (!rbuf)
- goto free_driver;
-
- if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
- dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
- goto free_rbuf;
- }
- rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb)
- goto free_lirc_buf;
- tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb)
- goto free_rx_urb;
-
- mutex_init(&context->ctx_lock);
- context->vfd_proto_6p = vfd_proto_6p;
-
- strcpy(driver->name, MOD_NAME);
- driver->minor = -1;
- driver->code_length = BUF_CHUNK_SIZE * 8;
- driver->sample_rate = 0;
- driver->features = LIRC_CAN_REC_MODE2;
- driver->data = context;
- driver->rbuf = rbuf;
- driver->set_use_inc = ir_open;
- driver->set_use_dec = ir_close;
- driver->dev = &interface->dev;
- driver->owner = THIS_MODULE;
-
- mutex_lock(&context->ctx_lock);
-
- context->driver = driver;
- /* start out in keyboard mode */
-
- lirc_minor = lirc_register_driver(driver);
- if (lirc_minor < 0) {
- dev_err(dev, "%s: lirc_register_driver failed\n", __func__);
- goto free_tx_urb;
- }
-
- dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
- lirc_minor);
-
- /* Needed while unregistering! */
- driver->minor = lirc_minor;
-
- context->usbdev = usbdev;
- context->dev_present = 1;
- context->rx_endpoint = rx_endpoint;
- context->rx_urb = rx_urb;
-
- /*
- * tx is used to send characters to lcd/vfd, associate RF
- * remotes, set IR protocol, and maybe more...
- */
- context->tx_endpoint = tx_endpoint;
- context->tx_urb = tx_urb;
-
- if (display_ep_found)
- context->display = 1;
-
- usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context,
- context->rx_endpoint->bInterval);
-
- retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
- if (retval) {
- dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval);
- goto unregister_lirc;
- }
-
- usb_set_intfdata(interface, context);
-
- if (context->display && ifnum == 0) {
- dev_dbg(dev, "%s: Registering iMON display with sysfs\n",
- __func__);
-
- if (usb_register_dev(interface, &imon_class)) {
- /* Not a fatal error, so ignore */
- dev_info(dev, "%s: could not get a minor number for display\n",
- __func__);
- }
- }
-
- dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
- vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
-
-	/* Everything went fine. Just unlock and return retval (which is 0) */
- mutex_unlock(&context->ctx_lock);
- goto driver_unlock;
-
-unregister_lirc:
- lirc_unregister_driver(driver->minor);
-
-free_tx_urb:
- mutex_unlock(&context->ctx_lock);
- usb_free_urb(tx_urb);
-
-free_rx_urb:
- usb_free_urb(rx_urb);
-
-free_lirc_buf:
- lirc_buffer_free(rbuf);
-
-free_rbuf:
- kfree(rbuf);
-
-free_driver:
- kfree(driver);
-free_context:
- kfree(context);
- context = NULL;
-
-driver_unlock:
- mutex_unlock(&driver_lock);
-
- return retval;
-}
-
-/**
- * Callback function for USB core API: disconnect
- */
-static void imon_disconnect(struct usb_interface *interface)
-{
- struct imon_context *context;
- int ifnum;
-
- /* prevent races with ir_open()/display_open() */
- mutex_lock(&driver_lock);
-
- context = usb_get_intfdata(interface);
- ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
-
- mutex_lock(&context->ctx_lock);
-
- usb_set_intfdata(interface, NULL);
-
- /* Abort ongoing write */
- if (atomic_read(&context->tx.busy)) {
- usb_kill_urb(context->tx_urb);
- complete(&context->tx.finished);
- }
-
- context->dev_present = 0;
- usb_kill_urb(context->rx_urb);
- if (context->display)
- usb_deregister_dev(interface, &imon_class);
-
- if (!context->ir_isopen && !context->dev_present) {
- deregister_from_lirc(context);
- mutex_unlock(&context->ctx_lock);
- if (!context->display_isopen)
- free_imon_context(context);
- } else
- mutex_unlock(&context->ctx_lock);
-
- mutex_unlock(&driver_lock);
-
- dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
- __func__, ifnum);
-}
-
-static int imon_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct imon_context *context = usb_get_intfdata(intf);
-
- usb_kill_urb(context->rx_urb);
-
- return 0;
-}
-
-static int imon_resume(struct usb_interface *intf)
-{
- struct imon_context *context = usb_get_intfdata(intf);
-
- usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context,
- context->rx_endpoint->bInterval);
-
- return usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-module_usb_driver(imon_driver);
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
deleted file mode 100644
index bfb76a45bfbf..000000000000
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ /dev/null
@@ -1,741 +0,0 @@
-/*
- * lirc_parallel.c
- *
- * lirc_parallel - device driver for infra-red signal receiving and
- * transmitting unit built by the author
- *
- * Copyright (C) 1998 Christoph Bartelmus <lirc@bartelmus.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-/*** Includes ***/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/ktime.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/uaccess.h>
-#include <asm/div64.h>
-
-#include <linux/poll.h>
-#include <linux/parport.h>
-#include <linux/platform_device.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#include "lirc_parallel.h"
-
-#define LIRC_DRIVER_NAME "lirc_parallel"
-
-#ifndef LIRC_IRQ
-#define LIRC_IRQ 7
-#endif
-#ifndef LIRC_PORT
-#define LIRC_PORT 0x378
-#endif
-#ifndef LIRC_TIMER
-#define LIRC_TIMER 65536
-#endif
-
-/*** Global Variables ***/
-
-static bool debug;
-static bool check_pselecd;
-
-static unsigned int irq = LIRC_IRQ;
-static unsigned int io = LIRC_PORT;
-#ifdef LIRC_TIMER
-static unsigned int timer;
-static unsigned int default_timer = LIRC_TIMER;
-#endif
-
-#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */
-
-static int rbuf[RBUF_SIZE];
-
-static DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
-
-static unsigned int rptr;
-static unsigned int wptr;
-static unsigned int lost_irqs;
-static int is_open;
-
-static struct parport *pport;
-static struct pardevice *ppdevice;
-static int is_claimed;
-
-static unsigned int tx_mask = 1;
-
-/*** Internal Functions ***/
-
-static unsigned int in(int offset)
-{
- switch (offset) {
- case LIRC_LP_BASE:
- return parport_read_data(pport);
- case LIRC_LP_STATUS:
- return parport_read_status(pport);
- case LIRC_LP_CONTROL:
- return parport_read_control(pport);
- }
- return 0; /* make compiler happy */
-}
-
-static void out(int offset, int value)
-{
- switch (offset) {
- case LIRC_LP_BASE:
- parport_write_data(pport, value);
- break;
- case LIRC_LP_CONTROL:
- parport_write_control(pport, value);
- break;
- case LIRC_LP_STATUS:
- pr_info("attempt to write to status register\n");
- break;
- }
-}
-
-static unsigned int lirc_get_timer(void)
-{
- return in(LIRC_PORT_TIMER) & LIRC_PORT_TIMER_BIT;
-}
-
-static unsigned int lirc_get_signal(void)
-{
- return in(LIRC_PORT_SIGNAL) & LIRC_PORT_SIGNAL_BIT;
-}
-
-static void lirc_on(void)
-{
- out(LIRC_PORT_DATA, tx_mask);
-}
-
-static void lirc_off(void)
-{
- out(LIRC_PORT_DATA, 0);
-}
-
-static unsigned int init_lirc_timer(void)
-{
- ktime_t kt, now, timeout;
- unsigned int level, newlevel, timeelapsed, newtimer;
- int count = 0;
-
- kt = ktime_get();
- /* wait max. 1 sec. */
- timeout = ktime_add_ns(kt, NSEC_PER_SEC);
- level = lirc_get_timer();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- count++;
- level = newlevel;
- now = ktime_get();
- } while (count < 1000 && (ktime_before(now, timeout)));
- timeelapsed = ktime_us_delta(now, kt);
- if (count >= 1000 && timeelapsed > 0) {
- if (default_timer == 0) {
- /* autodetect timer */
- newtimer = (1000000 * count) / timeelapsed;
- pr_info("%u Hz timer detected\n", newtimer);
- return newtimer;
- }
- newtimer = (1000000 * count) / timeelapsed;
- if (abs(newtimer - default_timer) > default_timer / 10) {
- /* bad timer */
- pr_notice("bad timer: %u Hz\n", newtimer);
- pr_notice("using default timer: %u Hz\n",
- default_timer);
- return default_timer;
- }
- pr_info("%u Hz timer detected\n", newtimer);
- return newtimer; /* use detected value */
- }
-
- pr_notice("no timer detected\n");
- return 0;
-}
-
-static int lirc_claim(void)
-{
- if (parport_claim(ppdevice) != 0) {
- pr_warn("could not claim port\n");
- pr_warn("waiting for port becoming available\n");
- if (parport_claim_or_block(ppdevice) < 0) {
- pr_notice("could not claim port, giving up\n");
- return 0;
- }
- }
- out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
- is_claimed = 1;
- return 1;
-}
-
-/*** interrupt handler ***/
-
-static void rbuf_write(int signal)
-{
- unsigned int nwptr;
-
- nwptr = (wptr + 1) & (RBUF_SIZE - 1);
- if (nwptr == rptr) {
- /* no new signals will be accepted */
- lost_irqs++;
- pr_notice("buffer overrun\n");
- return;
- }
- rbuf[wptr] = signal;
- wptr = nwptr;
-}
-
-static void lirc_lirc_irq_handler(void *blah)
-{
- ktime_t kt, delkt;
- static ktime_t lastkt;
- static int init;
- long signal;
- int data;
- unsigned int level, newlevel;
- unsigned int timeout;
-
- if (!is_open)
- return;
-
- if (!is_claimed)
- return;
-
-#if 0
- /* disable interrupt */
- disable_irq(irq);
- out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN));
-#endif
- if (check_pselecd && (in(1) & LP_PSELECD))
- return;
-
-#ifdef LIRC_TIMER
- if (init) {
- kt = ktime_get();
-
- delkt = ktime_sub(kt, lastkt);
- if (ktime_compare(delkt, ktime_set(15, 0)) > 0)
- /* really long time */
- data = PULSE_MASK;
- else
- data = (int)(ktime_to_us(delkt) + LIRC_SFH506_DELAY);
-
- rbuf_write(data); /* space */
- } else {
- if (timer == 0) {
- /*
- * wake up; we'll lose this signal, but it will be
- * garbage if the device is turned on anyway
- */
- timer = init_lirc_timer();
- /* enable_irq(irq); */
- return;
- }
- init = 1;
- }
-
- timeout = timer / 10; /* timeout after 1/10 sec. */
- signal = 1;
- level = lirc_get_timer();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- signal++;
- level = newlevel;
-
- /* giving up */
- if (signal > timeout
- || (check_pselecd && (in(1) & LP_PSELECD))) {
- signal = 0;
- pr_notice("timeout\n");
- break;
- }
- } while (lirc_get_signal());
-
- if (signal != 0) {
- /* adjust value to usecs */
- __u64 helper;
-
- helper = ((__u64)signal) * 1000000;
- do_div(helper, timer);
- signal = (long)helper;
-
- if (signal > LIRC_SFH506_DELAY)
- data = signal - LIRC_SFH506_DELAY;
- else
- data = 1;
- rbuf_write(PULSE_BIT | data); /* pulse */
- }
- lastkt = ktime_get();
-#else
- /* add your code here */
-#endif
-
- wake_up_interruptible(&lirc_wait);
-
- /* enable interrupt */
- /*
- * enable_irq(irq);
- * out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
- */
-}
-
-/*** file operations ***/
-
-static loff_t lirc_lseek(struct file *filep, loff_t offset, int orig)
-{
- return -ESPIPE;
-}
-
-static ssize_t lirc_read(struct file *filep, char __user *buf, size_t n,
- loff_t *ppos)
-{
- int result = 0;
- int count = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- if (n % sizeof(int))
- return -EINVAL;
-
- add_wait_queue(&lirc_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (count < n) {
- if (rptr != wptr) {
- if (copy_to_user(buf + count, &rbuf[rptr],
- sizeof(int))) {
- result = -EFAULT;
- break;
- }
- rptr = (rptr + 1) & (RBUF_SIZE - 1);
- count += sizeof(int);
- } else {
- if (filep->f_flags & O_NONBLOCK) {
- result = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- result = -ERESTARTSYS;
- break;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- }
- remove_wait_queue(&lirc_wait, &wait);
- set_current_state(TASK_RUNNING);
- return count ? count : result;
-}
-
-static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n,
- loff_t *ppos)
-{
- int count;
- unsigned int i;
- unsigned int level, newlevel;
- unsigned long flags;
- int counttimer;
- int *wbuf;
- ssize_t ret;
-
- if (!is_claimed)
- return -EBUSY;
-
- count = n / sizeof(int);
-
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
-
- wbuf = memdup_user(buf, n);
- if (IS_ERR(wbuf))
- return PTR_ERR(wbuf);
-
-#ifdef LIRC_TIMER
- if (timer == 0) {
- /* try again if device is ready */
- timer = init_lirc_timer();
- if (timer == 0) {
- ret = -EIO;
- goto out;
- }
- }
-
- /* adjust values from usecs */
- for (i = 0; i < count; i++) {
- __u64 helper;
-
- helper = ((__u64)wbuf[i]) * timer;
- do_div(helper, 1000000);
- wbuf[i] = (int)helper;
- }
-
- local_irq_save(flags);
- i = 0;
- while (i < count) {
- level = lirc_get_timer();
- counttimer = 0;
- lirc_on();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- counttimer++;
- level = newlevel;
- if (check_pselecd && (in(1) & LP_PSELECD)) {
- lirc_off();
- local_irq_restore(flags);
- ret = -EIO;
- goto out;
- }
- } while (counttimer < wbuf[i]);
- i++;
-
- lirc_off();
- if (i == count)
- break;
- counttimer = 0;
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- counttimer++;
- level = newlevel;
- if (check_pselecd && (in(1) & LP_PSELECD)) {
- local_irq_restore(flags);
- ret = -EIO;
- goto out;
- }
- } while (counttimer < wbuf[i]);
- i++;
- }
- local_irq_restore(flags);
-#else
- /* place code that handles write without external timer here */
-#endif
- ret = n;
-out:
- kfree(wbuf);
-
- return ret;
-}
-
-static unsigned int lirc_poll(struct file *file, poll_table *wait)
-{
- poll_wait(file, &lirc_wait, wait);
- if (rptr != wptr)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- int result;
- u32 __user *uptr = (u32 __user *)arg;
- u32 features = LIRC_CAN_SET_TRANSMITTER_MASK |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
- u32 mode;
- u32 value;
-
- switch (cmd) {
- case LIRC_GET_FEATURES:
- result = put_user(features, uptr);
- if (result)
- return result;
- break;
- case LIRC_GET_SEND_MODE:
- result = put_user(LIRC_MODE_PULSE, uptr);
- if (result)
- return result;
- break;
- case LIRC_GET_REC_MODE:
- result = put_user(LIRC_MODE_MODE2, uptr);
- if (result)
- return result;
- break;
- case LIRC_SET_SEND_MODE:
- result = get_user(mode, uptr);
- if (result)
- return result;
- if (mode != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
- case LIRC_SET_REC_MODE:
- result = get_user(mode, uptr);
- if (result)
- return result;
- if (mode != LIRC_MODE_MODE2)
- return -ENOSYS;
- break;
- case LIRC_SET_TRANSMITTER_MASK:
- result = get_user(value, uptr);
- if (result)
- return result;
- if ((value & LIRC_PARALLEL_TRANSMITTER_MASK) != value)
- return LIRC_PARALLEL_MAX_TRANSMITTERS;
- tx_mask = value;
- break;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static int lirc_open(struct inode *node, struct file *filep)
-{
- if (is_open || !lirc_claim())
- return -EBUSY;
-
- parport_enable_irq(pport);
-
- /* init read ptr */
- rptr = 0;
- wptr = 0;
- lost_irqs = 0;
-
- is_open = 1;
- return 0;
-}
-
-static int lirc_close(struct inode *node, struct file *filep)
-{
- if (is_claimed) {
- is_claimed = 0;
- parport_release(ppdevice);
- }
- is_open = 0;
- return 0;
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .llseek = lirc_lseek,
- .read = lirc_read,
- .write = lirc_write,
- .poll = lirc_poll,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .open = lirc_open,
- .release = lirc_close
-};
-
-static int set_use_inc(void *data)
-{
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-}
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_parallel_dev;
-
-static int lirc_parallel_probe(struct platform_device *dev)
-{
- return 0;
-}
-
-static int lirc_parallel_remove(struct platform_device *dev)
-{
- return 0;
-}
-
-static int lirc_parallel_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int lirc_parallel_resume(struct platform_device *dev)
-{
- return 0;
-}
-
-static struct platform_driver lirc_parallel_driver = {
- .probe = lirc_parallel_probe,
- .remove = lirc_parallel_remove,
- .suspend = lirc_parallel_suspend,
- .resume = lirc_parallel_resume,
- .driver = {
- .name = LIRC_DRIVER_NAME,
- },
-};
-
-static int pf(void *handle)
-{
- parport_disable_irq(pport);
- is_claimed = 0;
- return 0;
-}
-
-static void kf(void *handle)
-{
- if (!is_open)
- return;
- if (!lirc_claim())
- return;
- parport_enable_irq(pport);
- lirc_off();
- /* this is a bit annoying when you actually print...*/
- /*
- * printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME);
- */
-}
-
-/*** module initialization and cleanup ***/
-
-static int __init lirc_parallel_init(void)
-{
- int result;
-
- result = platform_driver_register(&lirc_parallel_driver);
- if (result) {
- pr_notice("platform_driver_register returned %d\n", result);
- return result;
- }
-
- lirc_parallel_dev = platform_device_alloc(LIRC_DRIVER_NAME, 0);
- if (!lirc_parallel_dev) {
- result = -ENOMEM;
- goto exit_driver_unregister;
- }
-
- result = platform_device_add(lirc_parallel_dev);
- if (result)
- goto exit_device_put;
-
- pport = parport_find_base(io);
- if (!pport) {
- pr_notice("no port at %x found\n", io);
- result = -ENXIO;
- goto exit_device_del;
- }
- ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
- pf, kf, lirc_lirc_irq_handler, 0,
- NULL);
- parport_put_port(pport);
- if (!ppdevice) {
- pr_notice("parport_register_device() failed\n");
- result = -ENXIO;
- goto exit_device_del;
- }
- if (parport_claim(ppdevice) != 0)
- goto skip_init;
- is_claimed = 1;
- out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
-
-#ifdef LIRC_TIMER
- if (debug)
- out(LIRC_PORT_DATA, tx_mask);
-
- timer = init_lirc_timer();
-
-#if 0 /* continue even if device is offline */
- if (timer == 0) {
- is_claimed = 0;
- parport_release(pport);
- parport_unregister_device(ppdevice);
- result = -EIO;
- goto exit_device_del;
- }
-
-#endif
- if (debug)
- out(LIRC_PORT_DATA, 0);
-#endif
-
- is_claimed = 0;
- parport_release(ppdevice);
- skip_init:
- driver.dev = &lirc_parallel_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_notice("register_chrdev() failed\n");
- parport_unregister_device(ppdevice);
- result = -EIO;
- goto exit_device_del;
- }
- pr_info("installed using port 0x%04x irq %d\n", io, irq);
- return 0;
-
-exit_device_del:
- platform_device_del(lirc_parallel_dev);
-exit_device_put:
- platform_device_put(lirc_parallel_dev);
-exit_driver_unregister:
- platform_driver_unregister(&lirc_parallel_driver);
- return result;
-}
-
-static void __exit lirc_parallel_exit(void)
-{
- parport_unregister_device(ppdevice);
- lirc_unregister_driver(driver.minor);
-
- platform_device_unregister(lirc_parallel_dev);
- platform_driver_unregister(&lirc_parallel_driver);
-}
-
-module_init(lirc_parallel_init);
-module_exit(lirc_parallel_exit);
-
-MODULE_DESCRIPTION("Infrared receiver driver for parallel ports.");
-MODULE_AUTHOR("Christoph Bartelmus");
-MODULE_LICENSE("GPL");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3bc, 0x378 or 0x278)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (7 or 5)");
-
-module_param(tx_mask, int, S_IRUGO);
-MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
-
-module_param(check_pselecd, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)");
diff --git a/drivers/staging/media/lirc/lirc_parallel.h b/drivers/staging/media/lirc/lirc_parallel.h
deleted file mode 100644
index 4bed6afe0632..000000000000
--- a/drivers/staging/media/lirc/lirc_parallel.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* lirc_parallel.h */
-
-#ifndef _LIRC_PARALLEL_H
-#define _LIRC_PARALLEL_H
-
-#include <linux/lp.h>
-
-#define LIRC_PORT_LEN 3
-
-#define LIRC_LP_BASE 0
-#define LIRC_LP_STATUS 1
-#define LIRC_LP_CONTROL 2
-
-#define LIRC_PORT_DATA LIRC_LP_BASE /* base */
-#define LIRC_PORT_TIMER LIRC_LP_STATUS /* status port */
-#define LIRC_PORT_TIMER_BIT LP_PBUSY /* busy signal */
-#define LIRC_PORT_SIGNAL LIRC_LP_STATUS /* status port */
-#define LIRC_PORT_SIGNAL_BIT LP_PACK /* ack signal */
-#define LIRC_PORT_IRQ LIRC_LP_CONTROL /* control port */
-
-#define LIRC_SFH506_DELAY 0 /* delay t_phl in usecs */
-
-#define LIRC_PARALLEL_MAX_TRANSMITTERS 8
-#define LIRC_PARALLEL_TRANSMITTER_MASK ((1<<LIRC_PARALLEL_MAX_TRANSMITTERS) - 1)
-
-#endif
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 4f326e97ad75..c75ae43095ba 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -1,7 +1,7 @@
/*
* LIRC SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
*
- * lirc_sir - Device driver for use with SIR (serial infra red)
+ * sir_ir - Device driver for use with SIR (serial infra red)
* mode of IrDA on many notebooks.
*
* This program is free software; you can redistribute it and/or modify
@@ -58,8 +58,7 @@
#include <linux/timer.h>
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
+#include <media/rc-core.h>
/* SECTION: Definitions */
@@ -87,11 +86,6 @@ static void init_act200(void);
static void init_act220(void);
#endif
-#define RBUF_LEN 1024
-#define WBUF_LEN 1024
-
-#define LIRC_DRIVER_NAME "lirc_sir"
-
#define PULSE '['
#ifndef LIRC_SIR_TEKRAM
@@ -131,28 +125,19 @@ static ktime_t last;
/* time of last UART data ready interrupt */
static ktime_t last_intr_time;
static int last_value;
+static struct rc_dev *rcdev;
-static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue);
+static struct platform_device *sir_ir_dev;
static DEFINE_SPINLOCK(hardware_lock);
-static int rx_buf[RBUF_LEN];
-static unsigned int rx_tail, rx_head;
-
static bool debug;
/* SECTION: Prototypes */
/* Communication with user-space */
-static unsigned int lirc_poll(struct file *file, poll_table *wait);
-static ssize_t lirc_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos);
-static ssize_t lirc_write(struct file *file, const char __user *buf, size_t n,
- loff_t *pos);
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
static void add_read_queue(int flag, unsigned long val);
static int init_chrdev(void);
-static void drop_chrdev(void);
/* Hardware */
static irqreturn_t sir_interrupt(int irq, void *dev_id);
static void send_space(unsigned long len);
@@ -189,72 +174,14 @@ static void safe_udelay(unsigned long usecs)
}
/* SECTION: Communication with user-space */
-
-static unsigned int lirc_poll(struct file *file, poll_table *wait)
-{
- poll_wait(file, &lirc_read_queue, wait);
- if (rx_head != rx_tail)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static ssize_t lirc_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- int n = 0;
- int retval = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- if (count % sizeof(int))
- return -EINVAL;
-
- add_wait_queue(&lirc_read_queue, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (n < count) {
- if (rx_head != rx_tail) {
- if (copy_to_user(buf + n,
- rx_buf + rx_head,
- sizeof(int))) {
- retval = -EFAULT;
- break;
- }
- rx_head = (rx_head + 1) & (RBUF_LEN - 1);
- n += sizeof(int);
- } else {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- }
- remove_wait_queue(&lirc_read_queue, &wait);
- set_current_state(TASK_RUNNING);
- return n ? n : retval;
-}
-static ssize_t lirc_write(struct file *file, const char __user *buf, size_t n,
- loff_t *pos)
+static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
+ unsigned int count)
{
unsigned long flags;
- int i, count;
- int *tx_buf;
-
- count = n / sizeof(int);
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
- tx_buf = memdup_user(buf, n);
- if (IS_ERR(tx_buf))
- return PTR_ERR(tx_buf);
- i = 0;
+ int i;
+
local_irq_save(flags);
- while (1) {
- if (i >= count)
- break;
+ for (i = 0; i < count;) {
if (tx_buf[i])
send_pulse(tx_buf[i]);
i++;
@@ -265,138 +192,53 @@ static ssize_t lirc_write(struct file *file, const char __user *buf, size_t n,
i++;
}
local_irq_restore(flags);
- kfree(tx_buf);
- return count;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- u32 __user *uptr = (u32 __user *)arg;
- int retval = 0;
- u32 value = 0;
-
- if (cmd == LIRC_GET_FEATURES)
- value = LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
- else if (cmd == LIRC_GET_SEND_MODE)
- value = LIRC_MODE_PULSE;
- else if (cmd == LIRC_GET_REC_MODE)
- value = LIRC_MODE_MODE2;
-
- switch (cmd) {
- case LIRC_GET_FEATURES:
- case LIRC_GET_SEND_MODE:
- case LIRC_GET_REC_MODE:
- retval = put_user(value, uptr);
- break;
-
- case LIRC_SET_SEND_MODE:
- case LIRC_SET_REC_MODE:
- retval = get_user(value, uptr);
- break;
- default:
- retval = -ENOIOCTLCMD;
-
- }
-
- if (retval)
- return retval;
- if (cmd == LIRC_SET_REC_MODE) {
- if (value != LIRC_MODE_MODE2)
- retval = -ENOSYS;
- } else if (cmd == LIRC_SET_SEND_MODE) {
- if (value != LIRC_MODE_PULSE)
- retval = -ENOSYS;
- }
- return retval;
+ return count;
}
static void add_read_queue(int flag, unsigned long val)
{
- unsigned int new_rx_tail;
- int newval;
+ DEFINE_IR_RAW_EVENT(ev);
pr_debug("add flag %d with val %lu\n", flag, val);
- newval = val & PULSE_MASK;
-
/*
* statistically, pulses are ~TIME_CONST/2 too long. we could
* maybe make this more exact, but this is good enough
*/
if (flag) {
/* pulse */
- if (newval > TIME_CONST/2)
- newval -= TIME_CONST/2;
+ if (val > TIME_CONST / 2)
+ val -= TIME_CONST / 2;
else /* should not ever happen */
- newval = 1;
- newval |= PULSE_BIT;
+ val = 1;
+ ev.pulse = true;
} else {
- newval += TIME_CONST/2;
+ val += TIME_CONST / 2;
}
- new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1);
- if (new_rx_tail == rx_head) {
- pr_debug("Buffer overrun.\n");
- return;
- }
- rx_buf[rx_tail] = newval;
- rx_tail = new_rx_tail;
- wake_up_interruptible(&lirc_read_queue);
-}
+ ev.duration = US_TO_NS(val);
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .read = lirc_read,
- .write = lirc_write,
- .poll = lirc_poll,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = no_llseek,
-};
-
-static int set_use_inc(void *data)
-{
- return 0;
+ ir_raw_event_store_with_filter(rcdev, &ev);
}
-static void set_use_dec(void *data)
-{
-}
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_sir_dev;
-
static int init_chrdev(void)
{
- driver.dev = &lirc_sir_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_err("init_chrdev() failed.\n");
- return -EIO;
- }
- return 0;
-}
-
-static void drop_chrdev(void)
-{
- lirc_unregister_driver(driver.minor);
+ rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->tx_ir = sir_tx_ir;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->dev.parent = &sir_ir_dev->dev;
+
+ return devm_rc_register_device(&sir_ir_dev->dev, rcdev);
}
/* SECTION: Hardware */
@@ -420,14 +262,15 @@ static void sir_timeout(unsigned long data)
/* determine 'virtual' pulse end: */
pulse_end = min_t(unsigned long,
ktime_us_delta(last, last_intr_time),
- PULSE_MASK);
- dev_dbg(driver.dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
+ IR_MAX_DURATION);
+ dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
+ last_value, pulse_end);
add_read_queue(last_value, pulse_end);
last_value = 0;
last = last_intr_time;
}
spin_unlock_irqrestore(&timer_lock, flags);
+ ir_raw_event_handle(rcdev);
}
static irqreturn_t sir_interrupt(int irq, void *dev_id)
@@ -462,20 +305,20 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
curr_time = ktime_get();
delt = min_t(unsigned long,
ktime_us_delta(last, curr_time),
- PULSE_MASK);
+ IR_MAX_DURATION);
deltintr = min_t(unsigned long,
ktime_us_delta(last_intr_time,
curr_time),
- PULSE_MASK);
- dev_dbg(driver.dev, "t %lu, d %d\n",
- deltintr, (int)data);
+ IR_MAX_DURATION);
+ dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
+ deltintr, (int)data);
/*
* if nothing came in last X cycles,
* it was gap
*/
if (deltintr > TIME_CONST * threshold) {
if (last_value) {
- dev_dbg(driver.dev, "GAP\n");
+ dev_dbg(&sir_ir_dev->dev, "GAP\n");
/* simulate signal change */
add_read_queue(last_value,
delt -
@@ -517,6 +360,7 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
break;
}
}
+ ir_raw_event_handle(rcdev);
return IRQ_RETVAL(IRQ_HANDLED);
}
@@ -655,12 +499,12 @@ static int init_port(void)
int retval;
/* get I/O port access and IRQ line */
- if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) {
+ if (!request_region(io, 8, KBUILD_MODNAME)) {
pr_err("i/o port 0x%.4x already in use.\n", io);
return -EBUSY;
}
retval = request_irq(irq, sir_interrupt, 0,
- LIRC_DRIVER_NAME, NULL);
+ KBUILD_MODNAME, NULL);
if (retval < 0) {
release_region(io, 8);
pr_err("IRQ %d already in use.\n", irq);
@@ -882,11 +726,10 @@ void init_act220(void)
}
#endif
-static int init_lirc_sir(void)
+static int init_sir_ir(void)
{
int retval;
- init_waitqueue_head(&lirc_read_queue);
retval = init_port();
if (retval < 0)
return retval;
@@ -895,42 +738,42 @@ static int init_lirc_sir(void)
return 0;
}
-static int lirc_sir_probe(struct platform_device *dev)
+static int sir_ir_probe(struct platform_device *dev)
{
return 0;
}
-static int lirc_sir_remove(struct platform_device *dev)
+static int sir_ir_remove(struct platform_device *dev)
{
return 0;
}
-static struct platform_driver lirc_sir_driver = {
- .probe = lirc_sir_probe,
- .remove = lirc_sir_remove,
+static struct platform_driver sir_ir_driver = {
+ .probe = sir_ir_probe,
+ .remove = sir_ir_remove,
.driver = {
- .name = "lirc_sir",
+ .name = "sir_ir",
},
};
-static int __init lirc_sir_init(void)
+static int __init sir_ir_init(void)
{
int retval;
- retval = platform_driver_register(&lirc_sir_driver);
+ retval = platform_driver_register(&sir_ir_driver);
if (retval) {
pr_err("Platform driver register failed!\n");
return -ENODEV;
}
- lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
- if (!lirc_sir_dev) {
+ sir_ir_dev = platform_device_alloc("sir_ir", 0);
+ if (!sir_ir_dev) {
pr_err("Platform device alloc failed!\n");
retval = -ENOMEM;
goto pdev_alloc_fail;
}
- retval = platform_device_add(lirc_sir_dev);
+ retval = platform_device_add(sir_ir_dev);
if (retval) {
pr_err("Platform device add failed!\n");
retval = -ENODEV;
@@ -941,35 +784,32 @@ static int __init lirc_sir_init(void)
if (retval < 0)
goto fail;
- retval = init_lirc_sir();
- if (retval) {
- drop_chrdev();
+ retval = init_sir_ir();
+ if (retval)
goto fail;
- }
return 0;
fail:
- platform_device_del(lirc_sir_dev);
+ platform_device_del(sir_ir_dev);
pdev_add_fail:
- platform_device_put(lirc_sir_dev);
+ platform_device_put(sir_ir_dev);
pdev_alloc_fail:
- platform_driver_unregister(&lirc_sir_driver);
+ platform_driver_unregister(&sir_ir_driver);
return retval;
}
-static void __exit lirc_sir_exit(void)
+static void __exit sir_ir_exit(void)
{
drop_hardware();
- drop_chrdev();
drop_port();
- platform_device_unregister(lirc_sir_dev);
- platform_driver_unregister(&lirc_sir_driver);
+ platform_device_unregister(sir_ir_dev);
+ platform_driver_unregister(&sir_ir_driver);
pr_info("Uninstalled.\n");
}
-module_init(lirc_sir_init);
-module_exit(lirc_sir_exit);
+module_init(sir_ir_init);
+module_exit(sir_ir_exit);
#ifdef LIRC_SIR_TEKRAM
MODULE_DESCRIPTION("Infrared receiver driver for Tekram Irmate 210");
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c16927ac8eb0..bb0e3b4a4558 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -205,21 +205,21 @@ iss_video_remote_subdev(struct iss_video *video, u32 *pad)
static struct iss_video *
iss_video_far_end(struct iss_video *video)
{
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &video->video.entity;
struct media_device *mdev = entity->graph_obj.mdev;
struct iss_video *far_end = NULL;
mutex_lock(&mdev->graph_mutex);
- if (media_entity_graph_walk_init(&graph, mdev)) {
+ if (media_graph_walk_init(&graph, mdev)) {
mutex_unlock(&mdev->graph_mutex);
return NULL;
}
- media_entity_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
+ while ((entity = media_graph_walk_next(&graph))) {
if (entity == &video->video.entity)
continue;
@@ -235,7 +235,7 @@ iss_video_far_end(struct iss_video *video)
mutex_unlock(&mdev->graph_mutex);
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
return far_end;
}
@@ -854,7 +854,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct iss_video_fh *vfh = to_iss_video_fh(fh);
struct iss_video *video = video_drvdata(file);
- struct media_entity_graph graph;
+ struct media_graph graph;
struct media_entity *entity = &video->video.entity;
enum iss_pipeline_state state;
struct iss_pipeline *pipe;
@@ -880,19 +880,19 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret)
goto err_graph_walk_init;
- ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&graph, entity->graph_obj.mdev);
if (ret)
goto err_graph_walk_init;
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, true);
- ret = media_entity_pipeline_start(entity, &pipe->pipe);
+ ret = media_pipeline_start(entity, &pipe->pipe);
if (ret < 0)
- goto err_media_entity_pipeline_start;
+ goto err_media_pipeline_start;
- media_entity_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph)))
+ media_graph_walk_start(&graph, entity);
+ while ((entity = media_graph_walk_next(&graph)))
media_entity_enum_set(&pipe->ent_enum, entity);
/* Verify that the currently configured format matches the output of
@@ -963,7 +963,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
spin_unlock_irqrestore(&video->qlock, flags);
}
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
mutex_unlock(&video->stream_lock);
@@ -972,13 +972,13 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
err_omap4iss_set_stream:
vb2_streamoff(&vfh->queue, type);
err_iss_video_check_format:
- media_entity_pipeline_stop(&video->video.entity);
-err_media_entity_pipeline_start:
+ media_pipeline_stop(&video->video.entity);
+err_media_pipeline_start:
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, false);
video->queue = NULL;
- media_entity_graph_walk_cleanup(&graph);
+ media_graph_walk_cleanup(&graph);
err_graph_walk_init:
media_entity_enum_cleanup(&pipe->ent_enum);
@@ -1026,7 +1026,7 @@ iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, false);
- media_entity_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(&video->video.entity);
done:
mutex_unlock(&video->stream_lock);
@@ -1141,6 +1141,7 @@ static int iss_video_open(struct file *file)
done:
if (ret < 0) {
v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
kfree(handle);
}
@@ -1162,6 +1163,7 @@ static int iss_video_release(struct file *file)
vb2_queue_release(&handle->queue);
v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
kfree(handle);
file->private_data = NULL;
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
index ddfd955da0d4..7a3489df3e70 100644
--- a/drivers/staging/media/s5p-cec/Kconfig
+++ b/drivers/staging/media/s5p-cec/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SAMSUNG_S5P_CEC
tristate "Samsung S5P CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+ depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_EXYNOS || COMPILE_TEST)
---help---
This is a driver for Samsung S5P HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h b/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
index 3e4fc7b05e83..7d9453505dce 100644
--- a/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
+++ b/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
@@ -14,7 +14,6 @@
#define _EXYNOS_HDMI_CEC_H_ __FILE__
#include <linux/regmap.h>
-#include <linux/miscdevice.h>
#include "s5p_cec.h"
void s5p_cec_set_divider(struct s5p_cec_dev *cec);
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
index ce95e0fcd882..1edf667d562a 100644
--- a/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
@@ -87,7 +87,6 @@ void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec)
reg |= S5P_CEC_IRQ_TX_DONE;
reg |= S5P_CEC_IRQ_TX_ERROR;
writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
-
}
void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec)
@@ -186,13 +185,13 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
void s5p_clr_pending_tx(struct s5p_cec_dev *cec)
{
writeb(S5P_CEC_IRQ_TX_DONE | S5P_CEC_IRQ_TX_ERROR,
- cec->reg + S5P_CEC_IRQ_CLEAR);
+ cec->reg + S5P_CEC_IRQ_CLEAR);
}
void s5p_clr_pending_rx(struct s5p_cec_dev *cec)
{
writeb(S5P_CEC_IRQ_RX_DONE | S5P_CEC_IRQ_RX_ERROR,
- cec->reg + S5P_CEC_IRQ_CLEAR);
+ cec->reg + S5P_CEC_IRQ_CLEAR);
}
void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer)
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index fb0928a4fb97..781ef623233e 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
skb_reserve(skb, BYTE_OFFSET);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, skb->dev);
- skb->dev->last_rx = jiffies;
netif_rx(skb);
/* Fill rx ring */
skb_data = xlr_alloc_skb();
@@ -397,14 +396,6 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
TX_DROP_FRAME_COUNTER);
}
-static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats
- )
-{
- xlr_stats(ndev, stats);
- return stats;
-}
-
static const struct net_device_ops xlr_netdev_ops = {
.ndo_open = xlr_net_open,
.ndo_stop = xlr_net_stop,
@@ -412,7 +403,7 @@ static const struct net_device_ops xlr_netdev_ops = {
.ndo_select_queue = xlr_net_select_queue,
.ndo_set_mac_address = xlr_net_set_mac_addr,
.ndo_set_rx_mode = xlr_set_rx_mode,
- .ndo_get_stats64 = xlr_get_stats64,
+ .ndo_get_stats64 = xlr_stats,
};
/*
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index f0900d1c4d7b..fc849d4a1b5d 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget) {
/* No more work */
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
enable_irq(rx_group->irq);
}
return rx_count;
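
[Aside — illustration only, not part of the patch.] napi_complete_done() lets a poll handler report how much work it actually did, which plain napi_complete() cannot. A minimal sketch of the pattern the hunk above switches to; demo_napi_poll is a made-up name:

#include <linux/netdevice.h>

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	int rx_count = 0;

	/* ... receive up to 'budget' packets here, counting them ... */

	if (rx_count < budget) {
		/* no more work: report the amount actually processed */
		napi_complete_done(napi, rx_count);
		/* a real driver would re-enable its RX interrupt here */
	}

	return rx_count;
}
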
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 6b4c20872323..0b8053205091 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -23,6 +23,7 @@
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>
+#include <net/sch_generic.h>
#include <asm/octeon/octeon.h>
@@ -369,9 +370,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_verd = 0;
-#endif /* CONFIG_NET_CLS_ACT */
+ skb_reset_tc(skb);
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index e5ba7d1a809f..43a77745e6fb 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->LinkDetectInfo.NumRecvDataInPeriod++;
ieee->LinkDetectInfo.NumRxOkInPeriod++;
}
- dev->last_rx = jiffies;
/* Data frame - extract src/dst addresses */
rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 82f654305414..b1f2fdfcb718 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
stats = hostap_get_stats(dev);
from_assoc_ap = 1;
}
-#endif
-
- dev->last_rx = jiffies;
-#ifdef NOT_YET
if ((ieee->iw_mode == IW_MODE_MASTER ||
ieee->iw_mode == IW_MODE_REPEAT) &&
!from_assoc_ap) {
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index c1f674f5268c..ca3743d273e0 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
/* If there aren't any more packets to receive stop the poll */
if (rx_count < budget)
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
return rx_count;
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 4fe037aeef12..6134eba5cad4 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
&usbin->rxfrm.desc.frame_control, hdrlen);
skb->dev = wlandev->netdev;
- skb->dev->last_rx = jiffies;
/* And set the frame length properly */
skb_trim(skb, data_len + hdrlen);
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 73fcf07254fe..53dbbd69e552 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
}
if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
- skb->dev->last_rx = jiffies;
wlandev->netdev->stats.rx_packets++;
wlandev->netdev->stats.rx_bytes += skb->len;
netif_rx_ni(skb);
@@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_80211_RAW);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 257361280510..e2bc99980f75 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -4,6 +4,7 @@ menuconfig TARGET_CORE
depends on SCSI && BLOCK
select CONFIGFS_FS
select CRC_T10DIF
+ select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 450f51deb2a2..eab274d17b5c 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <crypto/hash.h>
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/idr.h>
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1ebd13ef7bd3..26929c44d703 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -352,7 +352,15 @@ int core_enable_device_list_for_node(
kfree(new);
return -EINVAL;
}
- BUG_ON(orig->se_lun_acl != NULL);
+ if (orig->se_lun_acl != NULL) {
+ pr_warn_ratelimited("Detected existing explicit"
+ " se_lun_acl->se_lun_group reference for %s"
+ " mapped_lun: %llu, failing\n",
+ nacl->initiatorname, mapped_lun);
+ mutex_unlock(&nacl->lun_entry_mutex);
+ kfree(new);
+ return -EINVAL;
+ }
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d761025144f9..e18051185846 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -788,7 +788,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* __core_scsi3_add_registration()
*/
dest_lun = rcu_dereference_check(deve_tmp->se_lun,
- atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+ kref_read(&deve_tmp->pr_kref) != 0);
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1463,7 +1463,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
* For nacl->dynamic_node_acl=1
*/
lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- atomic_read(&se_deve->pr_kref.refcount) != 0);
+ kref_read(&se_deve->pr_kref) != 0);
if (!lun_acl)
return 0;
@@ -1478,7 +1478,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
* For nacl->dynamic_node_acl=1
*/
lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- atomic_read(&se_deve->pr_kref.refcount) != 0);
+ kref_read(&se_deve->pr_kref) != 0);
if (!lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
@@ -1759,7 +1759,7 @@ core_scsi3_decode_spec_i_port(
* 2nd loop which will never fail.
*/
dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+ kref_read(&dest_se_deve->pr_kref) != 0);
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3466,7 +3466,7 @@ after_iport_check:
iport_ptr);
if (!dest_pr_reg) {
struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+ kref_read(&dest_se_deve->pr_kref) != 0);
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 04d7aa7390d0..a8f8e53f2f57 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
scsi_command_size(cmd->t_task_cdb));
req = blk_get_request(pdv->pdv_sd->request_queue,
- (cmd->data_direction == DMA_TO_DEVICE),
+ cmd->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
@@ -1013,7 +1014,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
goto fail;
}
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
@@ -1023,10 +1024,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
+ scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ scsi_req(req)->cmd = &pt->pscsi_cdb[0];
if (pdv->pdv_sd->type == TYPE_DISK)
req->timeout = PS_TIMEOUT_DISK;
else
@@ -1075,7 +1074,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
struct pscsi_plugin_task *pt = cmd->priv;
pt->pscsi_result = req->errors;
- pt->pscsi_resid = req->resid_len;
+ pt->pscsi_resid = scsi_req(req)->resid_len;
cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
if (cmd->scsi_status) {
@@ -1096,6 +1095,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
break;
}
+ memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
__blk_put_request(req->q, req);
kfree(pt);
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4879e70e2eef..df7b6e95c019 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret = TCM_NO_SENSE;
/*
* Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
* sent to the backend driver.
*/
spin_lock_irq(&cmd->t_state_lock);
- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ if (cmd->transport_state & CMD_T_SENT) {
cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
*post_ret = 1;
+
+ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_unlock_irq(&cmd->t_state_lock);
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
*/
up(&dev->caw_sem);
- return TCM_NO_SENSE;
+ return ret;
}
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1cadc9eefa21..437591bc7c08 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
struct se_node_acl, acl_kref);
+ struct se_portal_group *se_tpg = nacl->se_tpg;
- complete(&nacl->acl_free_comp);
+ if (!nacl->dynamic_stop) {
+ complete(&nacl->acl_free_comp);
+ return;
+ }
+
+ mutex_lock(&se_tpg->acl_node_mutex);
+ list_del(&nacl->acl_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ kfree(nacl);
}
void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
/*
* Drop the se_node_acl->nacl_kref obtained from within
* core_tpg_get_initiator_node_acl().
*/
if (se_nacl) {
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+ unsigned long flags;
+
se_sess->se_node_acl = NULL;
+
+ /*
+ * Also determine if we need to drop the extra ->cmd_kref if
+ * it had been previously dynamically generated, and
+ * the endpoint is not caching dynamic ACLs.
+ */
+ mutex_lock(&se_tpg->acl_node_mutex);
+ if (se_nacl->dynamic_node_acl &&
+ !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+ if (list_empty(&se_nacl->acl_sess_list))
+ se_nacl->dynamic_stop = true;
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+ if (se_nacl->dynamic_stop)
+ list_del(&se_nacl->acl_list);
+ }
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ if (se_nacl->dynamic_stop)
+ target_put_nacl(se_nacl);
+
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
- const struct target_core_fabric_ops *se_tfo;
- struct se_node_acl *se_nacl;
unsigned long flags;
- bool drop_nacl = false;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
- se_tfo = se_tpg->se_tpg_tfo;
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
- /*
- * Determine if we need to do extra work for this initiator node's
- * struct se_node_acl if it had been previously dynamically generated.
- */
- se_nacl = se_sess->se_node_acl;
-
- mutex_lock(&se_tpg->acl_node_mutex);
- if (se_nacl && se_nacl->dynamic_node_acl) {
- if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
- list_del(&se_nacl->acl_list);
- drop_nacl = true;
- }
- }
- mutex_unlock(&se_tpg->acl_node_mutex);
-
- if (drop_nacl) {
- core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_free_device_list_for_node(se_nacl, se_tpg);
- se_sess->se_node_acl = NULL;
- kfree(se_nacl);
- }
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context from within transport_free_session() code.
+ *
+ * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+ * to release all remaining generate_node_acl=1 created ACL resources.
*/
transport_free_session(se_sess);
@@ -3110,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_stop;
}
- cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3123,11 +3139,25 @@ int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
unsigned long flags;
+ bool aborted = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state |= CMD_T_ACTIVE;
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ aborted = true;
+ } else {
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE;
+ }
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (aborted) {
+ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+ "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+ cmd->se_tmr_req->ref_task_tag, cmd->tag);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return 0;
+ }
+
INIT_WORK(&cmd->work, target_tmr_work);
queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d828b3b5000b..cac5a20a4de0 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -864,7 +864,7 @@ out:
" CHECK_CONDITION -> sending response\n", rc);
ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
}
- target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+ target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
}
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index fd5c3de79470..c91979c1463d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -454,7 +454,7 @@ static void ft_sess_free(struct kref *kref)
void ft_sess_put(struct ft_sess *sess)
{
- int sess_held = atomic_read(&sess->kref.refcount);
+ int sess_held = kref_read(&sess->kref);
BUG_ON(!sess_held);
kref_put(&sess->kref, ft_sess_free);
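
[Aside — illustration only, not part of the patch.] Several hunks above stop peeking at kref.refcount directly and use the kref_read() accessor instead. A small sketch of that pattern, mirroring ft_sess_put(); struct demo_obj and the demo_* names are made up:

#include <linux/bug.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_obj, kref));
}

static void demo_put(struct demo_obj *obj)
{
	/* sanity-check the count through the accessor, not kref.refcount */
	WARN_ON(!kref_read(&obj->kref));
	kref_put(&obj->kref, demo_release);
}
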
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 9ce0e9eef923..85fdbf762fa0 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -297,8 +297,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
if (!power_table)
return -ENOMEM;
- rcu_read_lock();
-
for (freq = 0, i = 0;
opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
freq++, i++) {
@@ -306,13 +304,13 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
u64 power;
if (i >= num_opps) {
- rcu_read_unlock();
ret = -EAGAIN;
goto free_power_table;
}
freq_mhz = freq / 1000000;
voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
/*
* Do the multiplication with MHz and millivolt so as
@@ -328,8 +326,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
power_table[i].power = power;
}
- rcu_read_unlock();
-
if (i != num_opps) {
ret = PTR_ERR(opp);
goto free_power_table;
@@ -433,13 +429,10 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
return 0;
}
- rcu_read_lock();
-
opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
true);
voltage = dev_pm_opp_get_voltage(opp);
-
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
if (voltage == 0) {
dev_warn_ratelimited(cpufreq_device->cpu_dev,
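
[Aside — illustration only, not part of the patch.] The hunks above drop the rcu_read_lock()/rcu_read_unlock() bracketing around OPP lookups: the lookup now returns a referenced OPP that is released with dev_pm_opp_put(). A sketch of the new pattern; demo_opp_voltage is a made-up helper and 'dev'/'freq_hz' are assumed to come from the caller:

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long demo_opp_voltage(struct device *dev, unsigned long freq_hz)
{
	struct dev_pm_opp *opp;
	unsigned long voltage;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return 0;

	voltage = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);	/* drop the reference the lookup took */

	return voltage;
}
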
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 5a737fd5f1aa..ba7a5cd994dc 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -113,15 +113,15 @@ static int partition_enable_opps(struct devfreq_cooling_device *dfc,
unsigned int freq = dfc->freq_table[i];
bool want_enable = i >= cdev_state ? true : false;
- rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable);
- rcu_read_unlock();
if (PTR_ERR(opp) == -ERANGE)
continue;
else if (IS_ERR(opp))
return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
if (want_enable)
ret = dev_pm_opp_enable(dev, freq);
else
@@ -221,15 +221,12 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
if (!dfc->power_ops->get_static_power)
return 0;
- rcu_read_lock();
-
opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE))
opp = dev_pm_opp_find_freq_exact(dev, freq, false);
voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
-
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
if (voltage == 0) {
dev_warn_ratelimited(dev,
@@ -412,18 +409,14 @@ static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc)
unsigned long power_dyn, voltage;
struct dev_pm_opp *opp;
- rcu_read_lock();
-
opp = dev_pm_opp_find_freq_floor(dev, &freq);
if (IS_ERR(opp)) {
- rcu_read_unlock();
ret = PTR_ERR(opp);
goto free_tables;
}
voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
-
- rcu_read_unlock();
+ dev_pm_opp_put(opp);
if (dfc->power_ops) {
power_dyn = get_dynamic_power(dfc, freq, voltage);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0fb61b1..4c7796512453 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
void (*control)(void __iomem *reg, bool on);
/* Per-sensor methods */
- int (*get_temp)(struct chip_tsadc_table table,
+ int (*get_temp)(const struct chip_tsadc_table *table,
int chn, void __iomem *reg, int *temp);
- void (*set_alarm_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
- void (*set_tshut_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
+ int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
+ int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
/* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
{3452, 115000},
{3437, 120000},
{3421, 125000},
+ {0, 125000},
};
static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
{TSADCV3_DATA_MASK, 125000},
};
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
int temp)
{
int high, low, mid;
- u32 error = 0;
+ unsigned long num;
+ unsigned int denom;
+ u32 error = table->data_mask;
low = 0;
- high = table.length - 1;
+ high = (table->length - 1) - 1; /* ignore the last check for table */
mid = (high + low) / 2;
/* Return mask code data when the temp is over table range */
- if (temp < table.id[low].temp || temp > table.id[high].temp) {
- error = table.data_mask;
+ if (temp < table->id[low].temp || temp > table->id[high].temp)
goto exit;
- }
while (low <= high) {
- if (temp == table.id[mid].temp)
- return table.id[mid].code;
- else if (temp < table.id[mid].temp)
+ if (temp == table->id[mid].temp)
+ return table->id[mid].code;
+ else if (temp < table->id[mid].temp)
high = mid - 1;
else
low = mid + 1;
mid = (low + high) / 2;
}
+ /*
+ * The conversion code granularity provided by the table. Let's
+ * assume that the relationship between temperature and
+ * analog value between 2 table entries is linear and interpolate
+ * to produce less granular result.
+ */
+ num = abs(table->id[mid + 1].code - table->id[mid].code);
+ num *= temp - table->id[mid].temp;
+ denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+ switch (table->mode) {
+ case ADC_DECREMENT:
+ return table->id[mid].code - (num / denom);
+ case ADC_INCREMENT:
+ return table->id[mid].code + (num / denom);
+ default:
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return error;
+ }
+
exit:
- pr_err("Invalid the conversion, error=%d\n", error);
+ pr_err("%s: invalid temperature, temp=%d error=%d\n",
+ __func__, temp, error);
return error;
}
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
- int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+ u32 code, int *temp)
{
unsigned int low = 1;
- unsigned int high = table.length - 1;
+ unsigned int high = table->length - 1;
unsigned int mid = (low + high) / 2;
unsigned int num;
unsigned long denom;
- WARN_ON(table.length < 2);
+ WARN_ON(table->length < 2);
- switch (table.mode) {
+ switch (table->mode) {
case ADC_DECREMENT:
- code &= table.data_mask;
- if (code < table.id[high].code)
+ code &= table->data_mask;
+ if (code <= table->id[high].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code >= table.id[mid].code &&
- code < table.id[mid - 1].code)
+ if (code >= table->id[mid].code &&
+ code < table->id[mid - 1].code)
break;
- else if (code < table.id[mid].code)
+ else if (code < table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
case ADC_INCREMENT:
- code &= table.data_mask;
- if (code < table.id[low].code)
+ code &= table->data_mask;
+ if (code < table->id[low].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code <= table.id[mid].code &&
- code > table.id[mid - 1].code)
+ if (code <= table->id[mid].code &&
+ code > table->id[mid - 1].code)
break;
- else if (code > table.id[mid].code)
+ else if (code > table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
default:
- pr_err("Invalid the conversion table\n");
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return -EINVAL;
}
/*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* temperature between 2 table entries is linear and interpolate
* to produce less granular result.
*/
- num = table.id[mid].temp - table.id[mid - 1].temp;
- num *= abs(table.id[mid - 1].code - code);
- denom = abs(table.id[mid - 1].code - table.id[mid].code);
- *temp = table.id[mid - 1].temp + (num / denom);
+ num = table->id[mid].temp - table->id[mid - 1].temp;
+ num *= abs(table->id[mid - 1].code - code);
+ denom = abs(table->id[mid - 1].code - table->id[mid].code);
+ *temp = table->id[mid - 1].temp + (num / denom);
return 0;
}
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
int chn, void __iomem *regs, int *temp)
{
u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
return rk_tsadcv2_code_to_temp(table, val, temp);
}
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
- u32 alarm_value, int_en;
+ u32 alarm_value;
+ u32 int_en, int_clr;
+
+ /*
+ * In some cases, some sensors didn't need the trip points, the
+ * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
+ * in the end, ignore this case and disable the high temperature
+ * interrupt.
+ */
+ if (temp == INT_MAX) {
+ int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+ int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+ writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+ return 0;
+ }
/* Make sure the value is valid */
alarm_value = rk_tsadcv2_temp_to_code(table, temp);
- if (alarm_value == table.data_mask)
- return;
+ if (alarm_value == table->data_mask)
+ return -ERANGE;
- writel_relaxed(alarm_value & table.data_mask,
+ writel_relaxed(alarm_value & table->data_mask,
regs + TSADCV2_COMP_INT(chn));
int_en = readl_relaxed(regs + TSADCV2_INT_EN);
int_en |= TSADCV2_INT_SRC_EN(chn);
writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+ return 0;
}
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
u32 tshut_value, val;
/* Make sure the value is valid */
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
- if (tshut_value == table.data_mask)
- return;
+ if (tshut_value == table->data_mask)
+ return -ERANGE;
writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
/* TSHUT will be valid */
val = readl_relaxed(regs + TSADCV2_AUTO_CON);
writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+ return 0;
}
static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
__func__, sensor->id, low, high);
- tsadc->set_alarm_temp(tsadc->table,
- sensor->id, thermal->regs, high);
-
- return 0;
+ return tsadc->set_alarm_temp(&tsadc->table,
+ sensor->id, thermal->regs, high);
}
static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
int retval;
- retval = tsadc->get_temp(tsadc->table,
+ retval = tsadc->get_temp(&tsadc->table,
sensor->id, thermal->regs, out_temp);
dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
int error;
tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
- tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+ error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
sensor->thermal = thermal;
sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
thermal->chip->set_tshut_mode(id, thermal->regs,
thermal->tshut_mode);
- thermal->chip->set_tshut_temp(thermal->chip->table,
+
+ error = thermal->chip->set_tshut_temp(&thermal->chip->table,
id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
}
thermal->chip->control(thermal->regs, true);
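
[Aside — standalone userspace model, not driver code.] rk_tsadcv2_code_to_temp() above combines a binary search over the code/temperature table with linear interpolation between the two bracketing entries. The same idea as a self-contained program; the ADC_INCREMENT style table values are made up:

#include <stdio.h>

struct tsadc_entry {
	unsigned int code;	/* raw ADC code */
	int temp;		/* millicelsius */
};

/* made-up table: codes rise with temperature (ADC_INCREMENT style) */
static const struct tsadc_entry table[] = {
	{ 100, 25000 },
	{ 120, 30000 },
	{ 140, 35000 },
	{ 160, 40000 },
};

static int code_to_temp(unsigned int code, int *temp)
{
	unsigned int low = 1;
	unsigned int high = sizeof(table) / sizeof(table[0]) - 1;
	unsigned int mid = (low + high) / 2;
	unsigned int num;
	unsigned long denom;

	if (code < table[low].code || code > table[high].code)
		return -1;	/* reading outside the table range */

	/* binary search for the pair of entries bracketing 'code' */
	while (low <= high) {
		if (code <= table[mid].code && code > table[mid - 1].code)
			break;
		else if (code > table[mid].code)
			low = mid + 1;
		else
			high = mid - 1;
		mid = (low + high) / 2;
	}

	/* interpolate linearly between the two bracketing entries */
	num = table[mid].temp - table[mid - 1].temp;
	num *= code - table[mid - 1].code;
	denom = table[mid].code - table[mid - 1].code;
	*temp = table[mid - 1].temp + (int)(num / denom);

	return 0;
}

int main(void)
{
	int temp;

	if (!code_to_temp(130, &temp))
		printf("code 130 -> %d mC\n", temp);
	return 0;
}
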
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab6e24b..655591316a88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
if (!strncmp(dev_name(dev), "thermal_zone",
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ kfree(tz->trip_hyst_attrs);
+ kfree(tz->trips_attribute_group.attrs);
+ kfree(tz->device.groups);
kfree(tz);
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, 0);
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
- kfree(tz->trips_attribute_group.attrs);
thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
idr_destroy(&tz->idr);
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- kfree(tz->device.groups);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 1bf8ed13f827..9229de43e19d 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -200,7 +200,6 @@ static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
struct ldsem_waiter waiter;
- struct task_struct *tsk = current;
long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
/* set up my own style of waitqueue */
@@ -221,8 +220,8 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
list_add_tail(&waiter.list, &sem->read_wait);
sem->wait_readers++;
- waiter.task = tsk;
- get_task_struct(tsk);
+ waiter.task = current;
+ get_task_struct(current);
/* if there are no active locks, wake the new lock owner(s) */
if ((count & LDSEM_ACTIVE_MASK) == 0)
@@ -232,7 +231,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
/* wait to be given the lock */
for (;;) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
if (!waiter.task)
break;
@@ -241,7 +240,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
timeout = schedule_timeout(timeout);
}
- __set_task_state(tsk, TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
if (!timeout) {
/* lock timed out but check if this task was just
@@ -268,7 +267,6 @@ static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
struct ldsem_waiter waiter;
- struct task_struct *tsk = current;
long adjust = -LDSEM_ACTIVE_BIAS;
int locked = 0;
@@ -289,16 +287,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
list_add_tail(&waiter.list, &sem->write_wait);
- waiter.task = tsk;
+ waiter.task = current;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
for (;;) {
if (!timeout)
break;
raw_spin_unlock_irq(&sem->wait_lock);
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->wait_lock);
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
locked = writer_trylock(sem);
if (locked)
break;
@@ -309,7 +307,7 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
- __set_task_state(tsk, TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
/* lock wait may have timed out */
if (!locked)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d2e50a27140c..24f9f98968a5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* WORLDE easy key (easykey.25) MIDI controller */
+ { USB_DEVICE(0x0218, 0x0401), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e03453..302b8f5f7d27 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
/* Gadget parameters */
bool g_dma;
bool g_dma_desc;
- u16 g_rx_fifo_size;
- u16 g_np_tx_fifo_size;
+ u32 g_rx_fifo_size;
+ u32 g_np_tx_fifo_size;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index c55db4aa54d6..77c5fcf3a5bf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
(hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
__func__, epctrl, epctrl_reg);
/* Allocate DMA descriptor chain for non-ctrl endpoints */
- if (using_desc_dma(hsotg)) {
- hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+ hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
&hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
error2:
if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
hs_ep->desc_list, hs_ep->desc_list_dma);
hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
- /* Remove DMA memory allocated for non-control Endpoints */
- if (using_desc_dma(hsotg)) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
- sizeof(struct dwc2_dma_desc),
- hs_ep->desc_list, hs_ep->desc_list_dma);
- hs_ep->desc_list = NULL;
- }
-
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b36ac06..46d0ad5105e4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
if (!hsotg->params.hibernation)
goto skip_power_saving;
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
{
#ifdef VERBOSE_DEBUG
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- char *pipetype;
- char *speed;
+ char *pipetype = NULL;
+ char *speed = NULL;
dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 11fe68a4627b..bcd1e19b4076 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
}
/**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
*
* See dwc2_set_param().
*/
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
bool lookup, char *property, u16 legacy,
u16 def, u16 min, u16 max)
{
dwc2_set_param(hsotg, param, lookup, property,
- legacy, def, min, max, 2);
+ legacy, def, min, max, 4);
}
/**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
* auto-detect if the hardware does not support the
* default.
*/
- dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
true, "g-rx-fifo-size", 2048,
hw->rx_fifo_size,
16, hw->rx_fifo_size);
- dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
true, "g-np-tx-fifo-size", 1024,
hw->dev_nperio_tx_fifo_size,
16, hw->dev_nperio_tx_fifo_size);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899bb5706..e956306d9b0f 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -196,6 +197,7 @@ err3:
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 002822d98fda..49d685ad0da9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
- kfree(cdev->os_desc_req);
+ usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5e746adc8a2d..e6a17455adac 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
__ffs_epfile_read_buffer_free(epfile);
++epfile;
}
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while(count--) {
struct usb_endpoint_descriptor *ds;
int desc_idx;
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
++ep;
++epfile;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
return -EINVAL;
length = le32_to_cpu(d->dwSize);
+ if (len < length)
+ return -EINVAL;
type = le32_to_cpu(d->dwPropertyDataType);
if (type < USB_EXT_PROP_UNICODE ||
type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
return -EINVAL;
}
pnl = le16_to_cpu(d->wPropertyNameLength);
+ if (length < 14 + pnl) {
+ pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+ length, pnl, type);
+ return -EINVAL;
+ }
pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
if (length != 14 + pnl + pdl) {
pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
}
}
if (flags & (1 << i)) {
+ if (len < 4) {
+ goto error;
+ }
os_descs_count = get_unaligned_le32(data);
data += 4;
len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ if (unlikely(len < 16 ||
+ get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
goto error;
str_count = get_unaligned_le32(data + 8);
@@ -3448,12 +3459,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* cleanup after autoconfig */
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
++ep;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
kfree(func->eps);
func->eps = NULL;
@@ -3687,7 +3698,7 @@ static void ffs_closed(struct ffs_data *ffs)
goto done;
if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
- || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
goto done;
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
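
[Aside — standalone userspace model, not the kernel code.] The new checks in __ffs_data_do_os_desc() make sure the declared descriptor size fits in the received buffer and that the variable-length property name fits inside the declared size before anything past it is dereferenced. The same validation shape in plain C; offsets follow the extended-property layout referenced above (dwSize at 0, wPropertyNameLength at 8, name at 10), a little-endian host is assumed for brevity, and parse_ext_prop is a made-up name:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static int parse_ext_prop(const uint8_t *data, size_t len)
{
	uint32_t length;
	uint16_t pnl;

	if (len < 10)
		return -1;	/* too short for the fixed header fields */

	memcpy(&length, data, 4);	/* dwSize */
	if (len < length)
		return -1;	/* declared record size exceeds the buffer */

	memcpy(&pnl, data + 8, 2);	/* wPropertyNameLength */
	if (length < 14u + pnl)
		return -1;	/* name + data-length field would overrun the record */

	/* property name starts at data + 10, data-length field at data + 10 + pnl */
	return 0;
}
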
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db9bc37..12c7687216e6 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+ sprintf(ep->name, "ep%d", ep->index);
+ ep->ep.name = ep->name;
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d589dfa..b03b2ebfc53a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
void __iomem *ep_regs;
void __iomem *dma_regs;
void __iomem *fifo;
+ char name[8];
struct usb_ep ep;
struct usba_udc *udc;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab301e366..e5834dd9bcde 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
return -ENODEV;
/* Try to set 64-bit DMA first */
- if (WARN_ON(!pdev->dev.dma_mask))
+ if (!pdev->dev.dma_mask)
/* Platform did not initialize dma_mask */
ret = dma_coerce_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64));
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 33ff49c4cea4..46847340b819 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -409,7 +409,7 @@ static void __exit mon_exit(void)
printk(KERN_ERR TAG
": Outstanding opens (%d) on usb%d, leaking...\n",
mbus->nreaders, mbus->u_bus->busnum);
- atomic_set(&mbus->ref.refcount, 2); /* Force leak */
+ kref_get(&mbus->ref); /* Force leak */
}
mon_dissolve(mbus, mbus->u_bus);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fca288bbc800..772f15821242 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- musb->need_finish_resume = 1;
-
musb->xceiv->otg->state = OTG_STATE_A_HOST;
musb->is_active = 1;
musb_host_resume_root_hub(musb);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case OTG_STATE_B_WAIT_ACON:
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work.work);
+ int error;
+
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0) {
+ dev_err(musb->controller, "Could not enable: %i\n", error);
+
+ return;
+ }
musb_pm_runtime_check_session(musb);
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
}
static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
/*
* The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
musb_restore_context(musb);
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
-
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ade902ea1221..ce5a18c98c6d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -410,7 +410,6 @@ struct musb {
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_suspended:1;
- unsigned need_finish_resume :1;
/* may_wakeup means remote wakeup is enabled */
unsigned may_wakeup:1;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fff718352e0c..5d61d0871f2e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1329,17 +1329,20 @@ static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
return 0;
}
-static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio,
- enum single_ended_mode mode)
+static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
+ unsigned long config)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ enum pin_config_param param = pinconf_to_config_param(config);
/* Succeed only if in correct mode (this can't be set at runtime) */
- if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio)))
+ if ((param == PIN_CONFIG_DRIVE_PUSH_PULL) &&
+ (priv->gpio_mode & BIT(gpio)))
return 0;
- if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio)))
+ if ((param == PIN_CONFIG_DRIVE_OPEN_DRAIN) &&
+ !(priv->gpio_mode & BIT(gpio)))
return 0;
return -ENOTSUPP;
@@ -1402,7 +1405,7 @@ static int cp2105_shared_gpio_init(struct usb_serial *serial)
priv->gc.direction_output = cp210x_gpio_direction_output;
priv->gc.get = cp210x_gpio_get;
priv->gc.set = cp210x_gpio_set;
- priv->gc.set_single_ended = cp210x_gpio_set_single_ended;
+ priv->gc.set_config = cp210x_gpio_set_config;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &serial->interface->dev;
priv->gc.base = -1;
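
The cp210x hunk above moves the driver from the old set_single_ended gpiochip callback to the generic set_config callback, where the requested pin configuration arrives as a packed word and the driver only accepts a mode that already matches the hardware strapping. A minimal stand-alone model of that check, with the packed-parameter extraction reduced to a plain enum (the real driver uses the kernel's pinconf_to_config_param() helper, and -1 stands in for -ENOTSUPP):

#include <stdio.h>

enum pin_config_param {
	PIN_CONFIG_DRIVE_OPEN_DRAIN,
	PIN_CONFIG_DRIVE_PUSH_PULL,
};

/*
 * Illustrative model: gpio_mode has one bit per GPIO, set when the pin
 * is strapped for push-pull.  The "configuration" only succeeds if it
 * matches the existing mode, mirroring the logic in the hunk above.
 */
static int set_config(unsigned int gpio_mode, unsigned int gpio,
		      enum pin_config_param param)
{
	if (param == PIN_CONFIG_DRIVE_PUSH_PULL && (gpio_mode & (1u << gpio)))
		return 0;
	if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN && !(gpio_mode & (1u << gpio)))
		return 0;
	return -1;	/* the driver returns -ENOTSUPP here */
}

int main(void)
{
	unsigned int gpio_mode = 0x1;	/* GPIO 0 push-pull, GPIO 1 open-drain */

	printf("%d\n", set_config(gpio_mode, 0, PIN_CONFIG_DRIVE_PUSH_PULL)); /* 0 */
	printf("%d\n", set_config(gpio_mode, 1, PIN_CONFIG_DRIVE_PUSH_PULL)); /* -1 */
	return 0;
}
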
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7ce31a4c7e7f..42cc72e54c05 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 46fca6b75846..1db4b61bdf7b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index e3b7af8adfb7..09d9be88209e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
#define IODATA_PRODUCT_ID 0x0a03
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1bc6089b9008..696458db7e3c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
{USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c8823578a1b2..59b3f62a2d64 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
ret = tce_iommu_create_default_window(container);
- if (ret)
- return ret;
-
- ret = tce_iommu_create_window(container, create.page_shift,
- create.window_size, create.levels,
- &create.start_addr);
+ if (!ret)
+ ret = tce_iommu_create_window(container,
+ create.page_shift,
+ create.window_size, create.levels,
+ &create.start_addr);
mutex_unlock(&container->lock);
@@ -1246,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
+ long i, ret = 0;
+
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1254,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
+ /* Set all windows to the new group */
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ struct iommu_table *tbl = container->tables[i];
+
+ if (!tbl)
+ continue;
+
+ ret = table_group->ops->set_window(table_group, i, tbl);
+ if (ret)
+ goto release_exit;
+ }
+
return 0;
+
+release_exit:
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+ table_group->ops->unset_window(table_group, i);
+
+ table_group->ops->release_ownership(table_group);
+
+ return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
@@ -1270,6 +1291,10 @@ static int tce_iommu_attach_group(void *iommu_data,
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b3cc33fa6d26..bd6f293c4ebd 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,8 @@
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+ phys_addr_t *base)
+{
+ struct list_head group_resv_regions;
+ struct iommu_resv_region *region, *next;
+ bool ret = false;
+
+ INIT_LIST_HEAD(&group_resv_regions);
+ iommu_get_group_resv_regions(group, &group_resv_regions);
+ list_for_each_entry(region, &group_resv_regions, list) {
+ if (region->type & IOMMU_RESV_MSI) {
+ *base = region->start;
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ list_for_each_entry_safe(region, next, &group_resv_regions, list)
+ kfree(region);
+ return ret;
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct vfio_domain *domain, *d;
struct bus_type *bus = NULL, *mdev_bus;
int ret;
+ bool resv_msi, msi_remap;
+ phys_addr_t resv_msi_base;
mutex_lock(&iommu->lock);
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_domain;
+ resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
INIT_LIST_HEAD(&domain->group_list);
list_add(&group->next, &domain->group_list);
- if (!allow_unsafe_interrupts &&
- !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+ msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+ iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+ if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
__func__);
ret = -EPERM;
@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_detach;
+ if (resv_msi) {
+ ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
+ if (ret)
+ goto out_detach;
+ }
+
list_add(&domain->next, &iommu->domain_list);
mutex_unlock(&iommu->lock);
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 40764ecad9ce..cfdecea5078f 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,6 +1,6 @@
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
- depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+ depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
select VHOST
---help---
This kernel module can be loaded in host kernel to accelerate
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc34653274a..2fe35354f20e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -24,6 +24,7 @@
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <net/sock.h>
@@ -351,6 +352,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
return r;
}
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+
+ return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
+ == nvq->done_idx;
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -394,8 +404,7 @@ static void handle_tx(struct vhost_net *net)
/* If more outstanding DMAs, queue the work.
* Handle upend_idx wrap around
*/
- if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
- % UIO_MAXIOV == nvq->done_idx))
+ if (unlikely(vhost_exceeds_maxpend(net)))
break;
head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
@@ -454,6 +463,16 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = NULL;
ubufs = NULL;
}
+
+ total_len += len;
+ if (total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(&net->dev, vq) &&
+ likely(!vhost_exceeds_maxpend(net))) {
+ msg.msg_flags |= MSG_MORE;
+ } else {
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
@@ -472,7 +491,6 @@ static void handle_tx(struct vhost_net *net)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
vhost_zerocopy_signal_used(net, vq);
- total_len += len;
vhost_net_tx_packet(net);
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
@@ -943,7 +961,7 @@ static struct socket *get_tap_socket(int fd)
sock = tun_get_socket(file);
if (!IS_ERR(sock))
return sock;
- sock = macvtap_get_socket(file);
+ sock = tap_get_socket(file);
if (IS_ERR(sock))
fput(file);
return sock;
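
The vhost-net changes factor the outstanding-zerocopy test into vhost_exceeds_maxpend() and reuse it to decide when MSG_MORE batching is safe. The test itself is modular index arithmetic over a ring of UIO_MAXIOV slots; a self-contained sketch of the same comparison, with illustrative constants standing in for the driver's values:

#include <stdio.h>
#include <stdbool.h>

#define RING_SLOTS	1024	/* stands in for UIO_MAXIOV */
#define MAX_PEND	128	/* stands in for VHOST_MAX_PEND */

/*
 * True when (upend_idx + num - MAX_PEND) wraps onto done_idx, the point
 * at which the hunk above stops queueing further zerocopy transmissions
 * and clears MSG_MORE.
 */
static bool exceeds_maxpend(unsigned int upend_idx, unsigned int done_idx,
			    unsigned int num)
{
	return (upend_idx + num - MAX_PEND) % RING_SLOTS == done_idx;
}

int main(void)
{
	unsigned int num = 256;		/* virtqueue size */

	printf("%d\n", exceeds_maxpend(100, 228, num));	/* 1: cutoff reached */
	printf("%d\n", exceeds_maxpend(100, 50, num));	/* 0: keep transmitting */
	return 0;
}
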
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310cdaaca..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter out_iter, in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- unsigned out, in;
+ unsigned int out = 0, in = 0;
int head, ret, prot_bytes;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6432603880c..4269e621e254 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
- vq->is_le = true;
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
- vq->is_le = virtio_legacy_is_little_endian();
+ vhost_init_is_le(vq);
}
struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
int r;
bool is_le = vq->is_le;
- if (!vq->private_data) {
- vhost_reset_is_le(vq);
+ if (!vq->private_data)
return 0;
- }
vhost_init_is_le(vq);
@@ -2241,11 +2239,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__virtio16 avail_idx;
int r;
+ if (vq->avail_idx != vq->last_avail_idx)
+ return false;
+
r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
- if (r)
+ if (unlikely(r))
return false;
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
- return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
+ return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
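
The vhost_vq_avail_empty() change above skips the guest-memory access entirely when the previously cached available index already shows pending buffers, and refreshes the cache otherwise. Reduced to a self-contained sketch, with a plain variable standing in for the shared ring index (the real code reads it with vhost_get_user() and converts endianness):

#include <stdio.h>
#include <stdbool.h>

struct vq {
	unsigned short avail_idx;	/* cached copy of the shared index */
	unsigned short last_avail_idx;	/* how far we have consumed */
	unsigned short shared_idx;	/* stands in for vq->avail->idx */
};

static bool vq_avail_empty(struct vq *vq)
{
	/* Fast path: the cache already shows work pending. */
	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	/* Slow path: re-read the shared index and refresh the cache. */
	vq->avail_idx = vq->shared_idx;
	return vq->avail_idx == vq->last_avail_idx;
}

int main(void)
{
	struct vq vq = { .avail_idx = 5, .last_avail_idx = 5, .shared_idx = 7 };

	printf("%d\n", vq_avail_empty(&vq));	/* 0: new buffers appeared */
	vq.last_avail_idx = 7;
	printf("%d\n", vq_avail_empty(&vq));	/* 1: nothing pending */
	return 0;
}
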
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf588540ed..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..68a113594808 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
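
The fbcmap hunks replace signed offset/size arithmetic, which could go negative when one colormap starts past the end of the other, with unsigned offsets, an explicit overlap check, and a min()-style clamp. A stand-alone sketch of the corrected computation (returning 0 where the kernel returns -EINVAL):

#include <stdio.h>
#include <stddef.h>

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Number of u16 entries to copy, or 0 when the maps do not overlap. */
static size_t cmap_copy_len(unsigned int to_start, size_t to_len,
			    unsigned int from_start, size_t from_len)
{
	unsigned int tooff = 0, fromoff = 0;

	if (to_start > from_start)
		fromoff = to_start - from_start;
	else
		tooff = from_start - to_start;

	/* Reject maps that do not overlap at all. */
	if (fromoff >= from_len || tooff >= to_len)
		return 0;

	return min_size(to_len - tooff, from_len - fromoff);
}

int main(void)
{
	printf("%zu\n", cmap_copy_len(0, 16, 8, 16));	/* 8 */
	printf("%zu\n", cmap_copy_len(0, 16, 64, 16));	/* 0: no overlap */
	return 0;
}
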
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 7062bb0975a5..400d70b69379 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -100,11 +100,6 @@ static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
dev->id.device, dev->id.vendor);
}
-static void add_status(struct virtio_device *dev, unsigned status)
-{
- dev->config->set_status(dev, dev->config->get_status(dev) | status);
-}
-
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
@@ -145,14 +140,15 @@ void virtio_config_changed(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev)
+void virtio_config_disable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
dev->config_enabled = false;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_disable);
-static void virtio_config_enable(struct virtio_device *dev)
+void virtio_config_enable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
dev->config_enabled = true;
@@ -161,8 +157,15 @@ static void virtio_config_enable(struct virtio_device *dev)
dev->config_change_pending = false;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_enable);
+
+void virtio_add_status(struct virtio_device *dev, unsigned int status)
+{
+ dev->config->set_status(dev, dev->config->get_status(dev) | status);
+}
+EXPORT_SYMBOL_GPL(virtio_add_status);
-static int virtio_finalize_features(struct virtio_device *dev)
+int virtio_finalize_features(struct virtio_device *dev)
{
int ret = dev->config->finalize_features(dev);
unsigned status;
@@ -173,7 +176,7 @@ static int virtio_finalize_features(struct virtio_device *dev)
if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
return 0;
- add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
status = dev->config->get_status(dev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
dev_err(&dev->dev, "virtio: device refuses features: %x\n",
@@ -182,6 +185,7 @@ static int virtio_finalize_features(struct virtio_device *dev)
}
return 0;
}
+EXPORT_SYMBOL_GPL(virtio_finalize_features);
static int virtio_dev_probe(struct device *_d)
{
@@ -193,7 +197,7 @@ static int virtio_dev_probe(struct device *_d)
u64 driver_features_legacy;
/* We have a driver! */
- add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
/* Figure out what features the device supports. */
device_features = dev->config->get_features(dev);
@@ -247,7 +251,7 @@ static int virtio_dev_probe(struct device *_d)
return 0;
err:
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
}
@@ -265,7 +269,7 @@ static int virtio_dev_remove(struct device *_d)
WARN_ON_ONCE(dev->config->get_status(dev));
/* Acknowledge the device's existence again. */
- add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
return 0;
}
@@ -316,7 +320,7 @@ int register_virtio_device(struct virtio_device *dev)
dev->config->reset(dev);
/* Acknowledge that we've seen the device. */
- add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
@@ -325,7 +329,7 @@ int register_virtio_device(struct virtio_device *dev)
err = device_register(&dev->dev);
out:
if (err)
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);
@@ -365,18 +369,18 @@ int virtio_device_restore(struct virtio_device *dev)
dev->config->reset(dev);
/* Acknowledge that we've seen the device. */
- add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
/* Maybe driver failed before freeze.
* Restore the failed status, for debugging. */
if (dev->failed)
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
if (!drv)
return 0;
/* We have a driver! */
- add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
ret = virtio_finalize_features(dev);
if (ret)
@@ -389,14 +393,14 @@ int virtio_device_restore(struct virtio_device *dev)
}
/* Finally, tell the device we're all set */
- add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
virtio_config_enable(dev);
return 0;
err:
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
}
EXPORT_SYMBOL_GPL(virtio_device_restore);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
#define pr_fmt(fmt) "virtio-mmio: " fmt
#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
struct virtio_mmio_device *vm_dev;
struct resource *mem;
unsigned long magic;
+ int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- if (vm_dev->version == 1)
+ if (vm_dev->version == 1) {
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ /*
+ * In the legacy case, ensure our coherently-allocated virtio
+ * ring will be at an address expressable as a 32-bit PFN.
+ */
+ if (!rc)
+ dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32 + PAGE_SHIFT));
+ } else {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ }
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
platform_set_drvdata(pdev, vm_dev);
return register_virtio_device(&vm_dev->vdev);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5676aefdf2bc..0003912a8111 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -68,13 +68,12 @@ static void vcpu_hotplug(unsigned int cpu)
}
static void handle_vcpu_hotplug_event(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
unsigned int cpu;
char *cpustr;
- const char *node = vec[XS_WATCH_PATH];
- cpustr = strstr(node, "cpu/");
+ cpustr = strstr(path, "cpu/");
if (cpustr != NULL) {
sscanf(cpustr, "cpu/%u", &cpu);
vcpu_hotplug(cpu);
@@ -107,7 +106,7 @@ static int __init setup_vcpu_hotplug_event(void)
.notifier_call = setup_cpu_watcher };
#ifdef CONFIG_X86
- if (!xen_pv_domain())
+ if (!xen_pv_domain() && !xen_pvh_domain())
#else
if (!xen_domain())
#endif
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index fd8e872d2943..6a53577772c9 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1704,7 +1704,6 @@ void __init xen_init_IRQ(void)
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
- /* TODO: No PVH support for PIRQ EOI */
if (rc != 0) {
free_page((unsigned long) pirq_eoi_map);
pirq_eoi_map = NULL;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bb36b1e1dbcc..d6786b87e13b 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1146,13 +1146,13 @@ EXPORT_SYMBOL_GPL(gnttab_init);
static int __gnttab_init(void)
{
+ if (!xen_domain())
+ return -ENODEV;
+
/* Delay grant-table initialization in the PV on HVM case */
- if (xen_hvm_domain())
+ if (xen_hvm_domain() && !xen_pvh_domain())
return 0;
- if (!xen_pv_domain())
- return -ENODEV;
-
return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 26e5e8507f03..c1ec8ee80924 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -218,7 +218,7 @@ static struct shutdown_handler shutdown_handlers[] = {
};
static void shutdown_handler(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
char *str;
struct xenbus_transaction xbt;
@@ -266,8 +266,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
}
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
- unsigned int len)
+static void sysrq_handler(struct xenbus_watch *watch, const char *path,
+ const char *token)
{
char sysrq_key = '\0';
struct xenbus_transaction xbt;
@@ -277,7 +277,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
err = xenbus_transaction_start(&xbt);
if (err)
return;
- if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+ if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) < 0) {
pr_err("Unable to read sysrq code in control/sysrq\n");
xenbus_transaction_end(xbt, 1);
return;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
+static uint64_t callback_via;
static unsigned long alloc_xen_mmio(unsigned long len)
{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
return addr;
}
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+ u8 pin;
+ int irq;
+
+ irq = pdev->irq;
+ if (irq < 16)
+ return irq; /* ISA IRQ */
+
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+ return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+ ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+ IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+ int err;
+ if (!xen_pv_domain())
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+ dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ return err;
+ }
+ return 0;
+}
+
static int platform_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ /*
+ * Xen HVM guests always use the vector callback mechanism.
+ * L1 Dom0 in a nested Xen environment is a PV guest inside an
+ * HVM environment. It needs the platform-pci driver to get
+ * notifications from L0 Xen, but it cannot use the vector callback
+ * as it is not exported by L1 Xen.
+ */
+ if (xen_pv_domain()) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+ goto out;
+ }
+ callback_via = get_callback_via(pdev);
+ ret = xen_set_callback_via(callback_via);
+ if (ret) {
+ dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+ "err=%d\n", ret);
+ goto out;
+ }
+ }
+
max_nr_gframes = gnttab_max_grant_frames();
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+ .resume_early = platform_pci_resume,
+#endif
};
builtin_pci_driver(platform_driver);
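
get_callback_via() above packs the platform device's PCI INTx source into the 64-bit callback "via" value handed to Xen: a type field in the high bits, then domain, bus, devfn and the INTx pin. A self-contained sketch of the same bit layout, modelling only the PCI INTx branch and assuming a shift of 56 for the type field (the value of HVM_CALLBACK_VIA_TYPE_SHIFT in the Xen headers):

#include <stdio.h>
#include <stdint.h>

#define CALLBACK_VIA_TYPE_SHIFT	56	/* assumed HVM_CALLBACK_VIA_TYPE_SHIFT */

/* Pack a PCI INTx source into the callback "via" value, as in the hunk above. */
static uint64_t pci_intx_callback_via(uint16_t domain, uint8_t bus,
				      uint8_t devfn, uint8_t pin)
{
	return ((uint64_t)0x01 << CALLBACK_VIA_TYPE_SHIFT) |	/* type: PCI INTx */
	       ((uint64_t)domain << 32) |
	       ((uint64_t)bus << 16) |
	       ((uint64_t)(devfn & 0xff) << 8) |
	       ((uint64_t)(pin - 1) & 3);
}

int main(void)
{
	/* Device 00:03.0 in PCI domain 0, using INTA. */
	printf("0x%llx\n",
	       (unsigned long long)pci_intx_callback_via(0, 0, 3 << 3, 1));
	return 0;
}
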
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 6e3306f4a525..2077a3ac7c0c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -22,6 +22,7 @@
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -32,6 +33,7 @@
#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
+#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
@@ -43,16 +45,36 @@ MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)
+static unsigned int privcmd_dm_op_max_num = 16;
+module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
+MODULE_PARM_DESC(dm_op_max_nr_bufs,
+ "Maximum number of buffers per dm_op hypercall");
+
+static unsigned int privcmd_dm_op_buf_max_size = 4096;
+module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
+ 0644);
+MODULE_PARM_DESC(dm_op_buf_max_size,
+ "Maximum size of a dm_op hypercall buffer");
+
+struct privcmd_data {
+ domid_t domid;
+};
+
static int privcmd_vma_range_is_mapped(
struct vm_area_struct *vma,
unsigned long addr,
unsigned long nr_pages);
-static long privcmd_ioctl_hypercall(void __user *udata)
+static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
+ struct privcmd_data *data = file->private_data;
struct privcmd_hypercall hypercall;
long ret;
+ /* Disallow arbitrary hypercalls if restricted */
+ if (data->domid != DOMID_INVALID)
+ return -EPERM;
+
if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
return -EFAULT;
@@ -229,8 +251,9 @@ static int mmap_gfn_range(void *data, void *state)
return 0;
}
-static long privcmd_ioctl_mmap(void __user *udata)
+static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
+ struct privcmd_data *data = file->private_data;
struct privcmd_mmap mmapcmd;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -245,6 +268,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
+ return -EPERM;
+
rc = gather_array(&pagelist,
mmapcmd.num, sizeof(struct privcmd_mmap_entry),
mmapcmd.entry);
@@ -416,8 +443,10 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
static const struct vm_operations_struct privcmd_vm_ops;
-static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
+static long privcmd_ioctl_mmap_batch(
+ struct file *file, void __user *udata, int version)
{
+ struct privcmd_data *data = file->private_data;
int ret;
struct privcmd_mmapbatch_v2 m;
struct mm_struct *mm = current->mm;
@@ -446,6 +475,10 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
return -EINVAL;
}
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != m.dom)
+ return -EPERM;
+
nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
return -EINVAL;
@@ -548,37 +581,210 @@ out_unlock:
goto out;
}
+static int lock_pages(
+ struct privcmd_dm_op_buf kbufs[], unsigned int num,
+ struct page *pages[], unsigned int nr_pages)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ unsigned int requested;
+ int pinned;
+
+ requested = DIV_ROUND_UP(
+ offset_in_page(kbufs[i].uptr) + kbufs[i].size,
+ PAGE_SIZE);
+ if (requested > nr_pages)
+ return -ENOSPC;
+
+ pinned = get_user_pages_fast(
+ (unsigned long) kbufs[i].uptr,
+ requested, FOLL_WRITE, pages);
+ if (pinned < 0)
+ return pinned;
+
+ nr_pages -= pinned;
+ pages += pinned;
+ }
+
+ return 0;
+}
+
+static void unlock_pages(struct page *pages[], unsigned int nr_pages)
+{
+ unsigned int i;
+
+ if (!pages)
+ return;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ put_page(pages[i]);
+ }
+}
+
+static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
+{
+ struct privcmd_data *data = file->private_data;
+ struct privcmd_dm_op kdata;
+ struct privcmd_dm_op_buf *kbufs;
+ unsigned int nr_pages = 0;
+ struct page **pages = NULL;
+ struct xen_dm_op_buf *xbufs = NULL;
+ unsigned int i;
+ long rc;
+
+ if (copy_from_user(&kdata, udata, sizeof(kdata)))
+ return -EFAULT;
+
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
+ return -EPERM;
+
+ if (kdata.num == 0)
+ return 0;
+
+ if (kdata.num > privcmd_dm_op_max_num)
+ return -E2BIG;
+
+ kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
+ if (!kbufs)
+ return -ENOMEM;
+
+ if (copy_from_user(kbufs, kdata.ubufs,
+ sizeof(*kbufs) * kdata.num)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ for (i = 0; i < kdata.num; i++) {
+ if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
+ kbufs[i].size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ nr_pages += DIV_ROUND_UP(
+ offset_in_page(kbufs[i].uptr) + kbufs[i].size,
+ PAGE_SIZE);
+ }
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
+ if (!xbufs) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < kdata.num; i++) {
+ set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
+ xbufs[i].size = kbufs[i].size;
+ }
+
+ xen_preemptible_hcall_begin();
+ rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
+ xen_preemptible_hcall_end();
+
+out:
+ unlock_pages(pages, nr_pages);
+ kfree(xbufs);
+ kfree(pages);
+ kfree(kbufs);
+
+ return rc;
+}
+
+static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
+{
+ struct privcmd_data *data = file->private_data;
+ domid_t dom;
+
+ if (copy_from_user(&dom, udata, sizeof(dom)))
+ return -EFAULT;
+
+ /* Set restriction to the specified domain, or check it matches */
+ if (data->domid == DOMID_INVALID)
+ data->domid = dom;
+ else if (data->domid != dom)
+ return -EINVAL;
+
+ return 0;
+}
+
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
- int ret = -ENOSYS;
+ int ret = -ENOTTY;
void __user *udata = (void __user *) data;
switch (cmd) {
case IOCTL_PRIVCMD_HYPERCALL:
- ret = privcmd_ioctl_hypercall(udata);
+ ret = privcmd_ioctl_hypercall(file, udata);
break;
case IOCTL_PRIVCMD_MMAP:
- ret = privcmd_ioctl_mmap(udata);
+ ret = privcmd_ioctl_mmap(file, udata);
break;
case IOCTL_PRIVCMD_MMAPBATCH:
- ret = privcmd_ioctl_mmap_batch(udata, 1);
+ ret = privcmd_ioctl_mmap_batch(file, udata, 1);
break;
case IOCTL_PRIVCMD_MMAPBATCH_V2:
- ret = privcmd_ioctl_mmap_batch(udata, 2);
+ ret = privcmd_ioctl_mmap_batch(file, udata, 2);
+ break;
+
+ case IOCTL_PRIVCMD_DM_OP:
+ ret = privcmd_ioctl_dm_op(file, udata);
+ break;
+
+ case IOCTL_PRIVCMD_RESTRICT:
+ ret = privcmd_ioctl_restrict(file, udata);
break;
default:
- ret = -EINVAL;
break;
}
return ret;
}
+static int privcmd_open(struct inode *ino, struct file *file)
+{
+ struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ /* DOMID_INVALID implies no restriction */
+ data->domid = DOMID_INVALID;
+
+ file->private_data = data;
+ return 0;
+}
+
+static int privcmd_release(struct inode *ino, struct file *file)
+{
+ struct privcmd_data *data = file->private_data;
+
+ kfree(data);
+ return 0;
+}
+
static void privcmd_close(struct vm_area_struct *vma)
{
struct page **pages = vma->vm_private_data;
@@ -647,6 +853,8 @@ static int privcmd_vma_range_is_mapped(
const struct file_operations xen_privcmd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = privcmd_ioctl,
+ .open = privcmd_open,
+ .release = privcmd_release,
.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
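
The dm_op path above pins each user buffer with get_user_pages_fast() and must work out how many pages a buffer spans once its starting offset within a page is accounted for. The DIV_ROUND_UP(offset_in_page(uptr) + size, PAGE_SIZE) computation reduces to the following self-contained arithmetic, assuming 4 KiB pages for the example:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* Number of pages touched by a user buffer of 'size' bytes at 'uptr'. */
static unsigned int pages_spanned(uintptr_t uptr, unsigned int size)
{
	unsigned int offset = uptr % PAGE_SIZE;	/* offset_in_page() */

	return div_round_up(offset + size, PAGE_SIZE);
}

int main(void)
{
	printf("%u\n", pages_spanned(0x1000, 4096));	/* 1: page aligned */
	printf("%u\n", pages_spanned(0x1ff0, 32));	/* 2: straddles a boundary */
	return 0;
}
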
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f905d6eeb048..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 79865b8901ba..e7715cb62eef 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -55,7 +55,7 @@ static int register_balloon(struct device *dev);
/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
unsigned long long new_target;
int err;
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 3f0aee0a068b..3814b44bf1f7 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -652,7 +652,7 @@ out:
}
static void xen_pcibk_be_watch(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
struct xen_pcibk_device *pdev =
container_of(watch, struct xen_pcibk_device, be_watch);
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
new file mode 100644
index 000000000000..149c5e7efc89
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus.h
@@ -0,0 +1,135 @@
+/*
+ * Private include for xenbus communications.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_XENBUS_H
+#define _XENBUS_XENBUS_H
+
+#include <linux/mutex.h>
+#include <linux/uio.h>
+#include <xen/xenbus.h>
+
+#define XEN_BUS_ID_SIZE 20
+
+struct xen_bus_type {
+ char *root;
+ unsigned int levels;
+ int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
+ int (*probe)(struct xen_bus_type *bus, const char *type,
+ const char *dir);
+ void (*otherend_changed)(struct xenbus_watch *watch, const char *path,
+ const char *token);
+ struct bus_type bus;
+};
+
+enum xenstore_init {
+ XS_UNKNOWN,
+ XS_PV,
+ XS_HVM,
+ XS_LOCAL,
+};
+
+struct xs_watch_event {
+ struct list_head list;
+ unsigned int len;
+ struct xenbus_watch *handle;
+ const char *path;
+ const char *token;
+ char body[];
+};
+
+enum xb_req_state {
+ xb_req_state_queued,
+ xb_req_state_wait_reply,
+ xb_req_state_got_reply,
+ xb_req_state_aborted
+};
+
+struct xb_req_data {
+ struct list_head list;
+ wait_queue_head_t wq;
+ struct xsd_sockmsg msg;
+ enum xsd_sockmsg_type type;
+ char *body;
+ const struct kvec *vec;
+ int num_vecs;
+ int err;
+ enum xb_req_state state;
+ void (*cb)(struct xb_req_data *);
+ void *par;
+};
+
+extern enum xenstore_init xen_store_domain_type;
+extern const struct attribute_group *xenbus_dev_groups[];
+extern struct mutex xs_response_mutex;
+extern struct list_head xs_reply_list;
+extern struct list_head xb_write_list;
+extern wait_queue_head_t xb_waitq;
+extern struct mutex xb_write_mutex;
+
+int xs_init(void);
+int xb_init_comms(void);
+void xb_deinit_comms(void);
+int xs_watch_msg(struct xs_watch_event *event);
+void xs_request_exit(struct xb_req_data *req);
+
+int xenbus_match(struct device *_dev, struct device_driver *_drv);
+int xenbus_dev_probe(struct device *_dev);
+int xenbus_dev_remove(struct device *_dev);
+int xenbus_register_driver_common(struct xenbus_driver *drv,
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name);
+int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename);
+int xenbus_probe_devices(struct xen_bus_type *bus);
+
+void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+
+void xenbus_dev_shutdown(struct device *_dev);
+
+int xenbus_dev_suspend(struct device *dev);
+int xenbus_dev_resume(struct device *dev);
+int xenbus_dev_cancel(struct device *dev);
+
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+ const char *path, const char *token,
+ int ignore_on_shutdown);
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
+ char *id_node, char *path_node);
+
+void xenbus_ring_ops_init(void);
+
+int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
+void xenbus_dev_queue_reply(struct xb_req_data *req);
+
+#endif
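
The new xenbus.h above introduces struct xs_watch_event with a flexible body[] array, which process_msg() in xenbus_comms.c later allocates in a single block sized for both the header and the message payload. The allocation pattern, stripped down to standard C with a simplified event layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct watch_event {
	unsigned int len;
	const char *path;
	const char *token;
	char body[];		/* payload follows the header in one allocation */
};

static struct watch_event *alloc_event(const char *payload, unsigned int len)
{
	/* One allocation covers the header plus the NUL-terminated payload. */
	struct watch_event *ev = malloc(sizeof(*ev) + len + 1);

	if (!ev)
		return NULL;
	ev->len = len;
	memcpy(ev->body, payload, len);
	ev->body[len] = '\0';
	return ev;
}

int main(void)
{
	struct watch_event *ev = alloc_event("backend/vbd/1/51712", 19);

	if (ev) {
		printf("%u bytes: %s\n", ev->len, ev->body);
		free(ev);
	}
	return 0;
}
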
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 056da6ee1a35..82a8866758ee 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -47,7 +47,7 @@
#include <xen/xen.h>
#include <xen/features.h>
-#include "xenbus_probe.h"
+#include "xenbus.h"
#define XENBUS_PAGES(_grants) (DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
- const char **, unsigned int))
+ const char *, const char *))
{
int err;
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
int xenbus_watch_pathfmt(struct xenbus_device *dev,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
- const char **, unsigned int),
+ const char *, const char *),
const char *pathfmt, ...)
{
int err;
@@ -259,53 +259,34 @@ int xenbus_frontend_closed(struct xenbus_device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
-/**
- * Return the path to the error node for the given device, or NULL on failure.
- * If the value returned is non-NULL, then it is the caller's to kfree.
- */
-static char *error_path(struct xenbus_device *dev)
-{
- return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
-}
-
-
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
const char *fmt, va_list ap)
{
unsigned int len;
- char *printf_buffer = NULL;
- char *path_buffer = NULL;
+ char *printf_buffer;
+ char *path_buffer;
#define PRINTF_BUFFER_SIZE 4096
+
printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
- if (printf_buffer == NULL)
- goto fail;
+ if (!printf_buffer)
+ return;
len = sprintf(printf_buffer, "%i ", -err);
- vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
+ vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);
dev_err(&dev->dev, "%s\n", printf_buffer);
- path_buffer = error_path(dev);
-
- if (path_buffer == NULL) {
+ path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
+ if (!path_buffer ||
+ xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
- goto fail;
- }
+ dev->nodename, printf_buffer);
- if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
- dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
- goto fail;
- }
-
-fail:
kfree(printf_buffer);
kfree(path_buffer);
}
-
/**
* xenbus_dev_error
* @dev: xenbus device
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index ecdecce80a6c..856ada5d39c9 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -34,19 +34,31 @@
#include <linux/wait.h>
#include <linux/interrupt.h>
+#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <xen/xenbus.h>
#include <asm/xen/hypervisor.h>
#include <xen/events.h>
#include <xen/page.h>
-#include "xenbus_comms.h"
+#include "xenbus.h"
+
+/* A list of replies. Currently only one will ever be outstanding. */
+LIST_HEAD(xs_reply_list);
+
+/* A list of write requests. */
+LIST_HEAD(xb_write_list);
+DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+DEFINE_MUTEX(xb_write_mutex);
+
+/* Protect xenbus reader thread against save/restore. */
+DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
+static struct task_struct *xenbus_task;
static DECLARE_WORK(probe_work, xenbus_probe);
-static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
static irqreturn_t wake_waiting(int irq, void *unused)
{
@@ -84,30 +96,31 @@ static const void *get_input_chunk(XENSTORE_RING_IDX cons,
return buf + MASK_XENSTORE_IDX(cons);
}
+static int xb_data_to_write(void)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+
+ return (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE &&
+ !list_empty(&xb_write_list);
+}
+
/**
* xb_write - low level write
* @data: buffer to send
* @len: length of buffer
*
- * Returns 0 on success, error otherwise.
+ * Returns number of bytes written or -err.
*/
-int xb_write(const void *data, unsigned len)
+static int xb_write(const void *data, unsigned int len)
{
struct xenstore_domain_interface *intf = xen_store_interface;
XENSTORE_RING_IDX cons, prod;
- int rc;
+ unsigned int bytes = 0;
while (len != 0) {
void *dst;
unsigned int avail;
- rc = wait_event_interruptible(
- xb_waitq,
- (intf->req_prod - intf->req_cons) !=
- XENSTORE_RING_SIZE);
- if (rc < 0)
- return rc;
-
/* Read indexes, then verify. */
cons = intf->req_cons;
prod = intf->req_prod;
@@ -115,6 +128,11 @@ int xb_write(const void *data, unsigned len)
intf->req_cons = intf->req_prod = 0;
return -EIO;
}
+ if (!xb_data_to_write())
+ return bytes;
+
+ /* Must write data /after/ reading the consumer index. */
+ virt_mb();
dst = get_output_chunk(cons, prod, intf->req, &avail);
if (avail == 0)
@@ -122,52 +140,45 @@ int xb_write(const void *data, unsigned len)
if (avail > len)
avail = len;
- /* Must write data /after/ reading the consumer index. */
- virt_mb();
-
memcpy(dst, data, avail);
data += avail;
len -= avail;
+ bytes += avail;
/* Other side must not see new producer until data is there. */
virt_wmb();
intf->req_prod += avail;
/* Implies mb(): other side will see the updated producer. */
- notify_remote_via_evtchn(xen_store_evtchn);
+ if (prod <= intf->req_cons)
+ notify_remote_via_evtchn(xen_store_evtchn);
}
- return 0;
+ return bytes;
}
-int xb_data_to_read(void)
+static int xb_data_to_read(void)
{
struct xenstore_domain_interface *intf = xen_store_interface;
return (intf->rsp_cons != intf->rsp_prod);
}
-int xb_wait_for_data_to_read(void)
-{
- return wait_event_interruptible(xb_waitq, xb_data_to_read());
-}
-
-int xb_read(void *data, unsigned len)
+static int xb_read(void *data, unsigned int len)
{
struct xenstore_domain_interface *intf = xen_store_interface;
XENSTORE_RING_IDX cons, prod;
- int rc;
+ unsigned int bytes = 0;
while (len != 0) {
unsigned int avail;
const char *src;
- rc = xb_wait_for_data_to_read();
- if (rc < 0)
- return rc;
-
/* Read indexes, then verify. */
cons = intf->rsp_cons;
prod = intf->rsp_prod;
+ if (cons == prod)
+ return bytes;
+
if (!check_indexes(cons, prod)) {
intf->rsp_cons = intf->rsp_prod = 0;
return -EIO;
@@ -185,17 +196,243 @@ int xb_read(void *data, unsigned len)
memcpy(data, src, avail);
data += avail;
len -= avail;
+ bytes += avail;
/* Other side must not see free space until we've copied out */
virt_mb();
intf->rsp_cons += avail;
- pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-
/* Implies mb(): other side will see the updated consumer. */
- notify_remote_via_evtchn(xen_store_evtchn);
+ if (intf->rsp_prod - cons >= XENSTORE_RING_SIZE)
+ notify_remote_via_evtchn(xen_store_evtchn);
+ }
+
+ return bytes;
+}
+
+static int process_msg(void)
+{
+ static struct {
+ struct xsd_sockmsg msg;
+ char *body;
+ union {
+ void *alloc;
+ struct xs_watch_event *watch;
+ };
+ bool in_msg;
+ bool in_hdr;
+ unsigned int read;
+ } state;
+ struct xb_req_data *req;
+ int err;
+ unsigned int len;
+
+ if (!state.in_msg) {
+ state.in_msg = true;
+ state.in_hdr = true;
+ state.read = 0;
+
+ /*
+ * We must disallow save/restore while reading a message.
+ * A partial read across s/r leaves us out of sync with
+ * xenstored.
+ * xs_response_mutex is locked as long as we are processing one
+ * message. state.in_msg will be true as long as we are holding
+ * the lock here.
+ */
+ mutex_lock(&xs_response_mutex);
+
+ if (!xb_data_to_read()) {
+ /* We raced with save/restore: pending data 'gone'. */
+ mutex_unlock(&xs_response_mutex);
+ state.in_msg = false;
+ return 0;
+ }
+ }
+
+ if (state.in_hdr) {
+ if (state.read != sizeof(state.msg)) {
+ err = xb_read((void *)&state.msg + state.read,
+ sizeof(state.msg) - state.read);
+ if (err < 0)
+ goto out;
+ state.read += err;
+ if (state.read != sizeof(state.msg))
+ return 0;
+ if (state.msg.len > XENSTORE_PAYLOAD_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ len = state.msg.len + 1;
+ if (state.msg.type == XS_WATCH_EVENT)
+ len += sizeof(*state.watch);
+
+ state.alloc = kmalloc(len, GFP_NOIO | __GFP_HIGH);
+ if (!state.alloc)
+ return -ENOMEM;
+
+ if (state.msg.type == XS_WATCH_EVENT)
+ state.body = state.watch->body;
+ else
+ state.body = state.alloc;
+ state.in_hdr = false;
+ state.read = 0;
+ }
+
+ err = xb_read(state.body + state.read, state.msg.len - state.read);
+ if (err < 0)
+ goto out;
+
+ state.read += err;
+ if (state.read != state.msg.len)
+ return 0;
+
+ state.body[state.msg.len] = '\0';
+
+ if (state.msg.type == XS_WATCH_EVENT) {
+ state.watch->len = state.msg.len;
+ err = xs_watch_msg(state.watch);
+ } else {
+ err = -ENOENT;
+ mutex_lock(&xb_write_mutex);
+ list_for_each_entry(req, &xs_reply_list, list) {
+ if (req->msg.req_id == state.msg.req_id) {
+ if (req->state == xb_req_state_wait_reply) {
+ req->msg.type = state.msg.type;
+ req->msg.len = state.msg.len;
+ req->body = state.body;
+ req->state = xb_req_state_got_reply;
+ list_del(&req->list);
+ req->cb(req);
+ } else {
+ list_del(&req->list);
+ kfree(req);
+ }
+ err = 0;
+ break;
+ }
+ }
+ mutex_unlock(&xb_write_mutex);
+ if (err)
+ goto out;
}
+ mutex_unlock(&xs_response_mutex);
+
+ state.in_msg = false;
+ state.alloc = NULL;
+ return err;
+
+ out:
+ mutex_unlock(&xs_response_mutex);
+ state.in_msg = false;
+ kfree(state.alloc);
+ state.alloc = NULL;
+ return err;
+}
+
+static int process_writes(void)
+{
+ static struct {
+ struct xb_req_data *req;
+ int idx;
+ unsigned int written;
+ } state;
+ void *base;
+ unsigned int len;
+ int err = 0;
+
+ if (!xb_data_to_write())
+ return 0;
+
+ mutex_lock(&xb_write_mutex);
+
+ if (!state.req) {
+ state.req = list_first_entry(&xb_write_list,
+ struct xb_req_data, list);
+ state.idx = -1;
+ state.written = 0;
+ }
+
+ if (state.req->state == xb_req_state_aborted)
+ goto out_err;
+
+ while (state.idx < state.req->num_vecs) {
+ if (state.idx < 0) {
+ base = &state.req->msg;
+ len = sizeof(state.req->msg);
+ } else {
+ base = state.req->vec[state.idx].iov_base;
+ len = state.req->vec[state.idx].iov_len;
+ }
+ err = xb_write(base + state.written, len - state.written);
+ if (err < 0)
+ goto out_err;
+ state.written += err;
+ if (state.written != len)
+ goto out;
+
+ state.idx++;
+ state.written = 0;
+ }
+
+ list_del(&state.req->list);
+ state.req->state = xb_req_state_wait_reply;
+ list_add_tail(&state.req->list, &xs_reply_list);
+ state.req = NULL;
+
+ out:
+ mutex_unlock(&xb_write_mutex);
+
+ return 0;
+
+ out_err:
+ state.req->msg.type = XS_ERROR;
+ state.req->err = err;
+ list_del(&state.req->list);
+ if (state.req->state == xb_req_state_aborted)
+ kfree(state.req);
+ else {
+ state.req->state = xb_req_state_got_reply;
+ wake_up(&state.req->wq);
+ }
+
+ mutex_unlock(&xb_write_mutex);
+
+ state.req = NULL;
+
+ return err;
+}
+
+static int xb_thread_work(void)
+{
+ return xb_data_to_read() || xb_data_to_write();
+}
+
+static int xenbus_thread(void *unused)
+{
+ int err;
+
+ while (!kthread_should_stop()) {
+ if (wait_event_interruptible(xb_waitq, xb_thread_work()))
+ continue;
+
+ err = process_msg();
+ if (err == -ENOMEM)
+ schedule();
+ else if (err)
+ pr_warn_ratelimited("error %d while reading message\n",
+ err);
+
+ err = process_writes();
+ if (err)
+ pr_warn_ratelimited("error %d while writing message\n",
+ err);
+ }
+
+ xenbus_task = NULL;
return 0;
}
@@ -223,6 +460,7 @@ int xb_init_comms(void)
rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
} else {
int err;
+
err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
0, "xenbus", &xb_waitq);
if (err < 0) {
@@ -231,6 +469,13 @@ int xb_init_comms(void)
}
xenbus_irq = err;
+
+ if (!xenbus_task) {
+ xenbus_task = kthread_run(xenbus_thread, NULL,
+ "xenbus");
+ if (IS_ERR(xenbus_task))
+ return PTR_ERR(xenbus_task);
+ }
}
return 0;
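
The reworked xb_write() above copies only as much as currently fits in the request ring, returns the number of bytes written so the new xenbus_thread can resume the request later, and kicks the event channel only when the consumer had caught up with the old producer. A self-contained sketch of that bounded copy into a power-of-two ring with free-running indices:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define RING_SIZE	16u	/* power of two; stands in for XENSTORE_RING_SIZE */
#define MASK(i)		((i) & (RING_SIZE - 1))

struct ring {
	char buf[RING_SIZE];
	unsigned int cons;	/* advanced by the reader */
	unsigned int prod;	/* advanced by the writer */
};

/* Copy at most 'len' bytes into the ring; returns the bytes actually written. */
static unsigned int ring_write(struct ring *r, const char *data,
			       unsigned int len, bool *notify)
{
	unsigned int written = 0;

	*notify = false;
	while (len) {
		unsigned int avail = RING_SIZE - (r->prod - r->cons);
		unsigned int chunk;

		if (!avail)
			break;			/* ring full; caller retries later */

		/* Do not wrap past the end of the buffer in one memcpy. */
		chunk = RING_SIZE - MASK(r->prod);
		if (chunk > avail)
			chunk = avail;
		if (chunk > len)
			chunk = len;

		memcpy(r->buf + MASK(r->prod), data, chunk);
		/* Notify only if the reader had already consumed everything. */
		if (r->prod <= r->cons)
			*notify = true;
		r->prod += chunk;
		data += chunk;
		len -= chunk;
		written += chunk;
	}
	return written;
}

int main(void)
{
	struct ring r = { .cons = 0, .prod = 0 };
	bool notify;

	printf("%u\n", ring_write(&r, "hello xenstore ring!", 20, &notify)); /* 16 */
	printf("notify=%d\n", notify);	/* 1 */
	return 0;
}
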
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
deleted file mode 100644
index 867a2e425208..000000000000
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Private include for xenbus communications.
- *
- * Copyright (C) 2005 Rusty Russell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _XENBUS_COMMS_H
-#define _XENBUS_COMMS_H
-
-#include <linux/fs.h>
-
-int xs_init(void);
-int xb_init_comms(void);
-void xb_deinit_comms(void);
-
-/* Low level routines. */
-int xb_write(const void *data, unsigned len);
-int xb_read(void *data, unsigned len);
-int xb_data_to_read(void);
-int xb_wait_for_data_to_read(void);
-extern struct xenstore_domain_interface *xen_store_interface;
-extern int xen_store_evtchn;
-extern enum xenstore_init xen_store_domain_type;
-
-extern const struct file_operations xen_xenbus_fops;
-
-#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 4a41ac9af966..1126701e212e 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -16,7 +16,7 @@
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
-#include "xenbus_comms.h"
+#include "xenbus.h"
static int xenbus_backend_open(struct inode *inode, struct file *filp)
{
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 79130b310247..4d343eed08f5 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -57,12 +57,12 @@
#include <linux/miscdevice.h>
#include <linux/init.h>
-#include "xenbus_comms.h"
-
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
+#include "xenbus.h"
+
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -113,6 +113,7 @@ struct xenbus_file_priv {
struct list_head read_buffers;
wait_queue_head_t read_waitq;
+ struct kref kref;
};
/* Read out any raw xenbus messages queued up. */
@@ -258,26 +259,23 @@ out_fail:
}
static void watch_fired(struct xenbus_watch *watch,
- const char **vec,
- unsigned int len)
+ const char *path,
+ const char *token)
{
struct watch_adapter *adap;
struct xsd_sockmsg hdr;
- const char *path, *token;
- int path_len, tok_len, body_len, data_len = 0;
+ const char *token_caller;
+ int path_len, tok_len, body_len;
int ret;
LIST_HEAD(staging_q);
adap = container_of(watch, struct watch_adapter, watch);
- path = vec[XS_WATCH_PATH];
- token = adap->token;
+ token_caller = adap->token;
path_len = strlen(path) + 1;
- tok_len = strlen(token) + 1;
- if (len > 2)
- data_len = vec[len] - vec[2] + 1;
- body_len = path_len + tok_len + data_len;
+ tok_len = strlen(token_caller) + 1;
+ body_len = path_len + tok_len;
hdr.type = XS_WATCH_EVENT;
hdr.len = body_len;
@@ -288,9 +286,7 @@ static void watch_fired(struct xenbus_watch *watch,
if (!ret)
ret = queue_reply(&staging_q, path, path_len);
if (!ret)
- ret = queue_reply(&staging_q, token, tok_len);
- if (!ret && len > 2)
- ret = queue_reply(&staging_q, vec[2], data_len);
+ ret = queue_reply(&staging_q, token_caller, tok_len);
if (!ret) {
/* success: pass reply list onto watcher */
@@ -302,6 +298,107 @@ static void watch_fired(struct xenbus_watch *watch,
mutex_unlock(&adap->dev_data->reply_mutex);
}
+static void xenbus_file_free(struct kref *kref)
+{
+ struct xenbus_file_priv *u;
+ struct xenbus_transaction_holder *trans, *tmp;
+ struct watch_adapter *watch, *tmp_watch;
+ struct read_buffer *rb, *tmp_rb;
+
+ u = container_of(kref, struct xenbus_file_priv, kref);
+
+ /*
+ * No need for locking here because there are no other users,
+ * by definition.
+ */
+
+ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
+ xenbus_transaction_end(trans->handle, 1);
+ list_del(&trans->list);
+ kfree(trans);
+ }
+
+ list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+ unregister_xenbus_watch(&watch->watch);
+ list_del(&watch->list);
+ free_watch_adapter(watch);
+ }
+
+ list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+ list_del(&rb->list);
+ kfree(rb);
+ }
+ kfree(u);
+}
+
+static struct xenbus_transaction_holder *xenbus_get_transaction(
+ struct xenbus_file_priv *u, uint32_t tx_id)
+{
+ struct xenbus_transaction_holder *trans;
+
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == tx_id)
+ return trans;
+
+ return NULL;
+}
+
+void xenbus_dev_queue_reply(struct xb_req_data *req)
+{
+ struct xenbus_file_priv *u = req->par;
+ struct xenbus_transaction_holder *trans = NULL;
+ int rc;
+ LIST_HEAD(staging_q);
+
+ xs_request_exit(req);
+
+ mutex_lock(&u->msgbuffer_mutex);
+
+ if (req->type == XS_TRANSACTION_START) {
+ trans = xenbus_get_transaction(u, 0);
+ if (WARN_ON(!trans))
+ goto out;
+ if (req->msg.type == XS_ERROR) {
+ list_del(&trans->list);
+ kfree(trans);
+ } else {
+ rc = kstrtou32(req->body, 10, &trans->handle.id);
+ if (WARN_ON(rc))
+ goto out;
+ }
+ } else if (req->msg.type == XS_TRANSACTION_END) {
+ trans = xenbus_get_transaction(u, req->msg.tx_id);
+ if (WARN_ON(!trans))
+ goto out;
+ list_del(&trans->list);
+ kfree(trans);
+ }
+
+ mutex_unlock(&u->msgbuffer_mutex);
+
+ mutex_lock(&u->reply_mutex);
+ rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
+ if (!rc)
+ rc = queue_reply(&staging_q, req->body, req->msg.len);
+ if (!rc) {
+ list_splice_tail(&staging_q, &u->read_buffers);
+ wake_up(&u->read_waitq);
+ } else {
+ queue_cleanup(&staging_q);
+ }
+ mutex_unlock(&u->reply_mutex);
+
+ kfree(req->body);
+ kfree(req);
+
+ kref_put(&u->kref, xenbus_file_free);
+
+ return;
+
+ out:
+ mutex_unlock(&u->msgbuffer_mutex);
+}
+
static int xenbus_command_reply(struct xenbus_file_priv *u,
unsigned int msg_type, const char *reply)
{
@@ -322,6 +419,9 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
wake_up(&u->read_waitq);
mutex_unlock(&u->reply_mutex);
+ if (!rc)
+ kref_put(&u->kref, xenbus_file_free);
+
return rc;
}
@@ -329,57 +429,22 @@ static int xenbus_write_transaction(unsigned msg_type,
struct xenbus_file_priv *u)
{
int rc;
- void *reply;
struct xenbus_transaction_holder *trans = NULL;
- LIST_HEAD(staging_q);
if (msg_type == XS_TRANSACTION_START) {
- trans = kmalloc(sizeof(*trans), GFP_KERNEL);
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
if (!trans) {
rc = -ENOMEM;
goto out;
}
- } else if (u->u.msg.tx_id != 0) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- if (&trans->list == &u->transactions)
- return xenbus_command_reply(u, XS_ERROR, "ENOENT");
- }
-
- reply = xenbus_dev_request_and_reply(&u->u.msg);
- if (IS_ERR(reply)) {
- if (msg_type == XS_TRANSACTION_START)
- kfree(trans);
- rc = PTR_ERR(reply);
- goto out;
- }
+ list_add(&trans->list, &u->transactions);
+ } else if (u->u.msg.tx_id != 0 &&
+ !xenbus_get_transaction(u, u->u.msg.tx_id))
+ return xenbus_command_reply(u, XS_ERROR, "ENOENT");
- if (msg_type == XS_TRANSACTION_START) {
- if (u->u.msg.type == XS_ERROR)
- kfree(trans);
- else {
- trans->handle.id = simple_strtoul(reply, NULL, 0);
- list_add(&trans->list, &u->transactions);
- }
- } else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_del(&trans->list);
+ rc = xenbus_dev_request_and_reply(&u->u.msg, u);
+ if (rc)
kfree(trans);
- }
-
- mutex_lock(&u->reply_mutex);
- rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
- if (!rc)
- rc = queue_reply(&staging_q, reply, u->u.msg.len);
- if (!rc) {
- list_splice_tail(&staging_q, &u->read_buffers);
- wake_up(&u->read_waitq);
- } else {
- queue_cleanup(&staging_q);
- }
- mutex_unlock(&u->reply_mutex);
-
- kfree(reply);
out:
return rc;
@@ -511,6 +576,8 @@ static ssize_t xenbus_file_write(struct file *filp,
* OK, now we have a complete message. Do something with it.
*/
+ kref_get(&u->kref);
+
msg_type = u->u.msg.type;
switch (msg_type) {
@@ -525,8 +592,10 @@ static ssize_t xenbus_file_write(struct file *filp,
ret = xenbus_write_transaction(msg_type, u);
break;
}
- if (ret != 0)
+ if (ret != 0) {
rc = ret;
+ kref_put(&u->kref, xenbus_file_free);
+ }
/* Buffered message consumed */
u->len = 0;
@@ -551,6 +620,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
if (u == NULL)
return -ENOMEM;
+ kref_init(&u->kref);
+
INIT_LIST_HEAD(&u->transactions);
INIT_LIST_HEAD(&u->watches);
INIT_LIST_HEAD(&u->read_buffers);
@@ -567,32 +638,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
static int xenbus_file_release(struct inode *inode, struct file *filp)
{
struct xenbus_file_priv *u = filp->private_data;
- struct xenbus_transaction_holder *trans, *tmp;
- struct watch_adapter *watch, *tmp_watch;
- struct read_buffer *rb, *tmp_rb;
-
- /*
- * No need for locking here because there are no other users,
- * by definition.
- */
-
- list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
- xenbus_transaction_end(trans->handle, 1);
- list_del(&trans->list);
- kfree(trans);
- }
-
- list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
- unregister_xenbus_watch(&watch->watch);
- list_del(&watch->list);
- free_watch_adapter(watch);
- }
- list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
- list_del(&rb->list);
- kfree(rb);
- }
- kfree(u);
+ kref_put(&u->kref, xenbus_file_free);
return 0;
}
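A short, self-contained sketch of the reference-counting pattern the hunks above introduce for struct xenbus_file_priv: open() holds one reference, each buffered request takes another before it is handed off for an asynchronous reply, and whichever path drops the last reference frees the structure. The demo_* names below are invented for illustration and are not part of the patch.

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_priv {
	struct kref kref;
	/* ... lists, wait queues, mutexes ... */
};

static void demo_free(struct kref *kref)
{
	struct demo_priv *p = container_of(kref, struct demo_priv, kref);

	/* Last reference is gone, so no locking is needed here. */
	kfree(p);
}

/* open(): the file itself owns one reference. */
static struct demo_priv *demo_open(void)
{
	struct demo_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		kref_init(&p->kref);
	return p;
}

/* Before queueing an asynchronous request, pin the private data... */
static void demo_submit(struct demo_priv *p)
{
	kref_get(&p->kref);
	/* ... hand the request to the xenstore machinery here ... */
}

/* ...and drop the pin once the reply has been delivered (or on error). */
static void demo_complete(struct demo_priv *p)
{
	kref_put(&p->kref, demo_free);
}

/* release(): drop the file's own reference; the actual kfree() happens
 * whenever the last outstanding request has finished. */
static void demo_release(struct demo_priv *p)
{
	kref_put(&p->kref, demo_free);
}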
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 4bdf654041e9..74888cacd0b0 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -62,8 +62,7 @@
#include <xen/hvm.h>
-#include "xenbus_comms.h"
-#include "xenbus_probe.h"
+#include "xenbus.h"
int xen_store_evtchn;
@@ -170,7 +169,7 @@ int xenbus_read_otherend_details(struct xenbus_device *xendev,
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
void xenbus_otherend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len,
+ const char *path, const char *token,
int ignore_on_shutdown)
{
struct xenbus_device *dev =
@@ -181,18 +180,15 @@ void xenbus_otherend_changed(struct xenbus_watch *watch,
/* Protect us against watches firing on old details when the otherend
details change, say immediately after a resume. */
if (!dev->otherend ||
- strncmp(dev->otherend, vec[XS_WATCH_PATH],
- strlen(dev->otherend))) {
- dev_dbg(&dev->dev, "Ignoring watch at %s\n",
- vec[XS_WATCH_PATH]);
+ strncmp(dev->otherend, path, strlen(dev->otherend))) {
+ dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
return;
}
state = xenbus_read_driver_state(dev->otherend);
dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
- state, xenbus_strstate(state), dev->otherend_watch.node,
- vec[XS_WATCH_PATH]);
+ state, xenbus_strstate(state), dev->otherend_watch.node, path);
/*
* Ignore xenbus transitions during shutdown. This prevents us doing
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
deleted file mode 100644
index c9ec7ca1f7ab..000000000000
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- * xenbus_probe.h
- *
- * Talks to Xen Store to figure out what devices we have.
- *
- * Copyright (C) 2005 Rusty Russell, IBM Corporation
- * Copyright (C) 2005 XenSource Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _XENBUS_PROBE_H
-#define _XENBUS_PROBE_H
-
-#define XEN_BUS_ID_SIZE 20
-
-struct xen_bus_type {
- char *root;
- unsigned int levels;
- int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
- int (*probe)(struct xen_bus_type *bus, const char *type,
- const char *dir);
- void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
- unsigned int len);
- struct bus_type bus;
-};
-
-enum xenstore_init {
- XS_UNKNOWN,
- XS_PV,
- XS_HVM,
- XS_LOCAL,
-};
-
-extern const struct attribute_group *xenbus_dev_groups[];
-
-extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
-extern int xenbus_dev_probe(struct device *_dev);
-extern int xenbus_dev_remove(struct device *_dev);
-extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
-extern int xenbus_probe_node(struct xen_bus_type *bus,
- const char *type,
- const char *nodename);
-extern int xenbus_probe_devices(struct xen_bus_type *bus);
-
-extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
-
-extern void xenbus_dev_shutdown(struct device *_dev);
-
-extern int xenbus_dev_suspend(struct device *dev);
-extern int xenbus_dev_resume(struct device *dev);
-extern int xenbus_dev_cancel(struct device *dev);
-
-extern void xenbus_otherend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len,
- int ignore_on_shutdown);
-
-extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
- char *id_node, char *path_node);
-
-void xenbus_ring_ops_init(void);
-
-#endif
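For drivers, the user-visible part of this series is the changed watch callback prototype, as seen in xenbus_otherend_changed() above and in the frontend_changed()/backend_changed() hunks below: a watch now fires with the changed path and the watch token as two plain strings instead of a string vector plus length. A hedged sketch of a callback written against the new prototype; demo_changed() and demo_watch are made up for illustration and do not appear in the patch.

#include <linux/printk.h>
#include <xen/xenbus.h>

static void demo_changed(struct xenbus_watch *watch,
			 const char *path, const char *token)
{
	/* path is the node that fired; token identifies the watch. */
	pr_info("xenstore node %s changed (token %s)\n", path, token);
}

static struct xenbus_watch demo_watch = {
	.node     = "device/demo",
	.callback = demo_changed,
};

/* register_xenbus_watch(&demo_watch) arms it exactly as before. */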
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 37929df829a3..b0bed4faf44c 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -53,8 +53,7 @@
#include <xen/xenbus.h>
#include <xen/features.h>
-#include "xenbus_comms.h"
-#include "xenbus_probe.h"
+#include "xenbus.h"
/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
@@ -182,9 +181,9 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
}
static void frontend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
- xenbus_otherend_changed(watch, vec, len, 0);
+ xenbus_otherend_changed(watch, path, token, 0);
}
static struct xen_bus_type xenbus_backend = {
@@ -205,11 +204,11 @@ static struct xen_bus_type xenbus_backend = {
};
static void backend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
DPRINTK("");
- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+ xenbus_dev_changed(path, &xenbus_backend);
}
static struct xenbus_watch be_watch = {
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 6d40a972ffb2..19e45ce21f89 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -27,8 +27,7 @@
#include <xen/platform_pci.h>
-#include "xenbus_comms.h"
-#include "xenbus_probe.h"
+#include "xenbus.h"
@@ -87,9 +86,9 @@ static int xenbus_uevent_frontend(struct device *_dev,
static void backend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
- xenbus_otherend_changed(watch, vec, len, 1);
+ xenbus_otherend_changed(watch, path, token, 1);
}
static void xenbus_frontend_delayed_resume(struct work_struct *w)
@@ -154,11 +153,11 @@ static struct xen_bus_type xenbus_frontend = {
};
static void frontend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+ const char *path, const char *token)
{
DPRINTK("");
- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+ xenbus_dev_changed(path, &xenbus_frontend);
}
@@ -333,13 +332,13 @@ static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
static int backend_state;
static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
- const char **v, unsigned int l)
+ const char *path, const char *token)
{
- if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+ if (xenbus_scanf(XBT_NIL, path, "", "%i",
&backend_state) != 1)
backend_state = XenbusStateUnknown;
printk(KERN_DEBUG "XENBUS: backend %s %s\n",
- v[XS_WATCH_PATH], xenbus_strstate(backend_state));
+ path, xenbus_strstate(backend_state));
wake_up(&backend_state_wq);
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 6afb993c5809..e46080214955 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -43,69 +43,36 @@
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
+#include <linux/reboot.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
-#include "xenbus_comms.h"
-#include "xenbus_probe.h"
-
-struct xs_stored_msg {
- struct list_head list;
-
- struct xsd_sockmsg hdr;
-
- union {
- /* Queued replies. */
- struct {
- char *body;
- } reply;
-
- /* Queued watch events. */
- struct {
- struct xenbus_watch *handle;
- char **vec;
- unsigned int vec_size;
- } watch;
- } u;
-};
+#include "xenbus.h"
-struct xs_handle {
- /* A list of replies. Currently only one will ever be outstanding. */
- struct list_head reply_list;
- spinlock_t reply_lock;
- wait_queue_head_t reply_waitq;
-
- /*
- * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
- * response_mutex is never taken simultaneously with the other three.
- *
- * transaction_mutex must be held before incrementing
- * transaction_count. The mutex is held when a suspend is in
- * progress to prevent new transactions starting.
- *
- * When decrementing transaction_count to zero the wait queue
- * should be woken up, the suspend code waits for count to
- * reach zero.
- */
-
- /* One request at a time. */
- struct mutex request_mutex;
-
- /* Protect xenbus reader thread against save/restore. */
- struct mutex response_mutex;
-
- /* Protect transactions against save/restore. */
- struct mutex transaction_mutex;
- atomic_t transaction_count;
- wait_queue_head_t transaction_wq;
-
- /* Protect watch (de)register against save/restore. */
- struct rw_semaphore watch_mutex;
-};
+/*
+ * Framework to protect suspend/resume handling against normal Xenstore
+ * message handling:
+ * During suspend/resume there must be no open transaction and no pending
+ * Xenstore request.
+ * New watch events happening in this time can be ignored by firing all watches
+ * after resume.
+ */
+
+/* Lock protecting enter/exit critical region. */
+static DEFINE_SPINLOCK(xs_state_lock);
+/* Number of users in critical region (protected by xs_state_lock). */
+static unsigned int xs_state_users;
+/* Suspend handler waiting or already active (protected by xs_state_lock)? */
+static int xs_suspend_active;
+/* Unique Xenstore request id (protected by xs_state_lock). */
+static uint32_t xs_request_id;
-static struct xs_handle xs_state;
+/* Wait queue for all callers waiting for critical region to become usable. */
+static DECLARE_WAIT_QUEUE_HEAD(xs_state_enter_wq);
+/* Wait queue for suspend handling waiting for critical region being empty. */
+static DECLARE_WAIT_QUEUE_HEAD(xs_state_exit_wq);
/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
@@ -115,6 +82,9 @@ static DEFINE_SPINLOCK(watches_lock);
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);
+/* Protect watch (de)register against save/restore. */
+static DECLARE_RWSEM(xs_watch_rwsem);
+
/*
* Details of the xenwatch callback kernel thread. The thread waits on the
* watch_events_waitq for work to do (queued on watch_events list). When it
@@ -125,6 +95,59 @@ static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
+static void xs_suspend_enter(void)
+{
+ spin_lock(&xs_state_lock);
+ xs_suspend_active++;
+ spin_unlock(&xs_state_lock);
+ wait_event(xs_state_exit_wq, xs_state_users == 0);
+}
+
+static void xs_suspend_exit(void)
+{
+ spin_lock(&xs_state_lock);
+ xs_suspend_active--;
+ spin_unlock(&xs_state_lock);
+ wake_up_all(&xs_state_enter_wq);
+}
+
+static uint32_t xs_request_enter(struct xb_req_data *req)
+{
+ uint32_t rq_id;
+
+ req->type = req->msg.type;
+
+ spin_lock(&xs_state_lock);
+
+ while (!xs_state_users && xs_suspend_active) {
+ spin_unlock(&xs_state_lock);
+ wait_event(xs_state_enter_wq, xs_suspend_active == 0);
+ spin_lock(&xs_state_lock);
+ }
+
+ if (req->type == XS_TRANSACTION_START)
+ xs_state_users++;
+ xs_state_users++;
+ rq_id = xs_request_id++;
+
+ spin_unlock(&xs_state_lock);
+
+ return rq_id;
+}
+
+void xs_request_exit(struct xb_req_data *req)
+{
+ spin_lock(&xs_state_lock);
+ xs_state_users--;
+ if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
+ req->type == XS_TRANSACTION_END)
+ xs_state_users--;
+ spin_unlock(&xs_state_lock);
+
+ if (xs_suspend_active && !xs_state_users)
+ wake_up(&xs_state_exit_wq);
+}
+
static int get_error(const char *errorstring)
{
unsigned int i;
@@ -162,21 +185,24 @@ static bool xenbus_ok(void)
}
return false;
}
-static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
+
+static bool test_reply(struct xb_req_data *req)
{
- struct xs_stored_msg *msg;
- char *body;
+ if (req->state == xb_req_state_got_reply || !xenbus_ok())
+ return true;
+
+ /* Make sure to reread req->state each time. */
+ barrier();
- spin_lock(&xs_state.reply_lock);
+ return false;
+}
+
+static void *read_reply(struct xb_req_data *req)
+{
+ while (req->state != xb_req_state_got_reply) {
+ wait_event(req->wq, test_reply(req));
- while (list_empty(&xs_state.reply_list)) {
- spin_unlock(&xs_state.reply_lock);
- if (xenbus_ok())
- /* XXX FIXME: Avoid synchronous wait for response here. */
- wait_event_timeout(xs_state.reply_waitq,
- !list_empty(&xs_state.reply_list),
- msecs_to_jiffies(500));
- else {
+ if (!xenbus_ok())
/*
* If we are in the process of being shut-down there is
* no point of trying to contact XenBus - it is either
@@ -184,76 +210,82 @@ static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
* has been killed or is unreachable.
*/
return ERR_PTR(-EIO);
- }
- spin_lock(&xs_state.reply_lock);
+ if (req->err)
+ return ERR_PTR(req->err);
+
}
- msg = list_entry(xs_state.reply_list.next,
- struct xs_stored_msg, list);
- list_del(&msg->list);
+ return req->body;
+}
- spin_unlock(&xs_state.reply_lock);
+static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
+{
+ bool notify;
- *type = msg->hdr.type;
- if (len)
- *len = msg->hdr.len;
- body = msg->u.reply.body;
+ req->msg = *msg;
+ req->err = 0;
+ req->state = xb_req_state_queued;
+ init_waitqueue_head(&req->wq);
- kfree(msg);
+ req->msg.req_id = xs_request_enter(req);
- return body;
-}
+ mutex_lock(&xb_write_mutex);
+ list_add_tail(&req->list, &xb_write_list);
+ notify = list_is_singular(&xb_write_list);
+ mutex_unlock(&xb_write_mutex);
-static void transaction_start(void)
-{
- mutex_lock(&xs_state.transaction_mutex);
- atomic_inc(&xs_state.transaction_count);
- mutex_unlock(&xs_state.transaction_mutex);
+ if (notify)
+ wake_up(&xb_waitq);
}
-static void transaction_end(void)
+static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
{
- if (atomic_dec_and_test(&xs_state.transaction_count))
- wake_up(&xs_state.transaction_wq);
-}
+ void *ret;
-static void transaction_suspend(void)
-{
- mutex_lock(&xs_state.transaction_mutex);
- wait_event(xs_state.transaction_wq,
- atomic_read(&xs_state.transaction_count) == 0);
+ ret = read_reply(req);
+
+ xs_request_exit(req);
+
+ msg->type = req->msg.type;
+ msg->len = req->msg.len;
+
+ mutex_lock(&xb_write_mutex);
+ if (req->state == xb_req_state_queued ||
+ req->state == xb_req_state_wait_reply)
+ req->state = xb_req_state_aborted;
+ else
+ kfree(req);
+ mutex_unlock(&xb_write_mutex);
+
+ return ret;
}
-static void transaction_resume(void)
+static void xs_wake_up(struct xb_req_data *req)
{
- mutex_unlock(&xs_state.transaction_mutex);
+ wake_up(&req->wq);
}
-void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
{
- void *ret;
- enum xsd_sockmsg_type type = msg->type;
- int err;
-
- if (type == XS_TRANSACTION_START)
- transaction_start();
+ struct xb_req_data *req;
+ struct kvec *vec;
- mutex_lock(&xs_state.request_mutex);
+ req = kmalloc(sizeof(*req) + sizeof(*vec), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
- err = xb_write(msg, sizeof(*msg) + msg->len);
- if (err) {
- msg->type = XS_ERROR;
- ret = ERR_PTR(err);
- } else
- ret = read_reply(&msg->type, &msg->len);
+ vec = (struct kvec *)(req + 1);
+ vec->iov_len = msg->len;
+ vec->iov_base = msg + 1;
- mutex_unlock(&xs_state.request_mutex);
+ req->vec = vec;
+ req->num_vecs = 1;
+ req->cb = xenbus_dev_queue_reply;
+ req->par = par;
- if ((msg->type == XS_TRANSACTION_END) ||
- ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
- transaction_end();
+ xs_send(req, msg);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);
@@ -264,37 +296,31 @@ static void *xs_talkv(struct xenbus_transaction t,
unsigned int num_vecs,
unsigned int *len)
{
+ struct xb_req_data *req;
struct xsd_sockmsg msg;
void *ret = NULL;
unsigned int i;
int err;
+ req = kmalloc(sizeof(*req), GFP_NOIO | __GFP_HIGH);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->vec = iovec;
+ req->num_vecs = num_vecs;
+ req->cb = xs_wake_up;
+
msg.tx_id = t.id;
- msg.req_id = 0;
msg.type = type;
msg.len = 0;
for (i = 0; i < num_vecs; i++)
msg.len += iovec[i].iov_len;
- mutex_lock(&xs_state.request_mutex);
-
- err = xb_write(&msg, sizeof(msg));
- if (err) {
- mutex_unlock(&xs_state.request_mutex);
- return ERR_PTR(err);
- }
-
- for (i = 0; i < num_vecs; i++) {
- err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
- if (err) {
- mutex_unlock(&xs_state.request_mutex);
- return ERR_PTR(err);
- }
- }
-
- ret = read_reply(&msg.type, len);
+ xs_send(req, &msg);
- mutex_unlock(&xs_state.request_mutex);
+ ret = xs_wait_for_reply(req, &msg);
+ if (len)
+ *len = msg.len;
if (IS_ERR(ret))
return ret;
@@ -501,13 +527,9 @@ int xenbus_transaction_start(struct xenbus_transaction *t)
{
char *id_str;
- transaction_start();
-
id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
- if (IS_ERR(id_str)) {
- transaction_end();
+ if (IS_ERR(id_str))
return PTR_ERR(id_str);
- }
t->id = simple_strtoul(id_str, NULL, 0);
kfree(id_str);
@@ -521,18 +543,13 @@ EXPORT_SYMBOL_GPL(xenbus_transaction_start);
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
char abortstr[2];
- int err;
if (abort)
strcpy(abortstr, "F");
else
strcpy(abortstr, "T");
- err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
-
- transaction_end();
-
- return err;
+ return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);
@@ -665,6 +682,30 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+
+int xs_watch_msg(struct xs_watch_event *event)
+{
+ if (count_strings(event->body, event->len) != 2) {
+ kfree(event);
+ return -EINVAL;
+ }
+ event->path = (const char *)event->body;
+ event->token = (const char *)strchr(event->body, '\0') + 1;
+
+ spin_lock(&watches_lock);
+ event->handle = find_watch(event->token);
+ if (event->handle != NULL) {
+ spin_lock(&watch_events_lock);
+ list_add_tail(&event->list, &watch_events);
+ wake_up(&watch_events_waitq);
+ spin_unlock(&watch_events_lock);
+ } else
+ kfree(event);
+ spin_unlock(&watches_lock);
+
+ return 0;
+}
+
/*
* Certain older XenBus toolstack cannot handle reading values that are
* not populated. Some Xen 3.4 installation are incapable of doing this
@@ -713,7 +754,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)
sprintf(token, "%lX", (long)watch);
- down_read(&xs_state.watch_mutex);
+ down_read(&xs_watch_rwsem);
spin_lock(&watches_lock);
BUG_ON(find_watch(token));
@@ -728,7 +769,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)
spin_unlock(&watches_lock);
}
- up_read(&xs_state.watch_mutex);
+ up_read(&xs_watch_rwsem);
return err;
}
@@ -736,13 +777,13 @@ EXPORT_SYMBOL_GPL(register_xenbus_watch);
void unregister_xenbus_watch(struct xenbus_watch *watch)
{
- struct xs_stored_msg *msg, *tmp;
+ struct xs_watch_event *event, *tmp;
char token[sizeof(watch) * 2 + 1];
int err;
sprintf(token, "%lX", (long)watch);
- down_read(&xs_state.watch_mutex);
+ down_read(&xs_watch_rwsem);
spin_lock(&watches_lock);
BUG_ON(!find_watch(token));
@@ -753,7 +794,7 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
if (err)
pr_warn("Failed to release watch %s: %i\n", watch->node, err);
- up_read(&xs_state.watch_mutex);
+ up_read(&xs_watch_rwsem);
/* Make sure there are no callbacks running currently (unless
its us) */
@@ -762,12 +803,11 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
/* Cancel pending watch events. */
spin_lock(&watch_events_lock);
- list_for_each_entry_safe(msg, tmp, &watch_events, list) {
- if (msg->u.watch.handle != watch)
+ list_for_each_entry_safe(event, tmp, &watch_events, list) {
+ if (event->handle != watch)
continue;
- list_del(&msg->list);
- kfree(msg->u.watch.vec);
- kfree(msg);
+ list_del(&event->list);
+ kfree(event);
}
spin_unlock(&watch_events_lock);
@@ -778,10 +818,10 @@ EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
void xs_suspend(void)
{
- transaction_suspend();
- down_write(&xs_state.watch_mutex);
- mutex_lock(&xs_state.request_mutex);
- mutex_lock(&xs_state.response_mutex);
+ xs_suspend_enter();
+
+ down_write(&xs_watch_rwsem);
+ mutex_lock(&xs_response_mutex);
}
void xs_resume(void)
@@ -791,31 +831,31 @@ void xs_resume(void)
xb_init_comms();
- mutex_unlock(&xs_state.response_mutex);
- mutex_unlock(&xs_state.request_mutex);
- transaction_resume();
+ mutex_unlock(&xs_response_mutex);
+
+ xs_suspend_exit();
- /* No need for watches_lock: the watch_mutex is sufficient. */
+ /* No need for watches_lock: the xs_watch_rwsem is sufficient. */
list_for_each_entry(watch, &watches, list) {
sprintf(token, "%lX", (long)watch);
xs_watch(watch->node, token);
}
- up_write(&xs_state.watch_mutex);
+ up_write(&xs_watch_rwsem);
}
void xs_suspend_cancel(void)
{
- mutex_unlock(&xs_state.response_mutex);
- mutex_unlock(&xs_state.request_mutex);
- up_write(&xs_state.watch_mutex);
- mutex_unlock(&xs_state.transaction_mutex);
+ mutex_unlock(&xs_response_mutex);
+ up_write(&xs_watch_rwsem);
+
+ xs_suspend_exit();
}
static int xenwatch_thread(void *unused)
{
struct list_head *ent;
- struct xs_stored_msg *msg;
+ struct xs_watch_event *event;
for (;;) {
wait_event_interruptible(watch_events_waitq,
@@ -833,13 +873,10 @@ static int xenwatch_thread(void *unused)
spin_unlock(&watch_events_lock);
if (ent != &watch_events) {
- msg = list_entry(ent, struct xs_stored_msg, list);
- msg->u.watch.handle->callback(
- msg->u.watch.handle,
- (const char **)msg->u.watch.vec,
- msg->u.watch.vec_size);
- kfree(msg->u.watch.vec);
- kfree(msg);
+ event = list_entry(ent, struct xs_watch_event, list);
+ event->handle->callback(event->handle, event->path,
+ event->token);
+ kfree(event);
}
mutex_unlock(&xenwatch_mutex);
@@ -848,126 +885,37 @@ static int xenwatch_thread(void *unused)
return 0;
}
-static int process_msg(void)
+/*
+ * Wake up all threads waiting for a xenstore reply. In case of shutdown all
+ * pending replies will be marked as "aborted" in order to let the waiters
+ * return in spite of xenstore possibly no longer being able to reply. This
+ * will avoid blocking shutdown by a thread waiting for xenstore but being
+ * necessary for shutdown processing to proceed.
+ */
+static int xs_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
{
- struct xs_stored_msg *msg;
- char *body;
- int err;
-
- /*
- * We must disallow save/restore while reading a xenstore message.
- * A partial read across s/r leaves us out of sync with xenstored.
- */
- for (;;) {
- err = xb_wait_for_data_to_read();
- if (err)
- return err;
- mutex_lock(&xs_state.response_mutex);
- if (xb_data_to_read())
- break;
- /* We raced with save/restore: pending data 'disappeared'. */
- mutex_unlock(&xs_state.response_mutex);
- }
-
-
- msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
- if (msg == NULL) {
- err = -ENOMEM;
- goto out;
- }
-
- err = xb_read(&msg->hdr, sizeof(msg->hdr));
- if (err) {
- kfree(msg);
- goto out;
- }
-
- if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
- kfree(msg);
- err = -EINVAL;
- goto out;
- }
-
- body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
- if (body == NULL) {
- kfree(msg);
- err = -ENOMEM;
- goto out;
- }
-
- err = xb_read(body, msg->hdr.len);
- if (err) {
- kfree(body);
- kfree(msg);
- goto out;
- }
- body[msg->hdr.len] = '\0';
-
- if (msg->hdr.type == XS_WATCH_EVENT) {
- msg->u.watch.vec = split(body, msg->hdr.len,
- &msg->u.watch.vec_size);
- if (IS_ERR(msg->u.watch.vec)) {
- err = PTR_ERR(msg->u.watch.vec);
- kfree(msg);
- goto out;
- }
-
- spin_lock(&watches_lock);
- msg->u.watch.handle = find_watch(
- msg->u.watch.vec[XS_WATCH_TOKEN]);
- if (msg->u.watch.handle != NULL) {
- spin_lock(&watch_events_lock);
- list_add_tail(&msg->list, &watch_events);
- wake_up(&watch_events_waitq);
- spin_unlock(&watch_events_lock);
- } else {
- kfree(msg->u.watch.vec);
- kfree(msg);
- }
- spin_unlock(&watches_lock);
- } else {
- msg->u.reply.body = body;
- spin_lock(&xs_state.reply_lock);
- list_add_tail(&msg->list, &xs_state.reply_list);
- spin_unlock(&xs_state.reply_lock);
- wake_up(&xs_state.reply_waitq);
- }
+ struct xb_req_data *req;
- out:
- mutex_unlock(&xs_state.response_mutex);
- return err;
+ mutex_lock(&xb_write_mutex);
+ list_for_each_entry(req, &xs_reply_list, list)
+ wake_up(&req->wq);
+ list_for_each_entry(req, &xb_write_list, list)
+ wake_up(&req->wq);
+ mutex_unlock(&xb_write_mutex);
+ return NOTIFY_DONE;
}
-static int xenbus_thread(void *unused)
-{
- int err;
-
- for (;;) {
- err = process_msg();
- if (err)
- pr_warn("error %d while reading message\n", err);
- if (kthread_should_stop())
- break;
- }
-
- return 0;
-}
+static struct notifier_block xs_reboot_nb = {
+ .notifier_call = xs_reboot_notify,
+};
int xs_init(void)
{
int err;
struct task_struct *task;
- INIT_LIST_HEAD(&xs_state.reply_list);
- spin_lock_init(&xs_state.reply_lock);
- init_waitqueue_head(&xs_state.reply_waitq);
-
- mutex_init(&xs_state.request_mutex);
- mutex_init(&xs_state.response_mutex);
- mutex_init(&xs_state.transaction_mutex);
- init_rwsem(&xs_state.watch_mutex);
- atomic_set(&xs_state.transaction_count, 0);
- init_waitqueue_head(&xs_state.transaction_wq);
+ register_reboot_notifier(&xs_reboot_nb);
/* Initialize the shared memory rings to talk to xenstored */
err = xb_init_comms();
@@ -979,10 +927,6 @@ int xs_init(void)
return PTR_ERR(task);
xenwatch_pid = task->pid;
- task = kthread_run(xenbus_thread, NULL, "xenbus");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
/* shutdown watches for kexec boot */
xs_reset_watches();
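The xs_state_lock/xs_state_users/xs_suspend_active trio above replaces the old request, transaction and response mutexes with a counted critical region: every request (and every open transaction) holds a user reference, the suspend path raises a flag and waits for the count to drain, and new first entrants block until the flag is cleared again. Below is a rough, self-contained model of that gate, not the patch itself; all gate_* names are invented for illustration.

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(gate_lock);
static unsigned int gate_users;		/* callers inside the region */
static int gate_suspending;		/* suspend waiting or active */
static DECLARE_WAIT_QUEUE_HEAD(gate_enter_wq);
static DECLARE_WAIT_QUEUE_HEAD(gate_exit_wq);

static void gate_request_enter(void)
{
	spin_lock(&gate_lock);
	while (!gate_users && gate_suspending) {
		/* Don't let the first user in while a suspend is pending. */
		spin_unlock(&gate_lock);
		wait_event(gate_enter_wq, gate_suspending == 0);
		spin_lock(&gate_lock);
	}
	gate_users++;
	spin_unlock(&gate_lock);
}

static void gate_request_exit(void)
{
	spin_lock(&gate_lock);
	gate_users--;
	spin_unlock(&gate_lock);

	if (gate_suspending && !gate_users)
		wake_up(&gate_exit_wq);
}

static void gate_suspend(void)
{
	spin_lock(&gate_lock);
	gate_suspending++;
	spin_unlock(&gate_lock);
	wait_event(gate_exit_wq, gate_users == 0);
}

static void gate_resume(void)
{
	spin_lock(&gate_lock);
	gate_suspending--;
	spin_unlock(&gate_lock);
	wake_up_all(&gate_enter_wq);
}

In the patch the extra xs_state_users bump for XS_TRANSACTION_START, and the matching extra decrement on XS_TRANSACTION_END or on an XS_ERROR reply, plays the role of a transaction-long gate_request_enter()/gate_request_exit() pair.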
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 8559a71f36b1..328c3987b112 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -16,10 +16,10 @@
#include <linux/magic.h>
#include <xen/xen.h>
+#include <xen/xenbus.h>
#include "xenfs.h"
#include "../privcmd.h"
-#include "../xenbus/xenbus_comms.h"
#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
index fef20dbc6a5c..82fd2a396d96 100644
--- a/drivers/xen/xenfs/xenstored.c
+++ b/drivers/xen/xenfs/xenstored.c
@@ -4,9 +4,9 @@
#include <linux/fs.h>
#include <xen/page.h>
+#include <xen/xenbus.h>
#include "xenfs.h"
-#include "../xenbus/xenbus_comms.h"
static ssize_t xsd_read(struct file *file, char __user *buf,
size_t size, loff_t *off)